diff --git a/.buildkite/scripts/build-bench.sh b/.buildkite/scripts/build-bench.sh index d1ad80389107eb..a19e4291bc1426 100755 --- a/.buildkite/scripts/build-bench.sh +++ b/.buildkite/scripts/build-bench.sh @@ -22,5 +22,5 @@ EOF # shellcheck disable=SC2016 group "bench" \ - "$(build_steps "bench-part-1" "ci/bench/part1.sh")" \ - "$(build_steps "bench-part-2" "ci/bench/part2.sh")" + "$(build_steps "bench-part-1" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part1.sh")" \ + "$(build_steps "bench-part-2" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part2.sh")" diff --git a/.github/scripts/cargo-clippy-before-script.sh b/.github/scripts/cargo-clippy-before-script.sh new file mode 100755 index 00000000000000..b9426203aa6ffc --- /dev/null +++ b/.github/scripts/cargo-clippy-before-script.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -e + +os_name="$1" + +case "$os_name" in +"Windows") + ;; +"macOS") + brew install protobuf + ;; +"Linux") ;; +*) + echo "Unknown Operating System" + ;; +esac diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml new file mode 100644 index 00000000000000..d0bad722e0d3a6 --- /dev/null +++ b/.github/workflows/cargo.yml @@ -0,0 +1,71 @@ +name: Cargo + +on: + push: + branches: + - master + - v[0-9]+.[0-9]+ + pull_request: + branches: + - master + - v[0-9]+.[0-9]+ + paths: + - "**.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - ".github/scripts/cargo-clippy-before-script.sh" + - ".github/workflows/cargo.yml" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + SHELL: /bin/bash + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + +jobs: + clippy-stable: + strategy: + matrix: + os: + - macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + + - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - shell: bash + run: .github/scripts/cargo-clippy-before-script.sh ${{ runner.os }} + + - shell: bash + run: | + source ci/rust-version.sh stable + rustup component add clippy --toolchain "$rust_stable" + scripts/cargo-clippy-stable.sh + + clippy-nightly: + strategy: + matrix: + os: + - macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + + - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - shell: bash + run: .github/scripts/cargo-clippy-before-script.sh ${{ runner.os }} + + - shell: bash + run: | + source ci/rust-version.sh nightly + rustup component add clippy --toolchain "$rust_nightly" + scripts/cargo-clippy-nightly.sh diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml new file mode 100644 index 00000000000000..c150beef37981b --- /dev/null +++ b/.github/workflows/downstream-project-anchor.yml @@ -0,0 +1,53 @@ +name: Downstream Project - Anchor + +on: + push: + branches: + - master + - v[0-9]+.[0-9]+ + pull_request: + branches: + - master + - v[0-9]+.[0-9]+ + paths: + - "**.rs" + - "Cargo.toml" + - "Cargo.lock" + - "cargo-build-bpf" + - "cargo-test-bpf" + - "cargo-build-sbf" + - "cargo-test-sbf" + - ".github/workflows/downstream-project-anchor.yml" + workflow_call: + inputs: + branch: + required: false + type: string + default: "master" + +env: + SHELL: /bin/bash + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + version: ["v0.29.0"] + steps: + - uses: 
actions/checkout@v3 + + - shell: bash + run: | + .github/scripts/purge-ubuntu-runner.sh + + - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - shell: bash + run: | + source .github/scripts/downstream-project-spl-install-deps.sh + ./scripts/build-downstream-anchor-projects.sh ${{ matrix.version }} diff --git a/.gitignore b/.gitignore index 3167a9d7207b21..9d9ccf04c28131 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ -/dos/farf/ -/farf/ +farf/ /solana-release/ /solana-release.tar.bz2 /solana-metrics/ diff --git a/.mergify.yml b/.mergify.yml index ab81476816764c..bb51b1aae097f9 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -58,6 +58,24 @@ pull_request_rules: # only require docs checks if docs files changed - -files~=^docs/ - status-success=build & deploy docs + - or: + - -files~=(\.rs|Cargo\.toml|Cargo\.lock|\.github/scripts/cargo-clippy-before-script\.sh|\.github/workflows/cargo\.yml)$ + - and: + - check-success=clippy-stable (macos-latest) + - check-success=clippy-nightly (macos-latest) + - or: + - -files~=(\.rs|Cargo\.toml|Cargo\.lock|cargo-build-bpf|cargo-test-bpf|cargo-build-sbf|cargo-test-sbf|ci/downstream-projects/run-spl\.sh|\.github/workflows/downstream-project-spl\.yml)$ + - and: + - status-success=cargo-test-sbf (token/program) + - status-success=cargo-test-sbf (instruction-padding/program, token/program-2022, token/program-2022-test) + - status-success=cargo-test-sbf (associated-token-account/program, associated-token-account/program-test) + - status-success=cargo-test-sbf (token-upgrade/program) + - status-success=cargo-test-sbf (feature-proposal/program) + - status-success=cargo-test-sbf (governance/addin-mock/program, governance/program) + - status-success=cargo-test-sbf (memo/program) + - status-success=cargo-test-sbf (name-service/program) + - status-success=cargo-test-sbf (stake-pool/program) + - status-success=cargo-test-sbf (single-pool/program) actions: merge: method: squash @@ -98,6 +116,19 @@ pull_request_rules: ignore_conflicts: true branches: - v1.16 + - name: v1.16 backport warning comment + conditions: + - label=v1.16 + actions: + comment: + message: > + Backports to the stable branch are to be avoided unless absolutely + necessary for fixing bugs, security issues, and perf regressions. + Changes intended for backport should be structured such that a + minimum effective diff can be committed separately from any + refactoring, plumbing, cleanup, etc that are not strictly + necessary to achieve the goal. Any of the latter should go only + into master and ride the normal stabilization schedule. - name: v1.17 feature-gate backport conditions: - label=v1.17 @@ -122,6 +153,21 @@ pull_request_rules: ignore_conflicts: true branches: - v1.17 + - name: v1.17 backport warning comment + conditions: + - label=v1.17 + actions: + comment: + message: > + Backports to the beta branch are to be avoided unless absolutely + necessary for fixing bugs, security issues, and perf regressions. + Changes intended for backport should be structured such that a + minimum effective diff can be committed separately from any + refactoring, plumbing, cleanup, etc that are not strictly + necessary to achieve the goal. Any of the latter should go only + into master and ride the normal stabilization schedule. Exceptions + include CI/metrics changes, CLI improvements and documentation + updates on a case by case basis. commands_restrictions: # The author of copied PRs is the Mergify user. 
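The `or:` conditions added to `.mergify.yml` above encode "require these status checks only when matching files changed": each rule is satisfied either because no changed file matches the `files~=` regex, or because every gated check succeeded (mirroring the existing `# only require docs checks if docs files changed` pattern). A minimal sketch of that predicate in Rust — `gate_satisfied` and its arguments are hypothetical names for illustration, not part of this repository:

```rust
/// Sketch of the gating predicate expressed by the new Mergify
/// `or:`/`-files~=` conditions (hypothetical helper, illustration only):
/// merging is allowed if no changed file matches the watched pattern,
/// or if every gated status check passed.
fn gate_satisfied(
    changed_files: &[&str],
    checks_passed: &[bool],
    is_watched: impl Fn(&str) -> bool,
) -> bool {
    let touched = changed_files.iter().any(|f| is_watched(f));
    !touched || checks_passed.iter().all(|&ok| ok)
}

fn main() {
    let rust_files =
        |f: &str| f.ends_with(".rs") || f.ends_with("Cargo.toml") || f.ends_with("Cargo.lock");
    // Docs-only change: clippy results are irrelevant, the rule passes.
    assert!(gate_satisfied(&["docs/src/terminology.md"], &[false, false], rust_files));
    // Rust change with a failing clippy job: the rule does not pass.
    assert!(!gate_satisfied(&["sdk/src/lib.rs"], &[true, false], rust_files));
}
```

In effect, docs-only pull requests can merge without the clippy or downstream `cargo-test-sbf` jobs ever running, while any PR touching Rust or Cargo files must have all of the listed checks green.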
diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fe1b4fc2ae902..e20f5eff7f196c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,11 @@ # Changelog + All notable changes to this project will be documented in this file. Please follow the [guidance](#adding-to-this-changelog) at the bottom of this file when making changes The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) -and follows a [Backwards Compatibility Policy](https://docs.solana.com/developing/backwards-compatibility) +and follows a [Backwards Compatibility Policy](https://docs.solanalabs.com/backwards-compatibility) Release channels have their own copy of this changelog: * [edge - v1.18](#edge-channel) @@ -16,11 +17,27 @@ Release channels have their own copy of this changelog: * Changes * Added a github check to support `changelog` label * The default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) + * The default for `solana-ledger-tool`, however, remains `always` (#34228) + * Added `central-scheduler` option for `--block-production-method` (#33890) + * Updated to Borsh v1 + * Added allow_commission_decrease_at_any_time feature which will allow commission on a vote account to be + decreased even in the second half of epochs when the commission_updates_only_allowed_in_first_half_of_epoch + feature would have prevented it + * Updated local ledger storage so that the RPC endpoint + `getSignaturesForAddress` always returns signatures in block-inclusion order + * RPC's `simulateTransaction` now returns `innerInstructions` as `json`/`jsonParsed` (#34313). + * Bigtable upload now includes entry summary data for each slot, stored in a + new `entries` table * Upgrade Notes + * `solana-program` and `solana-sdk` default to support for Borsh v1, with +limited backward compatibility for v0.10 and v0.9. Please upgrade to Borsh v1. + * Operators running their own bigtable instances need to create the `entries` + table before upgrading their warehouse nodes ## [1.17.0] * Changes * Added a changelog. + * Added `--use-snapshot-archives-at-startup` for faster validator restarts * Upgrade Notes ## Adding to this Changelog diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9204a7e57b63d8..f5cbd4f5e09ce7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -315,17 +315,17 @@ Inventing new terms is allowed, but should only be done when the term is widely used and understood. Avoid introducing new 3-letter terms, which can be confused with 3-letter acronyms. -[Terms currently in use](docs/src/terminology.md) +[Terms currently in use](https://solana.com/docs/terminology) ## Design Proposals -Solana's architecture is described by docs generated from markdown files in the `docs/src/` -directory and viewable on the official [Solana Documentation](https://docs.solana.com) website. +This Solana validator client's architecture is described by docs generated from markdown files in the `docs/src/` +directory and viewable on the official [Solana Labs Validator Client](https://docs.solanalabs.com) documentation website. Current design proposals may be viewed on the docs site: -1. [Accepted Proposals](https://docs.solana.com/proposals/accepted-design-proposals) -2. [Implemented Proposals](https://docs.solana.com/implemented-proposals/implemented-proposals) +1. [Accepted Proposals](https://docs.solanalabs.com/proposals/accepted-design-proposals) +2. 
[Implemented Proposals](https://docs.solanalabs.com/implemented-proposals/implemented-proposals) New design proposals should follow this guide on [how to submit a design proposal](./docs/src/proposals.md#submit-a-design-proposal). diff --git a/Cargo.lock b/Cargo.lock index 91835814cfbbd4..31fe949404d92a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -163,15 +163,15 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "aquamarine" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760" dependencies = [ "include_dir", "itertools", @@ -444,13 +444,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -497,7 +497,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustversion", "serde", @@ -610,7 +610,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -732,6 +732,16 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "borsh" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" +dependencies = [ + "borsh-derive 1.2.1", + "cfg_aliases", +] + [[package]] name = "borsh-derive" version = "0.9.3" @@ -758,6 +768,20 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478b41ff04256c5c8330f3dfdaaae2a5cc976a8e75088bafa4625b0d0208de8c" +dependencies = [ + "once_cell", + "proc-macro-crate 2.0.0", + "proc-macro2", + "quote", + "syn 2.0.46", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" version = "0.9.3" @@ -1035,6 +1059,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.31" @@ -1375,9 +1405,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1408,9 +1438,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ "cfg-if 1.0.0", ] @@ -1495,12 +1525,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.1" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" +checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b" dependencies = [ "nix 0.27.1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1537,7 +1567,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1548,7 +1578,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1558,7 +1588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.14.1", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core 0.9.8", @@ -1673,9 +1703,9 @@ dependencies = [ [[package]] name = "dir-diff" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2860407d7d7e2e004bb2128510ad9e8d669e76fa005ccf567977b5d71b8b4a0b" +checksum = "a7ad16bf5f84253b50d6557681c58c3ab67c47c77d39fed9aeb56e947290bd10" dependencies = [ "walkdir", ] @@ -1732,7 +1762,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1868,7 +1898,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1905,23 +1935,12 @@ checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" [[package]] name = "errno" -version = "0.3.1" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -2065,11 +2084,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" 
dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -2080,9 +2099,12 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" +dependencies = [ + "autocfg", +] [[package]] name = "fs_extra" @@ -2104,9 +2126,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -2119,9 +2141,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2129,15 +2151,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2147,38 +2169,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2320,7 +2342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.29", + "futures 0.3.30", "log", "reqwest", "serde", @@ -2412,14 +2434,14 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", ] [[package]] name = "hashbrown" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "headers" @@ -2533,9 +2555,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -2573,9 +2595,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -2588,7 +2610,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -2602,7 +2624,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.29", + "futures 0.3.30", "headers", "http", "hyper", @@ -2684,9 +2706,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2756,7 +2778,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.3", "rayon", ] @@ -2825,9 +2847,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -2850,7 +2872,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2868,7 +2890,7 @@ 
version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "futures-executor", "futures-util", "log", @@ -2883,7 +2905,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-client-transports", ] @@ -2905,7 +2927,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2921,7 +2943,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2936,7 +2958,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-core", "lazy_static", "log", @@ -2952,7 +2974,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.29", + "futures 0.3.30", "globset", "jsonrpc-core", "lazy_static", @@ -2993,9 +3015,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" @@ -3107,9 +3129,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lock_api" @@ -3461,7 +3483,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3565,7 +3587,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3574,10 +3596,10 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ - "proc-macro-crate 1.1.0", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3639,9 +3661,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.59" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" +checksum = 
"8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", @@ -3680,9 +3702,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.95" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -3704,7 +3726,7 @@ dependencies = [ "futures-util", "js-sys", "lazy_static", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "rand 0.8.5", "thiserror", @@ -3757,7 +3779,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "libc", "log", "rand 0.7.3", @@ -3860,9 +3882,9 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "percentage" @@ -4096,14 +4118,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "prio-graph" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78dd2fa9ca0901b4d0dbf51d9862d7e3fb004605e4f4b4132472c3d08e7d901b" +checksum = "6492a75ca57066a4479af45efa302bed448680182b0563f96300645d5f896097" [[package]] name = "proc-macro-crate" @@ -4124,6 +4146,15 @@ dependencies = [ "toml 0.5.8", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4150,18 +4181,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "2de98502f212cfcea8d0bb305bd0f49d7ebdd75b64ba0a68f937d888f4e0d6db" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", @@ -4171,7 +4202,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.7.5", + "regex-syntax", "rusty-fork", "tempfile", "unarray", @@ -4254,7 +4285,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -4265,7 +4296,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -4324,9 +4355,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -4450,9 +4481,9 @@ dependencies = [ [[package]] name = "raptorq" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655b020bbf5c89791160a30f0d4706d8ec7aa5718d6a198f6df19c400e4f4470" +checksum = "6c9cf9270cc5903afdef387f06ef1cd89fb77f45c357c2a425bae78b839fd866" [[package]] name = "rayon" @@ -4560,7 +4591,7 @@ dependencies = [ "aho-corasick 1.0.1", "memchr", "regex-automata 0.4.3", - "regex-syntax 0.8.2", + "regex-syntax", ] [[package]] @@ -4577,15 +4608,9 @@ checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-syntax 0.8.2", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - [[package]] name = "regex-syntax" version = "0.8.2" @@ -4594,9 +4619,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "async-compression", "base64 0.21.5", @@ -4616,7 +4641,7 @@ dependencies = [ "mime", "native-tls", "once_cell", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustls", "rustls-pemfile 1.0.0", @@ -4629,7 +4654,7 @@ dependencies = [ "tokio-rustls", "tokio-util 0.7.1", "tower-service", - "url 2.4.1", + "url 2.5.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4698,13 +4723,13 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.2.0" +version = "7.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" +checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" dependencies = [ "libc", "rtoolbox", - "winapi 0.3.9", + "windows-sys 0.48.0", ] [[package]] @@ -4758,22 +4783,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", "ring 0.17.3", @@ -4974,38 +4999,38 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.194" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "cb0652c533506ad7a2e353cce269330d6afd8bdfb6d75e0ace5b35aacbd7b9e9" dependencies = [ "itoa", "ryu", @@ -5014,9 +5039,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ "serde", ] @@ -5052,7 +5077,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -5069,9 +5094,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.25" +version = "0.9.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +checksum = "a15e0ef66bf939a7c890a0bf6d5a733c70202225f9888a89ed5c62298b019129" dependencies = [ "indexmap 2.1.0", "itoa", @@ -5087,7 +5112,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" dependencies = [ "dashmap", - "futures 0.3.29", + "futures 0.3.30", "lazy_static", "log", "parking_lot 0.12.1", @@ -5102,7 +5127,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -5278,9 +5303,9 @@ checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "smpl_jwt" @@ -5326,7 +5351,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.29", + "futures 0.3.30", "httparse", "log", "rand 0.8.5", @@ -5352,6 +5377,7 @@ dependencies = [ "spl-pod", "spl-token", "spl-token-2022", + "spl-token-group-interface", "spl-token-metadata-interface", "thiserror", "zstd", @@ -5527,8 +5553,8 @@ dependencies = [ name = "solana-banks-client" version = "1.18.0" dependencies = [ - "borsh 0.10.3", - "futures 0.3.29", + 
"borsh 1.2.1", + "futures 0.3.30", "solana-banks-interface", "solana-banks-server", "solana-program", @@ -5555,7 +5581,7 @@ version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.29", + "futures 0.3.30", "solana-accounts-db", "solana-banks-interface", "solana-client", @@ -5588,7 +5614,7 @@ dependencies = [ "rand 0.8.5", "rayon", "serde_json", - "serde_yaml 0.9.25", + "serde_yaml 0.9.29", "serial_test", "solana-clap-utils", "solana-cli-config", @@ -5653,6 +5679,7 @@ dependencies = [ "solana-sdk", "solana-zk-token-sdk", "solana_rbpf", + "test-case", "thiserror", ] @@ -5741,7 +5768,7 @@ dependencies = [ "tar", "tempfile", "tokio", - "toml 0.8.2", + "toml 0.8.8", ] [[package]] @@ -5773,7 +5800,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5791,7 +5818,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5843,6 +5870,7 @@ dependencies = [ "solana_rbpf", "spl-memo", "tempfile", + "test-case", "thiserror", "tiny-bip39", ] @@ -5856,10 +5884,10 @@ dependencies = [ "lazy_static", "serde", "serde_derive", - "serde_yaml 0.9.25", + "serde_yaml 0.9.29", "solana-clap-utils", "solana-sdk", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5896,7 +5924,7 @@ dependencies = [ "bincode", "crossbeam-channel", "dashmap", - "futures 0.3.29", + "futures 0.3.30", "futures-util", "indexmap 2.1.0", "indicatif", @@ -6010,7 +6038,7 @@ dependencies = [ "eager", "etcd-client", "fs_extra", - "futures 0.3.29", + "futures 0.3.30", "histogram", "itertools", "lazy_static", @@ -6063,6 +6091,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-turbine", + "solana-unified-scheduler-pool", "solana-version", "solana-vote", "solana-vote-program", @@ -6234,7 +6263,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -6247,7 +6276,7 @@ dependencies = [ "itertools", "serde", "serde_json", - "serde_yaml 0.9.25", + "serde_yaml 0.9.29", "solana-accounts-db", "solana-clap-utils", "solana-cli-config", @@ -6380,7 +6409,7 @@ dependencies = [ "semver 1.0.20", "serde", "serde_yaml 0.8.26", - "serde_yaml 0.9.25", + "serde_yaml 0.9.29", "solana-clap-utils", "solana-config-program", "solana-logger", @@ -6389,7 +6418,7 @@ dependencies = [ "solana-version", "tar", "tempfile", - "url 2.4.1", + "url 2.5.0", "winapi 0.3.9", "winreg", ] @@ -6425,7 +6454,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.29", + "futures 0.3.30", "itertools", "lazy_static", "libc", @@ -6492,7 +6521,7 @@ dependencies = [ "crossbeam-channel", "csv", "dashmap", - "futures 0.3.29", + "futures 0.3.30", "histogram", "itertools", "log", @@ -6522,9 +6551,11 @@ dependencies = [ "solana-storage-bigtable", "solana-streamer", "solana-transaction-status", + "solana-unified-scheduler-pool", "solana-version", "solana-vote-program", "solana_rbpf", + "thiserror", "tikv-jemallocator", "tokio", ] @@ -6679,7 +6710,7 @@ dependencies = [ "solana-sdk", "solana-version", "tokio", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -6702,7 +6733,7 @@ dependencies = [ name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", "assert_matches", "bincode", "bv", @@ -6783,6 +6814,7 @@ dependencies = [ "blake3", "borsh 0.10.3", "borsh 0.9.3", + "borsh 1.2.1", "bs58", "bv", "bytemuck", @@ -6902,7 +6934,7 @@ dependencies = [ "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.4.1", + "url 2.5.0", ] 
[[package]] @@ -6912,7 +6944,7 @@ dependencies = [ "async-mutex", "async-trait", "crossbeam-channel", - "futures 0.3.29", + "futures 0.3.30", "itertools", "lazy_static", "log", @@ -7029,7 +7061,7 @@ dependencies = [ "bincode", "bs58", "crossbeam-channel", - "futures 0.3.29", + "futures 0.3.30", "indicatif", "jsonrpc-core", "jsonrpc-http-server", @@ -7074,7 +7106,7 @@ version = "1.18.0" dependencies = [ "anyhow", "clap 2.33.3", - "futures 0.3.29", + "futures 0.3.30", "serde_json", "solana-account-decoder", "solana-clap-utils", @@ -7197,6 +7229,20 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-runtime-transaction" +version = "1.18.0" +dependencies = [ + "bincode", + "log", + "rand 0.8.5", + "rustc_version 0.4.0", + "solana-program", + "solana-program-runtime", + "solana-sdk", + "thiserror", +] + [[package]] name = "solana-sdk" version = "1.18.0" @@ -7206,7 +7252,7 @@ dependencies = [ "base64 0.21.5", "bincode", "bitflags 2.4.1", - "borsh 0.10.3", + "borsh 1.2.1", "bs58", "bytemuck", "byteorder", @@ -7265,9 +7311,15 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.39", + "syn 2.0.46", ] +[[package]] +name = "solana-security-txt" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" + [[package]] name = "solana-send-transaction-service" version = "1.18.0" @@ -7326,7 +7378,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.29", + "futures 0.3.30", "goauth", "http", "hyper", @@ -7579,7 +7631,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.29", + "futures 0.3.30", "itertools", "log", "lru", @@ -7621,6 +7673,24 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-unified-scheduler-logic" +version = "1.18.0" + +[[package]] +name = "solana-unified-scheduler-pool" +version = "1.18.0" +dependencies = [ + "assert_matches", + "solana-ledger", + "solana-logger", + "solana-program-runtime", + "solana-runtime", + "solana-sdk", + "solana-unified-scheduler-logic", + "solana-vote", +] + [[package]] name = "solana-upload-perf" version = "1.18.0" @@ -7655,7 +7725,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_yaml 0.9.25", + "serde_yaml 0.9.29", "signal-hook", "solana-account-decoder", "solana-accounts-db", @@ -7907,9 +7977,9 @@ dependencies = [ [[package]] name = "spl-associated-token-account" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "385e31c29981488f2820b2022d8e731aae3b02e6e18e2fd854e4c9a94dc44fc3" +checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" dependencies = [ "assert_matches", "borsh 0.10.3", @@ -7940,7 +8010,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -7952,7 +8022,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.39", + "syn 2.0.46", "thiserror", ] @@ -8010,14 +8080,14 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "spl-tlv-account-resolution" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062e148d3eab7b165582757453632ffeef490c02c86a48bfdb4988f63eefb3b9" +checksum = "3f7020347c07892c08560d230fbb8a980316c9e198e22b198b7b9d951ff96047" dependencies = [ "bytemuck", "solana-program", @@ -8044,9 +8114,9 
@@ dependencies = [ [[package]] name = "spl-token-2022" -version = "0.9.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4abf34a65ba420584a0c35f3903f8d727d1f13ababbdc3f714c6b065a686e86" +checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" dependencies = [ "arrayref", "bytemuck", @@ -8054,16 +8124,31 @@ dependencies = [ "num-traits", "num_enum 0.7.1", "solana-program", + "solana-security-txt", "solana-zk-token-sdk", "spl-memo", "spl-pod", "spl-token", + "spl-token-group-interface", "spl-token-metadata-interface", "spl-transfer-hook-interface", "spl-type-length-value", "thiserror", ] +[[package]] +name = "spl-token-group-interface" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b889509d49fa74a4a033ca5dae6c2307e9e918122d97e58562f5c4ffa795c75d" +dependencies = [ + "bytemuck", + "solana-program", + "spl-discriminator", + "spl-pod", + "spl-program-error", +] + [[package]] name = "spl-token-metadata-interface" version = "0.2.0" @@ -8080,9 +8165,9 @@ dependencies = [ [[package]] name = "spl-transfer-hook-interface" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "051d31803f873cabe71aec3c1b849f35248beae5d19a347d93a5c9cccc5d5a9b" +checksum = "7aabdb7c471566f6ddcee724beb8618449ea24b399e58d464d6b5bc7db550259" dependencies = [ "arrayref", "bytemuck", @@ -8115,9 +8200,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stream-cancel" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a9eb2715209fb8cc0d942fcdff45674bfc9f0090a0d897e85a22955ad159b" +checksum = "5f9fbf9bd71e4cf18d68a8a0951c0e5b7255920c0cd992c4ff51cddd6ef514a3" dependencies = [ "futures-core", "pin-project", @@ -8183,15 +8268,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "89456b690ff72fddcecf231caedbe615c59480c93358a93dfae7fc29e3ebbf0e" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.46", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -8287,7 +8384,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.29", + "futures 0.3.30", "humantime", "opentelemetry", "pin-project", @@ -8316,15 +8413,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.4.1", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8344,9 +8441,9 @@ checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" [[package]] name = "test-case" -version = "3.2.1" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" dependencies = [ "test-case-macros", ] @@ -8361,7 +8458,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -8373,7 +8470,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", "test-case-core", ] @@ -8394,22 +8491,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -8513,8 +8610,7 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" version = "1.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7bf0e43e4131a5377b3291fce21#7cf47705faacf7bf0e43e4131a5377b3291fce21" dependencies = [ "autocfg", "backtrace", @@ -8543,12 +8639,11 @@ dependencies = [ [[package]] name = "tokio-macros" version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7bf0e43e4131a5377b3291fce21#7cf47705faacf7bf0e43e4131a5377b3291fce21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -8654,30 +8749,41 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.2" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" 
dependencies = [ "indexmap 2.1.0", "serde", @@ -8704,7 +8810,7 @@ dependencies = [ "http-body", "hyper", "hyper-timeout", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "prost", "rustls-pemfile 1.0.0", @@ -8846,7 +8952,7 @@ dependencies = [ "rustls", "sha1", "thiserror", - "url 2.4.1", + "url 2.5.0", "utf-8", "webpki-roots 0.24.0", ] @@ -8932,9 +9038,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "untrusted" @@ -8971,13 +9077,13 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", - "percent-encoding 2.3.0", + "idna 0.5.0", + "percent-encoding 2.3.1", ] [[package]] @@ -9060,9 +9166,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -9070,16 +9176,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", "wasm-bindgen-shared", ] @@ -9097,9 +9203,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9107,22 +9213,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" @@ -9221,6 +9327,15 @@ dependencies = [ "windows-targets 0.48.0", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -9251,6 +9366,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.0", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -9263,6 +9393,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -9275,6 +9411,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -9287,6 +9429,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -9299,6 +9447,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -9311,6 +9465,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -9323,6 +9483,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -9335,6 +9501,12 @@ version = "0.48.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" version = "0.5.16" @@ -9401,22 +9573,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -9436,7 +9608,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 714c29fcebe328..fb4d533157e03e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ members = [ "rpc-client-nonce-utils", "rpc-test", "runtime", + "runtime-transaction", "sdk", "sdk/cargo-build-bpf", "sdk/cargo-build-sbf", @@ -107,6 +108,8 @@ members = [ "transaction-status", "turbine", "udp-client", + "unified-scheduler-logic", + "unified-scheduler-pool", "upload-perf", "validator", "version", @@ -132,10 +135,10 @@ edition = "2021" [workspace.dependencies] Inflector = "0.11.4" -aquamarine = "0.3.2" +aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" -ahash = "0.8.6" -anyhow = "1.0.75" +ahash = "0.8.7" +anyhow = "1.0.79" ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" @@ -146,7 +149,7 @@ assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" async-mutex = "1.4.0" -async-trait = "0.1.74" +async-trait = "0.1.77" atty = "0.2.11" backoff = "0.4.0" base64 = "0.21.5" @@ -154,7 +157,7 @@ bincode = "1.3.3" bitflags = { version = "2.3.3", features = ["serde"] } blake3 = "1.5.0" block-buffer = "0.10.4" -borsh = "0.10.3" +borsh = { version = "1.2.1", features = ["derive", "unstable__schema"] } bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" @@ -176,15 +179,15 @@ const_format = "0.2.32" core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" -crossbeam-channel = "0.5.8" +crossbeam-channel = "0.5.10" csv = "1.3.0" -ctrlc = "3.4.1" +ctrlc = "3.4.2" curve25519-dalek = "3.2.1" dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } dialoguer = "0.10.4" digest = "0.10.7" -dir-diff = "0.3.2" +dir-diff = "0.3.3" dirs-next = "2.0.0" dlopen2 = "0.5.0" eager = "0.1.0" @@ -199,9 +202,9 @@ fast-math = "0.1" fd-lock = "3.0.13" flate2 = "1.0.28" fnv = "1.0.7" -fs-err = "2.9.0" +fs-err = "2.11.0" fs_extra = "1.3.0" -futures = "0.3.29" +futures = "0.3.30" futures-util = "0.3.29" gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } @@ -212,9 +215,9 @@ hex = "0.4.3" hidapi = { version = "2.4.1", default-features = false } histogram = "0.6.9" hmac = "0.12.1" -http = "0.2.9" +http = "0.2.11" humantime = "2.0.1" -hyper = "0.14.27" +hyper = "0.14.28" hyper-proxy = "0.9.1" im = 
"15.1.0" index_list = "0.2.11" @@ -224,7 +227,7 @@ itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.65" +js-sys = "0.3.66" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" @@ -234,7 +237,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" -libc = "0.2.149" +libc = "0.2.151" libloading = "0.7.4" libsecp256k1 = "0.6.0" light-poseidon = "0.2.0" @@ -264,9 +267,9 @@ pickledb = { version = "0.5.1", default-features = false } pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" -prio-graph = "0.1.0" -proc-macro2 = "1.0.69" -proptest = "1.3" +prio-graph = "0.2.1" +proc-macro2 = "1.0.74" +proptest = "1.4" prost = "0.11.9" prost-build = "0.11.9" prost-types = "0.11.9" @@ -278,32 +281,32 @@ quinn-proto = "0.10.6" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" -raptorq = "1.7.0" +raptorq = "1.8.0" rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" regex = "1.10.2" -reqwest = { version = "0.11.22", default-features = false } +reqwest = { version = "0.11.23", default-features = false } rolling-file = "0.2.0" -rpassword = "7.2" +rpassword = "7.3" rustc_version = "0.4" -rustls = { version = "0.21.8", default-features = false, features = ["quic"] } +rustls = { version = "0.21.10", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" seqlock = "0.2.0" -serde = "1.0.192" -serde_bytes = "0.11.12" +serde = "1.0.194" +serde_bytes = "0.11.14" serde_derive = "1.0.103" -serde_json = "1.0.108" +serde_json = "1.0.109" serde_with = { version = "2.3.3", default-features = false } -serde_yaml = "0.9.25" +serde_yaml = "0.9.29" serial_test = "2.0.0" sha2 = "0.10.8" sha3 = "0.10.4" signal-hook = "0.3.17" siphasher = "0.3.11" -smallvec = "1.11.1" +smallvec = "1.11.2" smpl_jwt = "0.7.1" socket2 = "0.5.5" soketto = "0.7" @@ -358,11 +361,14 @@ solana-pubsub-client = { path = "pubsub-client", version = "=1.18.0" } solana-quic-client = { path = "quic-client", version = "=1.18.0" } solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.18.0" } solana-remote-wallet = { path = "remote-wallet", version = "=1.18.0", default-features = false } +solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=1.18.0" } +solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=1.18.0" } solana-rpc = { path = "rpc", version = "=1.18.0" } solana-rpc-client = { path = "rpc-client", version = "=1.18.0", default-features = false } solana-rpc-client-api = { path = "rpc-client-api", version = "=1.18.0" } solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.18.0" } solana-runtime = { path = "runtime", version = "=1.18.0" } +solana-runtime-transaction = { path = "runtime-transaction", version = "=1.18.0" } solana-sdk = { path = "sdk", version = "=1.18.0" } solana-sdk-macro = { path = "sdk/macro", version = "=1.18.0" } solana-send-transaction-service = { path = "send-transaction-service", version = "=1.18.0" } @@ -385,15 +391,16 @@ solana-zk-keygen = { path = "zk-keygen", version = "=1.18.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=1.18.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=1.18.0" } solana_rbpf = "=0.8.0" -spl-associated-token-account = "=2.2.0" +spl-associated-token-account = "=2.3.0" spl-instruction-padding = "0.1" spl-memo 
= "=4.0.0" spl-pod = "=0.1.0" spl-token = "=4.0.0" -spl-token-2022 = "=0.9.0" +spl-token-2022 = "=1.0.0" +spl-token-group-interface = "=0.1.0" spl-token-metadata-interface = "=0.2.0" static_assertions = "1.1.0" -stream-cancel = "0.8.1" +stream-cancel = "0.8.2" strum = "0.24" strum_macros = "0.24" subtle = "2.4.1" @@ -404,22 +411,23 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" -tempfile = "3.8.1" -test-case = "3.2.1" -thiserror = "1.0.50" +tempfile = "3.9.0" +test-case = "3.3.1" +thiserror = "1.0.56" tiny-bip39 = "0.8.2" +# Update solana-tokio patch below when updating this version tokio = "1.29.1" tokio-serde = "0.8" tokio-stream = "0.1.14" tokio-tungstenite = "0.20.1" tokio-util = "0.6" -toml = "0.8.0" +toml = "0.8.8" tonic = "0.9.2" tonic-build = "0.9.2" trees = "0.4.2" tungstenite = "0.20.1" uriparse = "0.6.4" -url = "2.4.1" +url = "2.5.0" wasm-bindgen = "0.2" winapi = "0.3.8" winreg = "0.50" @@ -442,16 +450,18 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # * spl-token-2022 # * spl-token-metadata-interface # -# They, in turn, depend on a number of crates that we also include directly using `path` -# specifications. For example, `spl-token` depends on `solana-program`. And we explicitly specify -# `solana-program` above as a local path dependency: +# They, in turn, depend on a number of crates that we also include directly +# using `path` specifications. For example, `spl-token` depends on +# `solana-program`. And we explicitly specify `solana-program` above as a local +# path dependency: # # solana-program = { path = "../../sdk/program", version = "=1.16.0" } # -# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` dependency only using -# what is available on crates.io. Crates.io normally contains a previous version of these crates, -# and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their -# dependencies in our build tree. +# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` +# dependency only using what is available on crates.io. Crates.io normally +# contains a previous version of these crates, and we end up with two versions +# of `solana-program` and `solana-zk-token-sdk` and all of their dependencies in +# our build tree. # # If you are developing downstream using non-crates-io solana-program (local or # forked repo, or from github rev, eg), duplicate the following patch statements @@ -460,8 +470,8 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock # file. # -# There is a similar override in `programs/sbf/Cargo.toml`. Please keep both comments and the -# overrides in sync. +# There is a similar override in `programs/sbf/Cargo.toml`. Please keep both +# comments and the overrides in sync. solana-program = { path = "sdk/program" } solana-zk-token-sdk = { path = "zk-token-sdk" } # @@ -481,9 +491,8 @@ solana-zk-token-sdk = { path = "zk-token-sdk" } # newer versions, but we have not updated yet. As we update, we need to remove # these patch requests. # -# When our dependencies are upgraded, we can remove this patches. Before that -# we might need to maintain these patches in sync with our full dependency -# tree. +# When our dependencies are upgraded, we can remove these patches. Before that +# we might need to maintain these patches in sync with our full dependency tree. 
# Our dependency tree has `aes-gcm-siv` v0.10.3 and the `zeroize` restriction # was removed in the next commit just after the release. So it seems safe to @@ -506,17 +515,17 @@ git = "https://github.com/RustCrypto/AEADs" rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" # Our dependency tree has `curve25519-dalek` v3.2.1. They have removed the -# constrain in the next major release. Commit that removes `zeroize` constrain -# was added to multiple release branches. Bot not to the 3.2 branch. +# constraint in the next major release. The commit that removes the `zeroize` +# constraint was added to multiple release branches, but not to the 3.2 branch. # # `curve25519-dalek` maintainers are saying they do not want to invest any more # time in the 3.2 release: # # https://github.com/dalek-cryptography/curve25519-dalek/issues/452#issuecomment-1749809428 # -# So we have to fork and create our own release, based on v3.2.1. Commit that -# removed `zeroize` constrain on the `main` branch cherry picked on top of the -# v3.2.1 release. +# So we have to fork and create our own release, based on v3.2.1, with the +# commit that removed `zeroize` constraint on the `main` branch cherry-picked on +# top. # # `curve25519-dalek` v3.2.1 release: # @@ -537,3 +546,21 @@ rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" [patch.crates-io.curve25519-dalek] git = "https://github.com/solana-labs/curve25519-dalek.git" rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464" + +# Solana RPC nodes experience stalls when running with `tokio` containing this +# commit: +# https://github.com/tokio-rs/tokio/commit/4eed411519783ef6f58cbf74f886f91142b5cfa6 +# +# Tokio maintainers believe performance degradation is due to application bugs: +# https://github.com/tokio-rs/tokio/issues/4873#issuecomment-1198277677 +# +# This may indeed be true of the code in this monorepo, but we haven't yet +# identified the bug or a way to fix. As a stopgap, this patches `tokio` to the +# tagged version specified above with commit `4eed411` reverted. +# +# Comparison: +# https://github.com/tokio-rs/tokio/compare/tokio-1.29.1...solana-labs:solana-tokio:tokio-1.29.1-revert-4eed411 +# +[patch.crates-io.tokio] +git = "https://github.com/solana-labs/solana-tokio.git" +rev = "7cf47705faacf7bf0e43e4131a5377b3291fce21" diff --git a/README.md b/README.md index 4fccacf2ba0672..c6183f6ab6183e 100644 --- a/README.md +++ b/README.md @@ -66,11 +66,13 @@ $ ./cargo test ``` ### Starting a local testnet -Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps). + +Start your own testnet locally, instructions are in the [online docs](https://docs.solanalabs.com/clusters/benchmark). ### Accessing the remote development cluster + * `devnet` - stable public cluster for development accessible via -devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters) +devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solanalabs.com/clusters) # Benchmarking diff --git a/RELEASE.md b/RELEASE.md index abb79a32a8ca8d..c5aa5d540b1191 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -98,6 +98,7 @@ Alternatively use the Github UI. ### Miscellaneous Clean up +1. Pin the spl-token-cli version in the newly promoted stable branch by setting `splTokenCliVersion` in scripts/spl-token-cli-version.sh to the latest release that depends on the stable branch (usually this will be the latest spl-token-cli release). 1. 
Update [mergify.yml](https://github.com/solana-labs/solana/blob/master/.mergify.yml) to add backport actions for the new branch and remove actions for the obsolete branch.
1. Adjust the [Github backport labels](https://github.com/solana-labs/solana/labels) to add the new branch label and remove the label for the obsolete branch.
1. Announce on Discord #development that the release branch exists so people know to use the new backport labels.
diff --git a/SECURITY.md b/SECURITY.md
index 02b37486a09197..a27ccbe1f2da4a 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -50,7 +50,7 @@ comment as such and then close the report.
 
 ### 2. Triage
 Within the draft security advisory, discuss and determine the severity of the issue. If necessary, members of the solana-labs/security-incident-response group may add other github users to the advisory to assist.
-If it is determined that this not a critical network issue then the advisory should be closed and if more follow-up is required a normal Solana public github issue should be created.
+If it is determined that this is not a critical network issue then the advisory should be closed and if more follow-up is required a normal Solana public github issue should be created.
 
 ### 3. Prepare Fixes
 For the affected branches, typically all three (edge, beta and stable), prepare a fix for the issue and push them to the corresponding branch in the private repository associated with the draft security advisory.
diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml
index 3f883ddc23f9f5..7aee8478b4f126 100644
--- a/account-decoder/Cargo.toml
+++ b/account-decoder/Cargo.toml
@@ -23,6 +23,7 @@ solana-config-program = { workspace = true }
 solana-sdk = { workspace = true }
 spl-token = { workspace = true, features = ["no-entrypoint"] }
 spl-token-2022 = { workspace = true, features = ["no-entrypoint"] }
+spl-token-group-interface = { workspace = true }
 spl-token-metadata-interface = { workspace = true }
 thiserror = { workspace = true }
 zstd = { workspace = true }
diff --git a/account-decoder/src/parse_token_extension.rs b/account-decoder/src/parse_token_extension.rs
index 39d26d83a20b99..a2fdef41b47407 100644
--- a/account-decoder/src/parse_token_extension.rs
+++ b/account-decoder/src/parse_token_extension.rs
@@ -6,6 +6,7 @@ use {
         solana_program::pubkey::Pubkey,
         solana_zk_token_sdk::zk_token_elgamal::pod::ElGamalPubkey,
     },
+    spl_token_group_interface::state::{TokenGroup, TokenGroupMember},
     spl_token_metadata_interface::state::TokenMetadata,
 };
 
@@ -32,6 +33,10 @@ pub enum UiExtension {
     TransferHookAccount(UiTransferHookAccount),
     MetadataPointer(UiMetadataPointer),
     TokenMetadata(UiTokenMetadata),
+    GroupPointer(UiGroupPointer),
+    GroupMemberPointer(UiGroupMemberPointer),
+    TokenGroup(UiTokenGroup),
+    TokenGroupMember(UiTokenGroupMember),
     UnparseableExtension,
 }
 
@@ -108,6 +113,22 @@ pub fn parse_extension(
             .get_extension::<extension::transfer_hook::TransferHookAccount>()
             .map(|&extension| UiExtension::TransferHookAccount(extension.into()))
             .unwrap_or(UiExtension::UnparseableExtension),
+        ExtensionType::GroupPointer => account
+            .get_extension::<extension::group_pointer::GroupPointer>()
+            .map(|&extension| UiExtension::GroupPointer(extension.into()))
+            .unwrap_or(UiExtension::UnparseableExtension),
+        ExtensionType::GroupMemberPointer => account
+            .get_extension::<extension::group_member_pointer::GroupMemberPointer>()
+            .map(|&extension| UiExtension::GroupMemberPointer(extension.into()))
+            .unwrap_or(UiExtension::UnparseableExtension),
+        ExtensionType::TokenGroup => account
+            .get_extension::<TokenGroup>()
+            .map(|&extension| UiExtension::TokenGroup(extension.into()))
+            .unwrap_or(UiExtension::UnparseableExtension),
+        ExtensionType::TokenGroupMember => account
+            .get_extension::<TokenGroupMember>()
+            .map(|&extension| UiExtension::TokenGroupMember(extension.into()))
+            .unwrap_or(UiExtension::UnparseableExtension),
     }
 }
 
@@ -481,3 +502,78 @@ impl From<extension::transfer_hook::TransferHookAccount> for UiTransferHookAccount
         }
     }
 }
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct UiGroupPointer {
+    pub authority: Option<String>,
+    pub group_address: Option<String>,
+}
+
+impl From<extension::group_pointer::GroupPointer> for UiGroupPointer {
+    fn from(group_pointer: extension::group_pointer::GroupPointer) -> Self {
+        let authority: Option<Pubkey> = group_pointer.authority.into();
+        let group_address: Option<Pubkey> = group_pointer.group_address.into();
+        Self {
+            authority: authority.map(|pubkey| pubkey.to_string()),
+            group_address: group_address.map(|pubkey| pubkey.to_string()),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct UiGroupMemberPointer {
+    pub authority: Option<String>,
+    pub member_address: Option<String>,
+}
+
+impl From<extension::group_member_pointer::GroupMemberPointer> for UiGroupMemberPointer {
+    fn from(member_pointer: extension::group_member_pointer::GroupMemberPointer) -> Self {
+        let authority: Option<Pubkey> = member_pointer.authority.into();
+        let member_address: Option<Pubkey> = member_pointer.member_address.into();
+        Self {
+            authority: authority.map(|pubkey| pubkey.to_string()),
+            member_address: member_address.map(|pubkey| pubkey.to_string()),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct UiTokenGroup {
+    pub update_authority: Option<String>,
+    pub mint: String,
+    pub size: u32,
+    pub max_size: u32,
+}
+
+impl From<TokenGroup> for UiTokenGroup {
+    fn from(token_group: TokenGroup) -> Self {
+        let update_authority: Option<Pubkey> = token_group.update_authority.into();
+        Self {
+            update_authority: update_authority.map(|pubkey| pubkey.to_string()),
+            mint: token_group.mint.to_string(),
+            size: token_group.size.into(),
+            max_size: token_group.max_size.into(),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+pub struct UiTokenGroupMember {
+    pub mint: String,
+    pub group: String,
+    pub member_number: u32,
+}
+
+impl From<TokenGroupMember> for UiTokenGroupMember {
+    fn from(member: TokenGroupMember) -> Self {
+        Self {
+            mint: member.mint.to_string(),
+            group: member.group.to_string(),
+            member_number: member.member_number.into(),
+        }
+    }
+}
diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml
index ebc3ac5beda821..261c5e7b2fc84f 100644
--- a/accounts-bench/Cargo.toml
+++ b/accounts-bench/Cargo.toml
@@ -12,7 +12,7 @@ edition = { workspace = true }
 clap = { workspace = true }
 log = { workspace = true }
 rayon = { workspace = true }
-solana-accounts-db = { workspace = true }
+solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] }
 solana-logger = { workspace = true }
 solana-measure = { workspace = true }
 solana-sdk = { workspace = true }
@@ -20,3 +20,6 @@ solana-version = { workspace = true }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
+
+[features]
+dev-context-only-utils = []
diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs
index 1857314a92bcd5..88d15ea72482aa 100644
--- a/accounts-bench/src/main.rs
+++ b/accounts-bench/src/main.rs
@@ -9,7 +9,8 @@ use {
     accounts::Accounts,
     accounts_db::{
         test_utils::{create_test_accounts, update_accounts_bench},
-        AccountShrinkThreshold, CalcAccountsHashDataSource,
+        AccountShrinkThreshold, AccountsDb, CalcAccountsHashDataSource,
+        ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
     },
     accounts_index::AccountSecondaryIndexes,
     ancestors::Ancestors,
@@ -19,7 +20,7 @@ use {
     solana_sdk::{
         genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule,
     },
-    std::{env, fs, path::PathBuf},
+    std::{env, fs, path::PathBuf, sync::Arc},
 };
 
 fn main() {
@@ -69,12 +70,16 @@ fn main() {
     if fs::remove_dir_all(path.clone()).is_err() {
         println!("Warning: Couldn't remove {path:?}");
     }
-    let accounts = Accounts::new_with_config_for_benches(
+    let accounts_db = AccountsDb::new_with_config(
         vec![path],
         &ClusterType::Testnet,
         AccountSecondaryIndexes::default(),
         AccountShrinkThreshold::default(),
+        Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
+        None,
+        Arc::default(),
     );
+    let accounts = Accounts::new(Arc::new(accounts_db));
     println!("Creating {num_accounts} accounts");
     let mut create_time = Measure::start("create accounts");
     let pubkeys: Vec<_> = (0..num_slots)
diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml
index 8807020d2f17a7..54a455753831fd 100644
--- a/accounts-cluster-bench/Cargo.toml
+++ b/accounts-cluster-bench/Cargo.toml
@@ -34,6 +34,7 @@ spl-token = { workspace = true, features = ["no-entrypoint"] }
 [dev-dependencies]
 solana-core = { workspace = true }
 solana-local-cluster = { workspace = true }
+solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
 solana-test-validator = { workspace = true }
 
 [package.metadata.docs.rs]
diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs
index a261228236ec8a..67c02282fa1702 100644
--- a/accounts-db/src/account_info.rs
+++ b/accounts-db/src/account_info.rs
@@ -76,7 +76,7 @@ const CACHED_OFFSET: OffsetReduced = (1 << (OffsetReduced::BITS - 1)) - 1;
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
 pub struct PackedOffsetAndFlags {
-    /// this provides 2^31 bits, which when multipled by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
+    /// this provides 2^31 bits, which when multiplied by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
     offset_reduced: B31,
     /// use 1 bit to specify that the entry is zero lamport
     is_zero_lamport: bool,
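[Editor's note: the accounts-bench change above is one instance of a pattern this
diff applies repeatedly: the removed Accounts::new_with_config_for_* helpers are
replaced by constructing an AccountsDb explicitly and wrapping it. A minimal
sketch of the two shapes, using only names that appear in this diff:

    // test/default shape (used by the rewritten tests later in this diff)
    let accounts_db = AccountsDb::new_single_for_tests();
    let accounts = Accounts::new(Arc::new(accounts_db));

    // bench shape: thread the bench config through AccountsDb::new_with_config
    // via Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), as accounts-bench/src/main.rs
    // now does above.]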
diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs
index 7178d62dd6c1c5..e7a33b711d23ca 100644
--- a/accounts-db/src/account_storage.rs
+++ b/accounts-db/src/account_storage.rs
@@ -75,7 +75,10 @@ impl AccountStorage {
     /// return the append vec for 'slot' if it exists
     /// This is only ever called when shrink is not possibly running and there is a max of 1 append vec per slot.
     pub fn get_slot_storage_entry(&self, slot: Slot) -> Option<Arc<AccountStorageEntry>> {
-        assert!(self.no_shrink_in_progress());
+        assert!(
+            self.no_shrink_in_progress(),
+            "self.no_shrink_in_progress(): {slot}"
+        );
         self.get_slot_storage_entry_shrinking_in_progress_ok(slot)
     }
 
@@ -95,7 +98,10 @@
     /// returns true if there is no entry for 'slot'
     #[cfg(test)]
     pub(crate) fn is_empty_entry(&self, slot: Slot) -> bool {
-        assert!(self.no_shrink_in_progress());
+        assert!(
+            self.no_shrink_in_progress(),
+            "self.no_shrink_in_progress(): {slot}"
+        );
         self.map.get(&slot).is_none()
     }
 
@@ -124,7 +130,10 @@
     }
 
     pub(crate) fn insert(&self, slot: Slot, store: Arc<AccountStorageEntry>) {
-        assert!(self.no_shrink_in_progress());
+        assert!(
+            self.no_shrink_in_progress(),
+            "self.no_shrink_in_progress(): {slot}"
+        );
         assert!(self
             .map
             .insert(
diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index b11763b1dd5048..8eb0702967790e 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -1,70 +1,43 @@
 use {
     crate::{
-        account_overrides::AccountOverrides,
-        account_rent_state::{check_rent_state_with_account, RentState},
         accounts_db::{
-            AccountShrinkThreshold, AccountsAddRootTiming, AccountsDb, AccountsDbConfig, LoadHint,
-            LoadedAccount, ScanStorageResult, VerifyAccountsHashAndLamportsConfig,
-            ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
+            AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanStorageResult,
+            VerifyAccountsHashAndLamportsConfig,
         },
-        accounts_index::{
-            AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport,
-        },
-        accounts_update_notifier_interface::AccountsUpdateNotifier,
+        accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport},
         ancestors::Ancestors,
-        blockhash_queue::BlockhashQueue,
         nonce_info::{NonceFull, NonceInfo},
         rent_collector::RentCollector,
         rent_debits::RentDebits,
         storable_accounts::StorableAccounts,
-        transaction_error_metrics::TransactionErrorMetrics,
-        transaction_results::{TransactionCheckResult, TransactionExecutionResult},
+        transaction_results::TransactionExecutionResult,
     },
     dashmap::DashMap,
-    itertools::Itertools,
     log::*,
-    solana_program_runtime::{
-        compute_budget_processor::process_compute_budget_instructions,
-        loaded_programs::LoadedProgramsForTxBatch,
-    },
     solana_sdk::{
-        account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
+        account::{AccountSharedData, ReadableAccount},
         account_utils::StateMut,
         address_lookup_table::{self, error::AddressLookupError, state::AddressLookupTable},
-        bpf_loader_upgradeable::{self, UpgradeableLoaderState},
         clock::{BankId, Slot},
-        feature_set::{
-            self, include_loaded_accounts_data_size_in_fee_calculation,
-            remove_congestion_multiplier_from_fee_calculation,
-            simplify_writable_program_account_check, FeatureSet,
-        },
-        fee::FeeStructure,
-        genesis_config::ClusterType,
-        message::{
-            v0::{LoadedAddresses, MessageAddressTableLookup},
-            SanitizedMessage,
-        },
-        native_loader,
+        message::v0::{LoadedAddresses, MessageAddressTableLookup},
         nonce::{
             state::{DurableNonce, Versions as NonceVersions},
             State as NonceState,
         },
         pubkey::Pubkey,
-        saturating_add_assign,
         slot_hashes::SlotHashes,
-        sysvar::{self, instructions::construct_instructions_data},
         transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError},
         transaction_context::{IndexOfAccount, TransactionAccount},
     },
-    solana_system_program::{get_system_account_kind, SystemAccountKind},
     std::{
         cmp::Reverse,
-        collections::{hash_map,
BinaryHeap, HashMap, HashSet}, - num::NonZeroUsize, + collections::{ + hash_map::{self}, + BinaryHeap, HashMap, HashSet, + }, ops::RangeBounds, - path::PathBuf, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, + atomic::{AtomicUsize, Ordering}, Arc, Mutex, }, }, @@ -78,14 +51,6 @@ pub struct AccountLocks { readonly_locks: HashMap, } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub enum RewardInterval { - /// the slot within the epoch is INSIDE the reward distribution interval - InsideInterval, - /// the slot within the epoch is OUTSIDE the reward distribution interval - OutsideInterval, -} - impl AccountLocks { fn is_locked_readonly(&self, key: &Pubkey) -> bool { self.readonly_locks @@ -153,68 +118,6 @@ pub enum AccountAddressFilter { } impl Accounts { - pub fn default_for_tests() -> Self { - Self::new_empty(AccountsDb::default_for_tests()) - } - - pub fn new_with_config_for_tests( - paths: Vec, - cluster_type: &ClusterType, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_config( - paths, - cluster_type, - account_indexes, - shrink_ratio, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ) - } - - pub fn new_with_config_for_benches( - paths: Vec, - cluster_type: &ClusterType, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_config( - paths, - cluster_type, - account_indexes, - shrink_ratio, - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), - None, - Arc::default(), - ) - } - - pub fn new_with_config( - paths: Vec, - cluster_type: &ClusterType, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - accounts_db_config: Option, - accounts_update_notifier: Option, - exit: Arc, - ) -> Self { - Self::new_empty(AccountsDb::new_with_config( - paths, - cluster_type, - account_indexes, - shrink_ratio, - accounts_db_config, - accounts_update_notifier, - exit, - )) - } - - pub fn new_empty(accounts_db: AccountsDb) -> Self { - Self::new(Arc::new(accounts_db)) - } - pub fn new(accounts_db: Arc) -> Self { Self { accounts_db, @@ -222,495 +125,6 @@ impl Accounts { } } - fn construct_instructions_account(message: &SanitizedMessage) -> AccountSharedData { - AccountSharedData::from(Account { - data: construct_instructions_data(&message.decompile_instructions()), - owner: sysvar::id(), - ..Account::default() - }) - } - - /// If feature `cap_transaction_accounts_data_size` is active, total accounts data a - /// transaction can load is limited to - /// if `set_tx_loaded_accounts_data_size` instruction is not activated or not used, then - /// default value of 64MiB to not break anyone in Mainnet-beta today - /// else - /// user requested loaded accounts size. - /// Note, requesting zero bytes will result transaction error - fn get_requested_loaded_accounts_data_size_limit( - tx: &SanitizedTransaction, - feature_set: &FeatureSet, - ) -> Result> { - if feature_set.is_active(&feature_set::cap_transaction_accounts_data_size::id()) { - let compute_budget_limits = process_compute_budget_instructions( - tx.message().program_instructions_iter(), - feature_set, - ) - .unwrap_or_default(); - // sanitize against setting size limit to zero - NonZeroUsize::new( - usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(), - ) - .map_or( - Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), - |v| Ok(Some(v)), - ) - } else { - // feature not activated, no loaded accounts data limit imposed. 
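// [Editor's note: the map_or in the removed code above rejects a zero-byte
// request: NonZeroUsize::new(0) is None, which map_or converts into
// Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), while any
// non-zero request passes through as Ok(Some(limit)). A standalone analogue
// with an illustrative value, not part of this diff:
//     use std::num::NonZeroUsize;
//     let requested: usize = 0;
//     let checked = NonZeroUsize::new(requested).map_or(Err("zero"), |v| Ok(v));
//     assert!(checked.is_err());]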
- Ok(None) - } - } - - /// Accumulate loaded account data size into `accumulated_accounts_data_size`. - /// Returns TransactionErr::MaxLoadedAccountsDataSizeExceeded if - /// `requested_loaded_accounts_data_size_limit` is specified and - /// `accumulated_accounts_data_size` exceeds it. - fn accumulate_and_check_loaded_account_data_size( - accumulated_loaded_accounts_data_size: &mut usize, - account_data_size: usize, - requested_loaded_accounts_data_size_limit: Option, - error_counters: &mut TransactionErrorMetrics, - ) -> Result<()> { - if let Some(requested_loaded_accounts_data_size) = requested_loaded_accounts_data_size_limit - { - saturating_add_assign!(*accumulated_loaded_accounts_data_size, account_data_size); - if *accumulated_loaded_accounts_data_size > requested_loaded_accounts_data_size.get() { - error_counters.max_loaded_accounts_data_size_exceeded += 1; - Err(TransactionError::MaxLoadedAccountsDataSizeExceeded) - } else { - Ok(()) - } - } else { - Ok(()) - } - } - - fn account_shared_data_from_program( - key: &Pubkey, - program_accounts: &HashMap, - ) -> Result { - // It's an executable program account. The program is already loaded in the cache. - // So the account data is not needed. Return a dummy AccountSharedData with meta - // information. - let mut program_account = AccountSharedData::default(); - let (program_owner, _count) = program_accounts - .get(key) - .ok_or(TransactionError::AccountNotFound)?; - program_account.set_owner(**program_owner); - program_account.set_executable(true); - Ok(program_account) - } - - #[allow(clippy::too_many_arguments)] - fn load_transaction_accounts( - &self, - ancestors: &Ancestors, - tx: &SanitizedTransaction, - fee: u64, - error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, - account_overrides: Option<&AccountOverrides>, - reward_interval: RewardInterval, - program_accounts: &HashMap, - loaded_programs: &LoadedProgramsForTxBatch, - ) -> Result { - let in_reward_interval = reward_interval == RewardInterval::InsideInterval; - - // NOTE: this check will never fail because `tx` is sanitized - if tx.signatures().is_empty() && fee != 0 { - return Err(TransactionError::MissingSignatureForFee); - } - - // There is no way to predict what program will execute without an error - // If a fee can pay for execution then the program will be scheduled - let mut validated_fee_payer = false; - let mut tx_rent: TransactionRent = 0; - let message = tx.message(); - let account_keys = message.account_keys(); - let mut accounts_found = Vec::with_capacity(account_keys.len()); - let mut account_deps = Vec::with_capacity(account_keys.len()); - let mut rent_debits = RentDebits::default(); - - let set_exempt_rent_epoch_max = - feature_set.is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); - - let requested_loaded_accounts_data_size_limit = - Self::get_requested_loaded_accounts_data_size_limit(tx, feature_set)?; - let mut accumulated_accounts_data_size: usize = 0; - - let instruction_accounts = message - .instructions() - .iter() - .flat_map(|instruction| &instruction.accounts) - .unique() - .collect::>(); - - let mut accounts = account_keys - .iter() - .enumerate() - .map(|(i, key)| { - let mut account_found = true; - #[allow(clippy::collapsible_else_if)] - let account = if solana_sdk::sysvar::instructions::check_id(key) { - Self::construct_instructions_account(message) - } else { - let instruction_account = u8::try_from(i) - .map(|i| instruction_accounts.contains(&&i)) - .unwrap_or(false); 
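// [Editor's note: accumulate_and_check_loaded_account_data_size above grows the
// running total with saturating_add_assign! and errors once the total exceeds
// the requested cap (default 64MiB when no limit instruction is used, per the
// doc comment earlier). Reduced to its shape, with illustrative sizes:
//     let (mut total, cap) = (0usize, 64 * 1024 * 1024); // 64MiB default cap
//     for size in [1_000usize, 2_000, 3_000] {
//         total = total.saturating_add(size);
//         if total > cap { /* MaxLoadedAccountsDataSizeExceeded */ }
//     }]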
- let (account_size, mut account, rent) = if let Some(account_override) = - account_overrides.and_then(|overrides| overrides.get(key)) - { - (account_override.data().len(), account_override.clone(), 0) - } else if let Some(program) = (feature_set - .is_active(&simplify_writable_program_account_check::id()) - && !instruction_account - && !message.is_writable(i)) - .then_some(()) - .and_then(|_| loaded_programs.find(key)) - { - // This condition block does special handling for accounts that are passed - // as instruction account to any of the instructions in the transaction. - // It's been noticed that some programs are reading other program accounts - // (that are passed to the program as instruction accounts). So such accounts - // are needed to be loaded even though corresponding compiled program may - // already be present in the cache. - Self::account_shared_data_from_program(key, program_accounts) - .map(|program_account| (program.account_size, program_account, 0))? - } else { - self.accounts_db - .load_with_fixed_root(ancestors, key) - .map(|(mut account, _)| { - if message.is_writable(i) { - let rent_due = rent_collector - .collect_from_existing_account( - key, - &mut account, - self.accounts_db.filler_account_suffix.as_ref(), - set_exempt_rent_epoch_max, - ) - .rent_amount; - (account.data().len(), account, rent_due) - } else { - (account.data().len(), account, 0) - } - }) - .unwrap_or_else(|| { - account_found = false; - let mut default_account = AccountSharedData::default(); - if set_exempt_rent_epoch_max { - // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). - // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account - // with this field already set would allow us to skip rent collection for these accounts. - default_account.set_rent_epoch(u64::MAX); - } - (default_account.data().len(), default_account, 0) - }) - }; - Self::accumulate_and_check_loaded_account_data_size( - &mut accumulated_accounts_data_size, - account_size, - requested_loaded_accounts_data_size_limit, - error_counters, - )?; - - if !validated_fee_payer && message.is_non_loader_key(i) { - if i != 0 { - warn!("Payer index should be 0! 
{:?}", tx); - } - - Self::validate_fee_payer( - key, - &mut account, - i as IndexOfAccount, - error_counters, - rent_collector, - feature_set, - fee, - )?; - - validated_fee_payer = true; - } - - if !feature_set.is_active(&simplify_writable_program_account_check::id()) { - if bpf_loader_upgradeable::check_id(account.owner()) { - if message.is_writable(i) && !message.is_upgradeable_loader_present() { - error_counters.invalid_writable_account += 1; - return Err(TransactionError::InvalidWritableAccount); - } - - if account.executable() { - // The upgradeable loader requires the derived ProgramData account - if let Ok(UpgradeableLoaderState::Program { - programdata_address, - }) = account.state() - { - if self - .accounts_db - .load_with_fixed_root(ancestors, &programdata_address) - .is_none() - { - error_counters.account_not_found += 1; - return Err(TransactionError::ProgramAccountNotFound); - } - } else { - error_counters.invalid_program_for_execution += 1; - return Err(TransactionError::InvalidProgramForExecution); - } - } - } else if account.executable() && message.is_writable(i) { - error_counters.invalid_writable_account += 1; - return Err(TransactionError::InvalidWritableAccount); - } - } - - if in_reward_interval - && message.is_writable(i) - && solana_stake_program::check_id(account.owner()) - { - error_counters.program_execution_temporarily_restricted += 1; - return Err(TransactionError::ProgramExecutionTemporarilyRestricted { - account_index: i as u8, - }); - } - - tx_rent += rent; - rent_debits.insert(key, rent, account.lamports()); - - account - }; - - accounts_found.push(account_found); - Ok((*key, account)) - }) - .collect::>>()?; - - if !validated_fee_payer { - error_counters.account_not_found += 1; - return Err(TransactionError::AccountNotFound); - } - - // Appends the account_deps at the end of the accounts, - // this way they can be accessed in a uniform way. - // At places where only the accounts are needed, - // the account_deps are truncated using e.g: - // accounts.iter().take(message.account_keys.len()) - accounts.append(&mut account_deps); - - let disable_builtin_loader_ownership_chains = - feature_set.is_active(&feature_set::disable_builtin_loader_ownership_chains::ID); - let builtins_start_index = accounts.len(); - let program_indices = message - .instructions() - .iter() - .map(|instruction| { - let mut account_indices = Vec::new(); - let mut program_index = instruction.program_id_index as usize; - for _ in 0..5 { - let (program_id, program_account) = accounts - .get(program_index) - .ok_or(TransactionError::ProgramAccountNotFound)?; - let account_found = accounts_found.get(program_index).unwrap_or(&true); - if native_loader::check_id(program_id) { - return Ok(account_indices); - } - if !account_found { - error_counters.account_not_found += 1; - return Err(TransactionError::ProgramAccountNotFound); - } - if !program_account.executable() { - error_counters.invalid_program_for_execution += 1; - return Err(TransactionError::InvalidProgramForExecution); - } - account_indices.insert(0, program_index as IndexOfAccount); - let owner_id = program_account.owner(); - if native_loader::check_id(owner_id) { - return Ok(account_indices); - } - program_index = if let Some(owner_index) = accounts - .get(builtins_start_index..) - .ok_or(TransactionError::ProgramAccountNotFound)? 
- .iter() - .position(|(key, _)| key == owner_id) - { - builtins_start_index.saturating_add(owner_index) - } else { - let owner_index = accounts.len(); - if let Some((owner_account, _)) = - self.accounts_db.load_with_fixed_root(ancestors, owner_id) - { - if disable_builtin_loader_ownership_chains - && !native_loader::check_id(owner_account.owner()) - || !owner_account.executable() - { - error_counters.invalid_program_for_execution += 1; - return Err(TransactionError::InvalidProgramForExecution); - } - Self::accumulate_and_check_loaded_account_data_size( - &mut accumulated_accounts_data_size, - owner_account.data().len(), - requested_loaded_accounts_data_size_limit, - error_counters, - )?; - accounts.push((*owner_id, owner_account)); - } else { - error_counters.account_not_found += 1; - return Err(TransactionError::ProgramAccountNotFound); - } - owner_index - }; - if disable_builtin_loader_ownership_chains { - account_indices.insert(0, program_index as IndexOfAccount); - return Ok(account_indices); - } - } - error_counters.call_chain_too_deep += 1; - Err(TransactionError::CallChainTooDeep) - }) - .collect::>>>()?; - - Ok(LoadedTransaction { - accounts, - program_indices, - rent: tx_rent, - rent_debits, - }) - } - - fn validate_fee_payer( - payer_address: &Pubkey, - payer_account: &mut AccountSharedData, - payer_index: IndexOfAccount, - error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, - fee: u64, - ) -> Result<()> { - if payer_account.lamports() == 0 { - error_counters.account_not_found += 1; - return Err(TransactionError::AccountNotFound); - } - let min_balance = match get_system_account_kind(payer_account).ok_or_else(|| { - error_counters.invalid_account_for_fee += 1; - TransactionError::InvalidAccountForFee - })? { - SystemAccountKind::System => 0, - SystemAccountKind::Nonce => { - // Should we ever allow a fees charge to zero a nonce account's - // balance. 
The state MUST be set to uninitialized in that case - rent_collector.rent.minimum_balance(NonceState::size()) - } - }; - - // allow collapsible-else-if to make removing the feature gate safer once activated - #[allow(clippy::collapsible_else_if)] - if feature_set.is_active(&feature_set::checked_arithmetic_in_fee_validation::id()) { - payer_account - .lamports() - .checked_sub(min_balance) - .and_then(|v| v.checked_sub(fee)) - .ok_or_else(|| { - error_counters.insufficient_funds += 1; - TransactionError::InsufficientFundsForFee - })?; - } else { - if payer_account.lamports() < fee + min_balance { - error_counters.insufficient_funds += 1; - return Err(TransactionError::InsufficientFundsForFee); - } - } - - let payer_pre_rent_state = RentState::from_account(payer_account, &rent_collector.rent); - payer_account - .checked_sub_lamports(fee) - .map_err(|_| TransactionError::InsufficientFundsForFee)?; - - let payer_post_rent_state = RentState::from_account(payer_account, &rent_collector.rent); - check_rent_state_with_account( - &payer_pre_rent_state, - &payer_post_rent_state, - payer_address, - payer_account, - payer_index, - ) - } - - #[allow(clippy::too_many_arguments)] - pub fn load_accounts( - &self, - ancestors: &Ancestors, - txs: &[SanitizedTransaction], - lock_results: Vec, - hash_queue: &BlockhashQueue, - error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, - fee_structure: &FeeStructure, - account_overrides: Option<&AccountOverrides>, - in_reward_interval: RewardInterval, - program_accounts: &HashMap, - loaded_programs: &LoadedProgramsForTxBatch, - ) -> Vec { - txs.iter() - .zip(lock_results) - .map(|etx| match etx { - (tx, (Ok(()), nonce)) => { - let lamports_per_signature = nonce - .as_ref() - .map(|nonce| nonce.lamports_per_signature()) - .unwrap_or_else(|| { - hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()) - }); - let fee = if let Some(lamports_per_signature) = lamports_per_signature { - fee_structure.calculate_fee( - tx.message(), - lamports_per_signature, - &process_compute_budget_instructions(tx.message().program_instructions_iter(), feature_set).unwrap_or_default().into(), - feature_set.is_active(&remove_congestion_multiplier_from_fee_calculation::id()), - feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), - ) - } else { - return (Err(TransactionError::BlockhashNotFound), None); - }; - - let loaded_transaction = match self.load_transaction_accounts( - ancestors, - tx, - fee, - error_counters, - rent_collector, - feature_set, - account_overrides, - in_reward_interval, - program_accounts, - loaded_programs, - ) { - Ok(loaded_transaction) => loaded_transaction, - Err(e) => return (Err(e), None), - }; - - // Update nonce with fee-subtracted accounts - let nonce = if let Some(nonce) = nonce { - match NonceFull::from_partial( - nonce, - tx.message(), - &loaded_transaction.accounts, - &loaded_transaction.rent_debits, - ) { - Ok(nonce) => Some(nonce), - Err(e) => return (Err(e), None), - } - } else { - None - }; - - (Ok(loaded_transaction), nonce) - } - (_, (Err(e), _nonce)) => (Err(e), None), - }) - .collect() - } - pub fn load_lookup_table_addresses( &self, ancestors: &Ancestors, @@ -1414,28 +828,20 @@ mod tests { transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, assert_matches::assert_matches, - solana_program_runtime::{ - compute_budget_processor, - prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, - }, + 
solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ account::{AccountSharedData, WritableAccount}, address_lookup_table::state::LookupTableMeta, - compute_budget::ComputeBudgetInstruction, - epoch_schedule::EpochSchedule, - genesis_config::ClusterType, hash::Hash, instruction::{CompiledInstruction, InstructionError}, message::{Message, MessageHeader}, - nonce, nonce_account, - rent::Rent, + native_loader, nonce, nonce_account, signature::{keypair_from_seed, signers::Signers, Keypair, Signer}, system_instruction, system_program, transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS}, }, std::{ borrow::Cow, - convert::TryFrom, sync::atomic::{AtomicBool, AtomicU64, Ordering}, thread, time, }, @@ -1468,97 +874,13 @@ mod tests { accounts_data_len_delta: 0, }, programs_modified_by_tx: Box::::default(), - programs_updated_only_for_global_cache: Box::::default(), - } - } - - fn load_accounts_with_fee_and_rent( - tx: Transaction, - ka: &[TransactionAccount], - lamports_per_signature: u64, - rent_collector: &RentCollector, - error_counters: &mut TransactionErrorMetrics, - feature_set: &FeatureSet, - fee_structure: &FeeStructure, - ) -> Vec { - let mut hash_queue = BlockhashQueue::new(100); - hash_queue.register_hash(&tx.message().recent_blockhash, lamports_per_signature); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); - for ka in ka.iter() { - accounts.store_for_tests(0, &ka.0, &ka.1); - } - - let ancestors = vec![(0, 0)].into_iter().collect(); - let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); - accounts.load_accounts( - &ancestors, - &[sanitized_tx], - vec![(Ok(()), None)], - &hash_queue, - error_counters, - rent_collector, - feature_set, - fee_structure, - None, - RewardInterval::OutsideInterval, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ) - } - - /// get a feature set with all features activated - /// with the optional except of 'exclude' - fn all_features_except(exclude: Option<&[Pubkey]>) -> FeatureSet { - let mut features = FeatureSet::all_enabled(); - if let Some(exclude) = exclude { - features.active.retain(|k, _v| !exclude.contains(k)); } - features - } - - fn load_accounts_with_fee( - tx: Transaction, - ka: &[TransactionAccount], - lamports_per_signature: u64, - error_counters: &mut TransactionErrorMetrics, - exclude_features: Option<&[Pubkey]>, - ) -> Vec { - load_accounts_with_fee_and_rent( - tx, - ka, - lamports_per_signature, - &RentCollector::default(), - error_counters, - &all_features_except(exclude_features), - &FeeStructure::default(), - ) - } - - fn load_accounts( - tx: Transaction, - ka: &[TransactionAccount], - error_counters: &mut TransactionErrorMetrics, - ) -> Vec { - load_accounts_with_fee(tx, ka, 0, error_counters, None) - } - - fn load_accounts_with_excluded_features( - tx: Transaction, - ka: &[TransactionAccount], - error_counters: &mut TransactionErrorMetrics, - exclude_features: Option<&[Pubkey]>, - ) -> Vec { - load_accounts_with_fee(tx, ka, 0, error_counters, exclude_features) } #[test] fn test_hold_range_in_memory() { - let accts = Accounts::default_for_tests(); + let accounts_db = AccountsDb::default_for_tests(); + let accts = Accounts::new(Arc::new(accounts_db)); let range = Pubkey::from([0; 32])..=Pubkey::from([0xff; 32]); accts.hold_range_in_memory(&range, true, &test_thread_pool()); accts.hold_range_in_memory(&range, false, &test_thread_pool()); @@ -1570,7 +892,8 @@ 
mod tests { #[test] fn test_hold_range_in_memory2() { - let accts = Accounts::default_for_tests(); + let accounts_db = AccountsDb::default_for_tests(); + let accts = Accounts::new(Arc::new(accounts_db)); let range = Pubkey::from([0; 32])..=Pubkey::from([0xff; 32]); let idx = &accts.accounts_db.accounts_index; let bins = idx.account_maps.len(); @@ -1604,423 +927,17 @@ mod tests { }); accts.hold_range_in_memory(&range, false, &test_thread_pool()); accts.hold_range_in_memory(&range2, false, &test_thread_pool()); - } - - fn test_thread_pool() -> rayon::ThreadPool { - crate::accounts_db::make_min_priority_thread_pool() - } - - #[test] - fn test_load_accounts_no_account_0_exists() { - let accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); - - assert_eq!(error_counters.account_not_found, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::AccountNotFound), None,), - ); - } - - #[test] - fn test_load_accounts_unknown_program_id() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - - let account = AccountSharedData::new(1, 0, &Pubkey::default()); - accounts.push((key0, account)); - - let account = AccountSharedData::new(2, 1, &Pubkey::default()); - accounts.push((key1, account)); - - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![Pubkey::default()], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); - - assert_eq!(error_counters.account_not_found, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::ProgramAccountNotFound), None,) - ); - } - - #[test] - fn test_load_accounts_insufficient_funds() { - let lamports_per_signature = 5000; - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - - let account = AccountSharedData::new(1, 0, &Pubkey::default()); - accounts.push((key0, account)); - - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); - - let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); - let fee = FeeStructure::default().calculate_fee( - &message, - lamports_per_signature, - &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) - .unwrap_or_default() - .into(), - true, - false, - ); - assert_eq!(fee, lamports_per_signature); - - let loaded_accounts = load_accounts_with_fee( - tx, - &accounts, - lamports_per_signature, - &mut error_counters, - None, - ); - - assert_eq!(error_counters.insufficient_funds, 1); - 
assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0].clone(), - (Err(TransactionError::InsufficientFundsForFee), None,), - ); - } - - #[test] - fn test_load_accounts_invalid_account_for_fee() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - - let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::new_rand()); // <-- owner is not the system program - accounts.push((key0, account)); - - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); - - assert_eq!(error_counters.invalid_account_for_fee, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::InvalidAccountForFee), None,), - ); - } - - #[test] - fn test_load_accounts_fee_payer_is_nonce() { - let lamports_per_signature = 5000; - let mut error_counters = TransactionErrorMetrics::default(); - let rent_collector = RentCollector::new( - 0, - EpochSchedule::default(), - 500_000.0, - Rent { - lamports_per_byte_year: 42, - ..Rent::default() - }, - ); - let min_balance = rent_collector.rent.minimum_balance(NonceState::size()); - let nonce = Keypair::new(); - let mut accounts = vec![( - nonce.pubkey(), - AccountSharedData::new_data( - min_balance + lamports_per_signature, - &NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())), - &system_program::id(), - ) - .unwrap(), - )]; - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&nonce], - &[], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - // Fee leaves min_balance balance succeeds - let loaded_accounts = load_accounts_with_fee_and_rent( - tx.clone(), - &accounts, - lamports_per_signature, - &rent_collector, - &mut error_counters, - &all_features_except(None), - &FeeStructure::default(), - ); - assert_eq!(loaded_accounts.len(), 1); - let (load_res, _nonce) = &loaded_accounts[0]; - let loaded_transaction = load_res.as_ref().unwrap(); - assert_eq!(loaded_transaction.accounts[0].1.lamports(), min_balance); - - // Fee leaves zero balance fails - accounts[0].1.set_lamports(lamports_per_signature); - let loaded_accounts = load_accounts_with_fee_and_rent( - tx.clone(), - &accounts, - lamports_per_signature, - &rent_collector, - &mut error_counters, - &FeatureSet::all_enabled(), - &FeeStructure::default(), - ); - assert_eq!(loaded_accounts.len(), 1); - let (load_res, _nonce) = &loaded_accounts[0]; - assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee)); - - // Fee leaves non-zero, but sub-min_balance balance fails - accounts[0] - .1 - .set_lamports(lamports_per_signature + min_balance / 2); - let loaded_accounts = load_accounts_with_fee_and_rent( - tx, - &accounts, - lamports_per_signature, - &rent_collector, - &mut error_counters, - &FeatureSet::all_enabled(), - &FeeStructure::default(), - ); - assert_eq!(loaded_accounts.len(), 1); - let (load_res, _nonce) = &loaded_accounts[0]; - assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee)); - } - - #[test] - fn test_load_accounts_no_loaders() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); 
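// [Editor's note on test_load_accounts_fee_payer_is_nonce above: with starting
// lamports L, fee F, and nonce minimum balance
// M = rent.minimum_balance(NonceState::size()), validate_fee_payer requires
// L - M - F >= 0 via the checked_sub chain. The three cases bracket the rule:
// L = M + F passes (leaves exactly M), L = F fails (would leave 0 < M), and
// L = F + M/2 fails (would leave M/2 < M).]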
- let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - - let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); - account.set_rent_epoch(1); - accounts.push((key0, account)); - - let mut account = AccountSharedData::new(2, 1, &Pubkey::default()); - account.set_rent_epoch(1); - accounts.push((key1, account)); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[key1], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - let loaded_accounts = - load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None); - - assert_eq!(error_counters.account_not_found, 0); - assert_eq!(loaded_accounts.len(), 1); - match &loaded_accounts[0] { - (Ok(loaded_transaction), _nonce) => { - assert_eq!(loaded_transaction.accounts.len(), 3); - assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); - assert_eq!(loaded_transaction.program_indices.len(), 1); - assert_eq!(loaded_transaction.program_indices[0].len(), 0); - } - (Err(e), _nonce) => panic!("{e}"), - } - } - - #[test] - fn test_load_accounts_bad_owner() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - - let account = AccountSharedData::new(1, 0, &Pubkey::default()); - accounts.push((key0, account)); - - let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); - account.set_executable(true); - accounts.push((key1, account)); - - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![key1], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); - - assert_eq!(error_counters.account_not_found, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::ProgramAccountNotFound), None,) - ); - } - - #[test] - fn test_load_accounts_not_executable() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - - let account = AccountSharedData::new(1, 0, &Pubkey::default()); - accounts.push((key0, account)); - - let account = AccountSharedData::new(40, 1, &native_loader::id()); - accounts.push((key1, account)); - - let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![key1], - instructions, - ); - - let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters); - - assert_eq!(error_counters.invalid_program_for_execution, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::InvalidProgramForExecution), None,) - ); - } - - #[test] - fn test_load_accounts_multiple_loaders() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - let key2 = Pubkey::from([6u8; 32]); - - let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); - account.set_rent_epoch(1); - accounts.push((key0, account)); - - let mut account = AccountSharedData::new(40, 1, 
&Pubkey::default()); - account.set_executable(true); - account.set_rent_epoch(1); - account.set_owner(native_loader::id()); - accounts.push((key1, account)); - - let mut account = AccountSharedData::new(41, 1, &Pubkey::default()); - account.set_executable(true); - account.set_rent_epoch(1); - account.set_owner(key1); - accounts.push((key2, account)); - - let instructions = vec![ - CompiledInstruction::new(1, &(), vec![0]), - CompiledInstruction::new(2, &(), vec![0]), - ]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[], - Hash::default(), - vec![key1, key2], - instructions, - ); + } - let loaded_accounts = - load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None); - - assert_eq!(error_counters.account_not_found, 0); - assert_eq!(loaded_accounts.len(), 1); - match &loaded_accounts[0] { - (Ok(loaded_transaction), _nonce) => { - assert_eq!(loaded_transaction.accounts.len(), 4); - assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); - assert_eq!(loaded_transaction.program_indices.len(), 2); - assert_eq!(loaded_transaction.program_indices[0].len(), 1); - assert_eq!(loaded_transaction.program_indices[1].len(), 2); - for program_indices in loaded_transaction.program_indices.iter() { - for (i, program_index) in program_indices.iter().enumerate() { - // +1 to skip first not loader account - assert_eq!( - loaded_transaction.accounts[*program_index as usize].0, - accounts[i + 1].0 - ); - assert_eq!( - loaded_transaction.accounts[*program_index as usize].1, - accounts[i + 1].1 - ); - } - } - } - (Err(e), _nonce) => panic!("{e}"), - } + fn test_thread_pool() -> rayon::ThreadPool { + crate::accounts_db::make_min_priority_thread_pool() } #[test] fn test_load_lookup_table_addresses_account_not_found() { let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let invalid_table_key = Pubkey::new_unique(); let address_table_lookup = MessageAddressTableLookup { @@ -2042,12 +959,8 @@ mod tests { #[test] fn test_load_lookup_table_addresses_invalid_account_owner() { let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let invalid_table_key = Pubkey::new_unique(); let mut invalid_table_account = AccountSharedData::default(); @@ -2073,12 +986,8 @@ mod tests { #[test] fn test_load_lookup_table_addresses_invalid_account_data() { let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let invalid_table_key = Pubkey::new_unique(); let invalid_table_account = @@ -2104,12 +1013,8 @@ mod tests { #[test] fn test_load_lookup_table_addresses() { let ancestors = vec![(1, 1), (0, 0)].into_iter().collect(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - 
AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let table_key = Pubkey::new_unique(); let table_addresses = vec![Pubkey::new_unique(), Pubkey::new_unique()]; @@ -2149,12 +1054,8 @@ mod tests { #[test] fn test_load_by_program_slot() { - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); // Load accounts owned by various programs into AccountsDb let pubkey0 = solana_sdk::pubkey::new_rand(); @@ -2175,329 +1076,18 @@ mod tests { assert_eq!(loaded, vec![]); } - #[test] - fn test_load_accounts_executable_with_write_lock() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - let key2 = Pubkey::from([6u8; 32]); - - let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); - account.set_rent_epoch(1); - accounts.push((key0, account)); - - let mut account = AccountSharedData::new(40, 1, &native_loader::id()); - account.set_executable(true); - account.set_rent_epoch(1); - accounts.push((key1, account)); - - let mut account = AccountSharedData::new(40, 1, &native_loader::id()); - account.set_executable(true); - account.set_rent_epoch(1); - accounts.push((key2, account)); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; - let mut message = Message::new_with_compiled_instructions( - 1, - 0, - 1, // only one executable marked as readonly - vec![key0, key1, key2], - Hash::default(), - instructions, - ); - let tx = Transaction::new(&[&keypair], message.clone(), Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx, - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::InvalidWritableAccount), None) - ); - - // Mark executables as readonly - message.account_keys = vec![key0, key1, key2]; // revert key change - message.header.num_readonly_unsigned_accounts = 2; // mark both executables as readonly - let tx = Transaction::new(&[&keypair], message, Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx, - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - let result = loaded_accounts[0].0.as_ref().unwrap(); - assert_eq!(result.accounts[..2], accounts[..2]); - assert_eq!( - result.accounts[result.program_indices[0][0] as usize], - accounts[2] - ); - } - - #[test] - fn test_load_accounts_upgradeable_with_write_lock() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - let key2 = Pubkey::from([6u8; 32]); - let programdata_key1 = Pubkey::from([7u8; 32]); - let programdata_key2 = Pubkey::from([8u8; 32]); - - let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); - 
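Each migrated test in this file now repeats the same two-line construction in place of new_with_config_for_tests. If the churn ever needs trimming, a small helper in the tests module could capture it; this is a hypothetical convenience, not part of the patch:

fn new_accounts_for_tests() -> Accounts {
    // `Accounts` now wraps an `Arc<AccountsDb>` directly; the
    // single-store test constructor replaces the old config-based one.
    Accounts::new(Arc::new(AccountsDb::new_single_for_tests()))
}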
account.set_rent_epoch(1); - accounts.push((key0, account)); - - let program_data = UpgradeableLoaderState::ProgramData { - slot: 42, - upgrade_authority_address: None, - }; - - let program = UpgradeableLoaderState::Program { - programdata_address: programdata_key1, - }; - let mut account = - AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap(); - account.set_executable(true); - account.set_rent_epoch(1); - accounts.push((key1, account)); - let mut account = - AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap(); - account.set_rent_epoch(1); - accounts.push((programdata_key1, account)); - - let program = UpgradeableLoaderState::Program { - programdata_address: programdata_key2, - }; - let mut account = - AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap(); - account.set_executable(true); - account.set_rent_epoch(1); - accounts.push((key2, account)); - let mut account = - AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap(); - account.set_rent_epoch(1); - accounts.push((programdata_key2, account)); - - let mut account = AccountSharedData::new(40, 1, &native_loader::id()); // create mock bpf_loader_upgradeable - account.set_executable(true); - account.set_rent_epoch(1); - accounts.push((bpf_loader_upgradeable::id(), account)); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; - let mut message = Message::new_with_compiled_instructions( - 1, - 0, - 1, // only one executable marked as readonly - vec![key0, key1, key2], - Hash::default(), - instructions, - ); - let tx = Transaction::new(&[&keypair], message.clone(), Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx.clone(), - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::InvalidWritableAccount), None) - ); - - // Solution 0: Include feature simplify_writable_program_account_check - let loaded_accounts = - load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - - // Solution 1: include bpf_loader_upgradeable account - message.account_keys = vec![key0, key1, bpf_loader_upgradeable::id()]; - let tx = Transaction::new(&[&keypair], message.clone(), Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx, - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - let result = loaded_accounts[0].0.as_ref().unwrap(); - assert_eq!(result.accounts[..2], accounts[..2]); - assert_eq!( - result.accounts[result.program_indices[0][0] as usize], - accounts[5] - ); - - // Solution 2: mark programdata as readonly - message.account_keys = vec![key0, key1, key2]; // revert key change - message.header.num_readonly_unsigned_accounts = 2; // mark both executables as readonly - let tx = Transaction::new(&[&keypair], message, Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx, - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - 
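For orientation while reading the deleted upgradeable-loader tests: an upgradeable program occupies two accounts, a small executable account that only points at its programdata, and the programdata account that records the deploy slot and upgrade authority. A sketch of the pair as the tests build it:

use solana_sdk::{
    account::{AccountSharedData, WritableAccount},
    bpf_loader_upgradeable::{self, UpgradeableLoaderState},
    pubkey::Pubkey,
};

fn mock_upgradeable_program_pair(
    programdata_address: Pubkey,
) -> (AccountSharedData, AccountSharedData) {
    let mut program = AccountSharedData::new_data(
        40,
        &UpgradeableLoaderState::Program { programdata_address },
        &bpf_loader_upgradeable::id(),
    )
    .unwrap();
    program.set_executable(true); // the pointer account is the executable one
    let programdata = AccountSharedData::new_data(
        40,
        &UpgradeableLoaderState::ProgramData {
            slot: 42,
            upgrade_authority_address: None,
        },
        &bpf_loader_upgradeable::id(),
    )
    .unwrap();
    (program, programdata)
}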
assert_eq!(loaded_accounts.len(), 1); - let result = loaded_accounts[0].0.as_ref().unwrap(); - assert_eq!(result.accounts[..2], accounts[..2]); - assert_eq!( - result.accounts[result.program_indices[0][0] as usize], - accounts[5] - ); - assert_eq!( - result.accounts[result.program_indices[0][1] as usize], - accounts[3] - ); - } - - #[test] - fn test_load_accounts_programdata_with_write_lock() { - let mut accounts: Vec = Vec::new(); - let mut error_counters = TransactionErrorMetrics::default(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - let key1 = Pubkey::from([5u8; 32]); - let key2 = Pubkey::from([6u8; 32]); - - let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); - account.set_rent_epoch(1); - accounts.push((key0, account)); - - let program_data = UpgradeableLoaderState::ProgramData { - slot: 42, - upgrade_authority_address: None, - }; - let mut account = - AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap(); - account.set_rent_epoch(1); - accounts.push((key1, account)); - - let mut account = AccountSharedData::new(40, 1, &native_loader::id()); - account.set_executable(true); - account.set_rent_epoch(1); - accounts.push((key2, account)); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; - let mut message = Message::new_with_compiled_instructions( - 1, - 0, - 1, // only the program marked as readonly - vec![key0, key1, key2], - Hash::default(), - instructions, - ); - let tx = Transaction::new(&[&keypair], message.clone(), Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx.clone(), - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - (Err(TransactionError::InvalidWritableAccount), None) - ); - - // Solution 0: Include feature simplify_writable_program_account_check - let loaded_accounts = - load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - - // Solution 1: include bpf_loader_upgradeable account - let mut account = AccountSharedData::new(40, 1, &native_loader::id()); // create mock bpf_loader_upgradeable - account.set_executable(true); - account.set_rent_epoch(1); - let accounts_with_upgradeable_loader = vec![ - accounts[0].clone(), - accounts[1].clone(), - (bpf_loader_upgradeable::id(), account), - ]; - message.account_keys = vec![key0, key1, bpf_loader_upgradeable::id()]; - let tx = Transaction::new(&[&keypair], message.clone(), Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx, - &accounts_with_upgradeable_loader, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - let result = loaded_accounts[0].0.as_ref().unwrap(); - assert_eq!(result.accounts[..2], accounts_with_upgradeable_loader[..2]); - assert_eq!( - result.accounts[result.program_indices[0][0] as usize], - accounts_with_upgradeable_loader[2] - ); - - // Solution 2: mark programdata as readonly - message.account_keys = vec![key0, key1, key2]; // revert key change - message.header.num_readonly_unsigned_accounts = 2; // extend readonly set to include programdata - let tx = Transaction::new(&[&keypair], message, 
Hash::default()); - let loaded_accounts = load_accounts_with_excluded_features( - tx, - &accounts, - &mut error_counters, - Some(&[simplify_writable_program_account_check::id()]), - ); - - assert_eq!(error_counters.invalid_writable_account, 1); - assert_eq!(loaded_accounts.len(), 1); - let result = loaded_accounts[0].0.as_ref().unwrap(); - assert_eq!(result.accounts[..2], accounts[..2]); - assert_eq!( - result.accounts[result.program_indices[0][0] as usize], - accounts[2] - ); - } - #[test] fn test_accounts_empty_bank_hash_stats() { - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); assert!(accounts.accounts_db.get_bank_hash_stats(0).is_some()); assert!(accounts.accounts_db.get_bank_hash_stats(1).is_none()); } #[test] fn test_lock_accounts_with_duplicates() { - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let keypair = Keypair::new(); let message = Message { @@ -2516,12 +1106,8 @@ mod tests { #[test] fn test_lock_accounts_with_too_many_accounts() { - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let keypair = Keypair::new(); @@ -2581,12 +1167,8 @@ mod tests { let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); accounts.store_for_tests(0, &keypair2.pubkey(), &account2); @@ -2690,12 +1272,8 @@ mod tests { let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); accounts.store_for_tests(0, &keypair2.pubkey(), &account2); @@ -2771,12 +1349,8 @@ mod tests { let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); 
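test_lock_accounts_with_duplicates relies on lock acquisition rejecting a message that names the same key twice. Reduced to a stand-alone predicate (the concrete TransactionError this maps to is not shown in this hunk):

use std::collections::HashSet;
use solana_sdk::pubkey::Pubkey;

fn has_duplicate_account_keys(account_keys: &[Pubkey]) -> bool {
    let mut seen = HashSet::with_capacity(account_keys.len());
    // `insert` returns false when the key was already present
    account_keys.iter().any(|key| !seen.insert(*key))
}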
accounts.store_for_tests(0, &keypair1.pubkey(), &account1); accounts.store_for_tests(0, &keypair2.pubkey(), &account2); @@ -2847,12 +1421,8 @@ mod tests { let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); accounts.store_for_tests(0, &keypair2.pubkey(), &account2); @@ -3006,12 +1576,8 @@ mod tests { let mut loaded = vec![loaded0, loaded1]; - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); { accounts .account_locks @@ -3057,12 +1623,8 @@ mod tests { #[test] fn huge_clean() { solana_logger::setup(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut old_pubkey = Pubkey::default(); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); info!("storing.."); @@ -3082,95 +1644,6 @@ mod tests { accounts.accounts_db.clean_accounts_for_tests(); } - fn load_accounts_no_store( - accounts: &Accounts, - tx: Transaction, - account_overrides: Option<&AccountOverrides>, - ) -> Vec { - let tx = SanitizedTransaction::from_transaction_for_tests(tx); - let rent_collector = RentCollector::default(); - let mut hash_queue = BlockhashQueue::new(100); - hash_queue.register_hash(tx.message().recent_blockhash(), 10); - - let ancestors = vec![(0, 0)].into_iter().collect(); - let mut error_counters = TransactionErrorMetrics::default(); - accounts.load_accounts( - &ancestors, - &[tx], - vec![(Ok(()), None)], - &hash_queue, - &mut error_counters, - &rent_collector, - &FeatureSet::all_enabled(), - &FeeStructure::default(), - account_overrides, - RewardInterval::OutsideInterval, - &HashMap::new(), - &LoadedProgramsForTxBatch::default(), - ) - } - - #[test] - fn test_instructions() { - solana_logger::setup(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); - - let instructions_key = solana_sdk::sysvar::instructions::id(); - let keypair = Keypair::new(); - let instructions = vec![CompiledInstruction::new(1, &(), vec![0, 1])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[solana_sdk::pubkey::new_rand(), instructions_key], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - let loaded_accounts = load_accounts_no_store(&accounts, tx, None); - assert_eq!(loaded_accounts.len(), 1); - assert!(loaded_accounts[0].0.is_err()); - } - - #[test] - fn test_overrides() { - solana_logger::setup(); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); - let mut 
account_overrides = AccountOverrides::default(); - let slot_history_id = sysvar::slot_history::id(); - let account = AccountSharedData::new(42, 0, &Pubkey::default()); - account_overrides.set_slot_history(Some(account)); - - let keypair = Keypair::new(); - let account = AccountSharedData::new(1_000_000, 0, &Pubkey::default()); - accounts.store_slow_uncached(0, &keypair.pubkey(), &account); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0])]; - let tx = Transaction::new_with_compiled_instructions( - &[&keypair], - &[slot_history_id], - Hash::default(), - vec![native_loader::id()], - instructions, - ); - - let loaded_accounts = load_accounts_no_store(&accounts, tx, Some(&account_overrides)); - assert_eq!(loaded_accounts.len(), 1); - let loaded_transaction = loaded_accounts[0].0.as_ref().unwrap(); - assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey()); - assert_eq!(loaded_transaction.accounts[1].0, slot_history_id); - assert_eq!(loaded_transaction.accounts[1].1.lamports(), 42); - } - fn create_accounts_prepare_if_nonce_account() -> ( Pubkey, AccountSharedData, @@ -3485,12 +1958,8 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let txs = vec![tx]; let execution_results = vec![new_execution_result( Err(TransactionError::InstructionError( @@ -3598,12 +2067,8 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); let txs = vec![tx]; let execution_results = vec![new_execution_result( Err(TransactionError::InstructionError( @@ -3639,15 +2104,11 @@ mod tests { #[test] fn test_load_largest_accounts() { - let accounts = Accounts::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); /* This test assumes pubkey0 < pubkey1 < pubkey2. - * But the keys created with new_unique() does not gurantee this + * But the keys created with new_unique() does not guarantee this * order because of the endianness. new_unique() calls add 1 at each * key generaration as the little endian integer. 
A pubkey stores its * value in a 32-byte array bytes, and its eq-partial trait considers @@ -3894,389 +2355,4 @@ mod tests { )); } } - - #[test] - fn test_accumulate_and_check_loaded_account_data_size() { - let mut error_counter = TransactionErrorMetrics::default(); - - // assert check is OK if data limit is not enabled - { - let mut accumulated_data_size: usize = 0; - let data_size = usize::MAX; - let requested_data_size_limit = None; - - assert!(Accounts::accumulate_and_check_loaded_account_data_size( - &mut accumulated_data_size, - data_size, - requested_data_size_limit, - &mut error_counter - ) - .is_ok()); - } - - // assert check will fail with correct error if loaded data exceeds limit - { - let mut accumulated_data_size: usize = 0; - let data_size: usize = 123; - let requested_data_size_limit = NonZeroUsize::new(data_size); - - // OK - loaded data size is up to limit - assert!(Accounts::accumulate_and_check_loaded_account_data_size( - &mut accumulated_data_size, - data_size, - requested_data_size_limit, - &mut error_counter - ) - .is_ok()); - assert_eq!(data_size, accumulated_data_size); - - // fail - loading more data that would exceed limit - let another_byte: usize = 1; - assert_eq!( - Accounts::accumulate_and_check_loaded_account_data_size( - &mut accumulated_data_size, - another_byte, - requested_data_size_limit, - &mut error_counter - ), - Err(TransactionError::MaxLoadedAccountsDataSizeExceeded) - ); - } - } - - #[test] - fn test_get_requested_loaded_accounts_data_size_limit() { - // an prrivate helper function - fn test( - instructions: &[solana_sdk::instruction::Instruction], - feature_set: &FeatureSet, - expected_result: &Result>, - ) { - let payer_keypair = Keypair::new(); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( - &[&payer_keypair], - Message::new(instructions, Some(&payer_keypair.pubkey())), - Hash::default(), - )); - assert_eq!( - *expected_result, - Accounts::get_requested_loaded_accounts_data_size_limit(&tx, feature_set) - ); - } - - let tx_not_set_limit = &[solana_sdk::instruction::Instruction::new_with_bincode( - Pubkey::new_unique(), - &0_u8, - vec![], - )]; - let tx_set_limit_99 = - &[ - solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(99u32), - solana_sdk::instruction::Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ]; - let tx_set_limit_0 = - &[ - solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0u32), - solana_sdk::instruction::Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ]; - - let result_no_limit = Ok(None); - let result_default_limit = Ok(Some( - NonZeroUsize::new( - usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .unwrap(), - ) - .unwrap(), - )); - let result_requested_limit: Result> = - Ok(Some(NonZeroUsize::new(99).unwrap())); - let result_invalid_limit = Err(TransactionError::InvalidLoadedAccountsDataSizeLimit); - - let mut feature_set = FeatureSet::default(); - - // if `cap_transaction_accounts_data_size feature` is disable, - // the result will always be no limit - test(tx_not_set_limit, &feature_set, &result_no_limit); - test(tx_set_limit_99, &feature_set, &result_no_limit); - test(tx_set_limit_0, &feature_set, &result_no_limit); - - // if `cap_transaction_accounts_data_size` is enabled, and - // `add_set_tx_loaded_accounts_data_size_instruction` is disabled, - // the result will always be default limit (64MiB) - 
feature_set.activate(&feature_set::cap_transaction_accounts_data_size::id(), 0); - test(tx_not_set_limit, &feature_set, &result_default_limit); - test(tx_set_limit_99, &feature_set, &result_default_limit); - test(tx_set_limit_0, &feature_set, &result_default_limit); - - // if `cap_transaction_accounts_data_size` and - // `add_set_tx_loaded_accounts_data_size_instruction` are both enabled, - // the results are: - // if tx doesn't set limit, then default limit (64MiB) - // if tx sets limit, then requested limit - // if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit - feature_set.activate( - &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), - 0, - ); - test(tx_not_set_limit, &feature_set, &result_default_limit); - test(tx_set_limit_99, &feature_set, &result_requested_limit); - test(tx_set_limit_0, &feature_set, &result_invalid_limit); - } - - #[test] - fn test_load_accounts_too_high_prioritization_fee() { - solana_logger::setup(); - let lamports_per_signature = 5000_u64; - let request_units = 1_000_000_u32; - let request_unit_price = 2_000_000_000_u64; - let prioritization_fee_details = PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(request_unit_price), - request_units as u64, - ); - let prioritization_fee = prioritization_fee_details.get_fee(); - - let keypair = Keypair::new(); - let key0 = keypair.pubkey(); - // set up account with balance of `prioritization_fee` - let account = AccountSharedData::new(prioritization_fee, 0, &Pubkey::default()); - let accounts = vec![(key0, account)]; - - let instructions = &[ - ComputeBudgetInstruction::set_compute_unit_limit(request_units), - ComputeBudgetInstruction::set_compute_unit_price(request_unit_price), - ]; - let tx = Transaction::new( - &[&keypair], - Message::new(instructions, Some(&key0)), - Hash::default(), - ); - - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); - - let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); - let fee = FeeStructure::default().calculate_fee( - &message, - lamports_per_signature, - &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) - .unwrap_or_default() - .into(), - true, - false, - ); - assert_eq!(fee, lamports_per_signature + prioritization_fee); - - // assert fail to load account with 2B lamport balance for transaction asking for 2B - // lamports as prioritization fee. 
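Spelling out the arithmetic behind the "2B" comment above: the compute-unit price is quoted in micro-lamports per unit, so 1_000_000 requested units at 2_000_000_000 micro-lamports each come to 2e15 micro-lamports, i.e. 2_000_000_000 lamports, exactly the payer's starting balance. A sketch of the conversion (ceiling rounding assumed):

const MICRO_LAMPORTS_PER_LAMPORT: u128 = 1_000_000;

fn prioritization_fee_lamports(compute_units: u64, micro_lamports_per_cu: u64) -> u64 {
    let micro_lamports = compute_units as u128 * micro_lamports_per_cu as u128;
    // round up so a non-zero request never collapses to a zero fee
    micro_lamports.div_ceil(MICRO_LAMPORTS_PER_LAMPORT) as u64
}

// 1_000_000 * 2_000_000_000 / 1_000_000 = 2_000_000_000 lamports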
- let mut error_counters = TransactionErrorMetrics::default(); - let loaded_accounts = load_accounts_with_fee( - tx, - &accounts, - lamports_per_signature, - &mut error_counters, - None, - ); - - assert_eq!(error_counters.insufficient_funds, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0].clone(), - (Err(TransactionError::InsufficientFundsForFee), None), - ); - } - - struct ValidateFeePayerTestParameter { - is_nonce: bool, - payer_init_balance: u64, - fee: u64, - expected_result: Result<()>, - payer_post_balance: u64, - feature_checked_arithmmetic_enable: bool, - } - - fn validate_fee_payer_account( - test_parameter: ValidateFeePayerTestParameter, - rent_collector: &RentCollector, - ) { - let payer_account_keys = Keypair::new(); - let mut account = if test_parameter.is_nonce { - AccountSharedData::new_data( - test_parameter.payer_init_balance, - &NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())), - &system_program::id(), - ) - .unwrap() - } else { - AccountSharedData::new(test_parameter.payer_init_balance, 0, &system_program::id()) - }; - let mut feature_set = FeatureSet::default(); - if test_parameter.feature_checked_arithmmetic_enable { - feature_set.activate(&feature_set::checked_arithmetic_in_fee_validation::id(), 0); - }; - let result = Accounts::validate_fee_payer( - &payer_account_keys.pubkey(), - &mut account, - 0, - &mut TransactionErrorMetrics::default(), - rent_collector, - &feature_set, - test_parameter.fee, - ); - - assert_eq!(result, test_parameter.expected_result); - assert_eq!(account.lamports(), test_parameter.payer_post_balance); - } - - #[test] - fn test_validate_fee_payer() { - let rent_collector = RentCollector::new( - 0, - EpochSchedule::default(), - 500_000.0, - Rent { - lamports_per_byte_year: 1, - ..Rent::default() - }, - ); - let min_balance = rent_collector.rent.minimum_balance(NonceState::size()); - let fee = 5_000; - - // If payer account has sufficient balance, expect successful fee deduction, - // regardless feature gate status, or if payer is nonce account. - { - for feature_checked_arithmmetic_enable in [true, false] { - for (is_nonce, min_balance) in [(true, min_balance), (false, 0)] { - validate_fee_payer_account( - ValidateFeePayerTestParameter { - is_nonce, - payer_init_balance: min_balance + fee, - fee, - expected_result: Ok(()), - payer_post_balance: min_balance, - feature_checked_arithmmetic_enable, - }, - &rent_collector, - ); - } - } - } - - // If payer account has no balance, expected AccountNotFound Error - // regardless feature gate status, or if payer is nonce account. - { - for feature_checked_arithmmetic_enable in [true, false] { - for is_nonce in [true, false] { - validate_fee_payer_account( - ValidateFeePayerTestParameter { - is_nonce, - payer_init_balance: 0, - fee, - expected_result: Err(TransactionError::AccountNotFound), - payer_post_balance: 0, - feature_checked_arithmmetic_enable, - }, - &rent_collector, - ); - } - } - } - - // If payer account has insufficent balance, expect InsufficientFundsForFee error - // regardless feature gate status, or if payer is nonce account. 
- { - for feature_checked_arithmmetic_enable in [true, false] { - for (is_nonce, min_balance) in [(true, min_balance), (false, 0)] { - validate_fee_payer_account( - ValidateFeePayerTestParameter { - is_nonce, - payer_init_balance: min_balance + fee - 1, - fee, - expected_result: Err(TransactionError::InsufficientFundsForFee), - payer_post_balance: min_balance + fee - 1, - feature_checked_arithmmetic_enable, - }, - &rent_collector, - ); - } - } - } - - // normal payer account has balance of u64::MAX, so does fee; since it does not require - // min_balance, expect successful fee deduction, regardless of feature gate status - { - for feature_checked_arithmmetic_enable in [true, false] { - validate_fee_payer_account( - ValidateFeePayerTestParameter { - is_nonce: false, - payer_init_balance: u64::MAX, - fee: u64::MAX, - expected_result: Ok(()), - payer_post_balance: 0, - feature_checked_arithmmetic_enable, - }, - &rent_collector, - ); - } - } - } - - #[test] - fn test_validate_nonce_fee_payer_with_checked_arithmetic() { - let rent_collector = RentCollector::new( - 0, - EpochSchedule::default(), - 500_000.0, - Rent { - lamports_per_byte_year: 1, - ..Rent::default() - }, - ); - - // nonce payer account has balance of u64::MAX, so does fee; due to nonce account - // requires additional min_balance, expect InsufficientFundsForFee error if feature gate is - // enabled - validate_fee_payer_account( - ValidateFeePayerTestParameter { - is_nonce: true, - payer_init_balance: u64::MAX, - fee: u64::MAX, - expected_result: Err(TransactionError::InsufficientFundsForFee), - payer_post_balance: u64::MAX, - feature_checked_arithmmetic_enable: true, - }, - &rent_collector, - ); - } - - #[test] - #[should_panic] - fn test_validate_nonce_fee_payer_without_checked_arithmetic() { - let rent_collector = RentCollector::new( - 0, - EpochSchedule::default(), - 500_000.0, - Rent { - lamports_per_byte_year: 1, - ..Rent::default() - }, - ); - - // same test setup as `test_validate_nonce_fee_payer_with_checked_arithmetic`: - // nonce payer account has balance of u64::MAX, so does fee; and nonce account - // requires additional min_balance, if feature gate is not enabled, in `debug` - // mode, `u64::MAX + min_balance` would panic on "attempt to add with overflow"; - // in `release` mode, the addition will wrap, so the expected result would be - // `Ok(())` with post payer balance `0`, therefore fails test with a panic. 
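The overflow described above is exactly what checked arithmetic in fee validation guards against: the required balance is min_balance + fee, and with both near u64::MAX the naive sum panics in debug builds and wraps in release builds. A sketch of the checked shape (illustrative names):

use solana_sdk::transaction::TransactionError;

fn validate_payer_balance(
    balance: u64,
    fee: u64,
    min_balance: u64,
) -> Result<(), TransactionError> {
    let required = min_balance
        .checked_add(fee)
        // overflow can only mean the payer cannot possibly afford it
        .ok_or(TransactionError::InsufficientFundsForFee)?;
    if balance < required {
        return Err(TransactionError::InsufficientFundsForFee);
    }
    Ok(())
}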
- validate_fee_payer_account( - ValidateFeePayerTestParameter { - is_nonce: true, - payer_init_balance: u64::MAX, - fee: u64::MAX, - expected_result: Err(TransactionError::InsufficientFundsForFee), - payer_post_balance: u64::MAX, - feature_checked_arithmmetic_enable: false, - }, - &rent_collector, - ); - } } diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e8435ff2218edb..570ff8c26a415c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -31,7 +31,9 @@ use { AccountStorage, AccountStorageStatus, ShrinkInProgress, }, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, - accounts_file::{AccountsFile, AccountsFileError}, + accounts_file::{ + AccountsFile, AccountsFileError, MatchAccountOwnerError, ALIGN_BOUNDARY_OFFSET, + }, accounts_hash::{ AccountHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, AccountsHasher, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, IncrementalAccountsHash, @@ -54,8 +56,7 @@ use { get_ancient_append_vec_capacity, is_ancient, AccountsToStore, StorageSelector, }, append_vec::{ - aligned_stored_size, AppendVec, MatchAccountOwnerError, APPEND_VEC_MMAPPED_FILES_OPEN, - STORE_META_OVERHEAD, + aligned_stored_size, AppendVec, APPEND_VEC_MMAPPED_FILES_OPEN, STORE_META_OVERHEAD, }, cache_hash_data::{CacheHashData, CacheHashDataFileReference}, contains::Contains, @@ -67,6 +68,7 @@ use { rent_collector::RentCollector, sorted_storages::SortedStorages, storable_accounts::StorableAccounts, + u64_align, verify_accounts_hash_in_background::VerifyAccountsHashInBackground, }, blake3::traits::digest::Digest, @@ -75,6 +77,7 @@ use { log::*, rand::{thread_rng, Rng}, rayon::{prelude::*, ThreadPool}, + seqlock::SeqLock, serde::{Deserialize, Serialize}, smallvec::SmallVec, solana_measure::{measure::Measure, measure_us}, @@ -87,7 +90,6 @@ use { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, - rent::Rent, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, @@ -100,7 +102,6 @@ use { io::Result as IoResult, ops::{Range, RangeBounds}, path::{Path, PathBuf}, - str::FromStr, sync::{ atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering}, Arc, Condvar, Mutex, RwLock, @@ -337,13 +338,17 @@ impl CurrentAncientAppendVec { } } + /// Create ancient append vec for a slot + /// min_bytes: the new append vec needs to have at least this capacity #[must_use] fn create_ancient_append_vec<'a>( &mut self, slot: Slot, db: &'a AccountsDb, + min_bytes: usize, ) -> ShrinkInProgress<'a> { - let shrink_in_progress = db.get_store_for_shrink(slot, get_ancient_append_vec_capacity()); + let size = get_ancient_append_vec_capacity().max(min_bytes as u64); + let shrink_in_progress = db.get_store_for_shrink(slot, size); *self = Self::new(slot, Arc::clone(shrink_in_progress.new_storage())); shrink_in_progress } @@ -352,9 +357,10 @@ impl CurrentAncientAppendVec { &mut self, slot: Slot, db: &'a AccountsDb, + min_bytes: usize, ) -> Option> { if self.slot_and_append_vec.is_none() { - Some(self.create_ancient_append_vec(slot, db)) + Some(self.create_ancient_append_vec(slot, db, min_bytes)) } else { None } @@ -371,21 +377,31 @@ impl CurrentAncientAppendVec { } /// helper function to cleanup call to 'store_accounts_frozen' + /// return timing and bytes written fn store_ancient_accounts( &self, db: &AccountsDb, accounts_to_store: &AccountsToStore, storage_selector: StorageSelector, - ) -> StoreAccountsTiming { + ) -> (StoreAccountsTiming, u64) { let accounts = 
accounts_to_store.get(storage_selector); - db.store_accounts_frozen( - (self.slot(), accounts, accounts_to_store.slot), + let previous_available = self.append_vec().accounts.remaining_bytes(); + let timing = db.store_accounts_frozen( + (self.slot(), accounts, accounts_to_store.slot()), None::>, self.append_vec(), None, StoreReclaims::Ignore, - ) + ); + let bytes_written = + previous_available.saturating_sub(self.append_vec().accounts.remaining_bytes()); + assert_eq!( + bytes_written, + u64_align!(accounts_to_store.get_bytes(storage_selector)) as u64 + ); + + (timing, bytes_written) } } @@ -459,7 +475,6 @@ impl AncientSlotPubkeys { pub(crate) struct ShrinkCollect<'a, T: ShrinkCollectRefs<'a>> { pub(crate) slot: Slot, pub(crate) capacity: u64, - pub(crate) aligned_total_bytes: u64, pub(crate) unrefed_pubkeys: Vec<&'a Pubkey>, pub(crate) alive_accounts: T, /// total size in storage of all alive accounts @@ -476,7 +491,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING), base_working_path: None, accounts_hash_cache_path: None, - filler_accounts_config: FillerAccountsConfig::const_default(), write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -489,7 +503,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), base_working_path: None, accounts_hash_cache_path: None, - filler_accounts_config: FillerAccountsConfig::const_default(), write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -523,26 +536,6 @@ pub struct AccountsAddRootTiming { pub store_us: u64, } -#[derive(Debug, Clone, Copy)] -pub struct FillerAccountsConfig { - /// Number of filler accounts - pub count: usize, - /// Data size per account, in bytes - pub size: usize, -} - -impl FillerAccountsConfig { - pub const fn const_default() -> Self { - Self { count: 0, size: 0 } - } -} - -impl Default for FillerAccountsConfig { - fn default() -> Self { - Self::const_default() - } -} - const ANCIENT_APPEND_VEC_DEFAULT_OFFSET: Option = Some(-10_000); #[derive(Debug, Default, Clone)] @@ -551,7 +544,6 @@ pub struct AccountsDbConfig { /// Base directory for various necessary files pub base_working_path: Option, pub accounts_hash_cache_path: Option, - pub filler_accounts_config: FillerAccountsConfig, pub write_cache_limit_bytes: Option, /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) @@ -1021,7 +1013,7 @@ pub struct AccountStorageEntry { /// any accounts in it /// status corresponding to the storage, lets us know that /// the append_vec, once maxed out, then emptied, can be reclaimed - count_and_status: RwLock<(usize, AccountStorageStatus)>, + count_and_status: SeqLock<(usize, AccountStorageStatus)>, /// This is the total number of accounts stored ever since initialized to keep /// track of lifetime count of all store operations. 
And this differs from @@ -1044,7 +1036,7 @@ impl AccountStorageEntry { id: AtomicAppendVecId::new(id), slot: AtomicU64::new(slot), accounts, - count_and_status: RwLock::new((0, AccountStorageStatus::Available)), + count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(0), alive_bytes: AtomicUsize::new(0), } @@ -1060,14 +1052,14 @@ impl AccountStorageEntry { id: AtomicAppendVecId::new(id), slot: AtomicU64::new(slot), accounts, - count_and_status: RwLock::new((0, AccountStorageStatus::Available)), + count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(num_accounts), alive_bytes: AtomicUsize::new(0), } } pub fn set_status(&self, mut status: AccountStorageStatus) { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); let count = count_and_status.0; @@ -1088,7 +1080,7 @@ impl AccountStorageEntry { } pub fn recycle(&self, slot: Slot, id: AppendVecId) { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); self.accounts.reset(); *count_and_status = (0, AccountStorageStatus::Available); self.slot.store(slot, Ordering::Release); @@ -1098,11 +1090,11 @@ impl AccountStorageEntry { } pub fn status(&self) -> AccountStorageStatus { - self.count_and_status.read().unwrap().1 + self.count_and_status.read().1 } pub fn count(&self) -> usize { - self.count_and_status.read().unwrap().0 + self.count_and_status.read().0 } pub fn approx_stored_count(&self) -> usize { @@ -1142,14 +1134,14 @@ impl AccountStorageEntry { } fn add_account(&self, num_bytes: usize) { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); *count_and_status = (count_and_status.0 + 1, count_and_status.1); self.approx_store_count.fetch_add(1, Ordering::Relaxed); self.alive_bytes.fetch_add(num_bytes, Ordering::SeqCst); } fn try_available(&self) -> bool { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); let (count, status) = *count_and_status; if status == AccountStorageStatus::Available { @@ -1165,7 +1157,7 @@ impl AccountStorageEntry { } fn remove_account(&self, num_bytes: usize, reset_accounts: bool) -> usize { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); let (mut count, mut status) = *count_and_status; if count == 1 && status == AccountStorageStatus::Full && reset_accounts { @@ -1538,17 +1530,8 @@ pub struct AccountsDb { /// GeyserPlugin accounts update notifier accounts_update_notifier: Option, - filler_accounts_config: FillerAccountsConfig, - pub filler_account_suffix: Option, - pub(crate) active_stats: ActiveStats, - /// number of filler accounts to add for each slot - pub filler_accounts_per_slot: AtomicU64, - - /// number of slots remaining where filler accounts should be added - pub filler_account_slots_remaining: AtomicU64, - pub verify_accounts_hash_in_bg: VerifyAccountsHashInBackground, /// Used to disable logging dead slots during removal. 
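The RwLock-to-SeqLock conversion above is possible because (usize, AccountStorageStatus) is a small Copy value: seqlock readers copy out a consistent snapshot without taking a lock or risking poisoning, while lock_write still serializes writers. A minimal usage sketch with the seqlock crate:

use seqlock::SeqLock;

fn seqlock_sketch() {
    let count_and_status = SeqLock::new((0usize, 'a'));
    {
        // writers take an exclusive guard, as with RwLock::write
        let mut guard = count_and_status.lock_write();
        *guard = (guard.0 + 1, guard.1);
    }
    // readers get the value by copy: no guard and no `.unwrap()`
    let (count, status) = count_and_status.read();
    assert_eq!((count, status), (1, 'a'));
}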
@@ -2386,7 +2369,6 @@ struct ScanState<'a> { bin_range: &'a Range, config: &'a CalcAccountsHashConfig<'a>, mismatch_found: Arc, - filler_account_suffix: Option<&'a Pubkey>, range: usize, sort_time: Arc, pubkey_to_bin_index: usize, @@ -2416,9 +2398,7 @@ impl<'a> AppendVecScan for ScanState<'a> { let mut loaded_hash = loaded_account.loaded_hash(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); - if (self.config.check_hash || hash_is_missing) - && !AccountsDb::is_filler_account_helper(pubkey, self.filler_account_suffix) - { + if self.config.check_hash || hash_is_missing { let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { loaded_hash = computed_hash; @@ -2499,8 +2479,6 @@ impl AccountsDb { AccountsDb { create_ancient_storage: CreateAncientStorage::Pack, verify_accounts_hash_in_bg: VerifyAccountsHashInBackground::default(), - filler_accounts_per_slot: AtomicU64::default(), - filler_account_slots_remaining: AtomicU64::default(), active_stats: ActiveStats::default(), skip_initial_hash_calc: false, ancient_append_vec_offset: None, @@ -2553,8 +2531,6 @@ impl AccountsDb { dirty_stores: DashMap::default(), zero_lamport_accounts_to_purge_after_full_snapshot: DashSet::default(), accounts_update_notifier: None, - filler_accounts_config: FillerAccountsConfig::default(), - filler_account_suffix: None, log_dead_slots: AtomicBool::new(true), exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), @@ -2563,19 +2539,11 @@ impl AccountsDb { } } - pub fn new_for_tests(paths: Vec, cluster_type: &ClusterType) -> Self { - AccountsDb::new_with_config( - paths, - cluster_type, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ) + pub fn new_single_for_tests() -> Self { + AccountsDb::new_for_tests(Vec::new(), &ClusterType::Development) } - pub fn new_for_tests_with_caching(paths: Vec, cluster_type: &ClusterType) -> Self { + pub fn new_for_tests(paths: Vec, cluster_type: &ClusterType) -> Self { AccountsDb::new_with_config( paths, cluster_type, @@ -2606,10 +2574,6 @@ impl AccountsDb { let accounts_hash_cache_path = accounts_db_config .as_ref() .and_then(|config| config.accounts_hash_cache_path.clone()); - let filler_accounts_config = accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config) - .unwrap_or_default(); let skip_initial_hash_calc = accounts_db_config .as_ref() .map(|config| config.skip_initial_hash_calc) @@ -2643,11 +2607,6 @@ impl AccountsDb { let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig = PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards); - let filler_account_suffix = if filler_accounts_config.count > 0 { - Some(solana_sdk::pubkey::new_rand()) - } else { - None - }; let paths_is_empty = paths.is_empty(); let mut new = Self { paths, @@ -2657,8 +2616,6 @@ impl AccountsDb { account_indexes, shrink_ratio, accounts_update_notifier, - filler_accounts_config, - filler_account_suffix, create_ancient_storage, write_cache_limit_bytes: accounts_db_config .as_ref() @@ -2690,20 +2647,6 @@ impl AccountsDb { new } - /// Gradual means filler accounts will be added over the course of an epoch, during cache flush. - /// This is in contrast to adding all the filler accounts immediately before the validator starts. 
- fn init_gradual_filler_accounts(&self, slots_per_epoch: Slot) { - let count = self.filler_accounts_config.count; - if count > 0 { - // filler accounts are a debug only feature. integer division is fine here - let accounts_per_slot = (count as u64) / slots_per_epoch; - self.filler_accounts_per_slot - .store(accounts_per_slot, Ordering::Release); - self.filler_account_slots_remaining - .store(slots_per_epoch, Ordering::Release); - } - } - pub fn set_shrink_paths(&self, paths: Vec) { assert!(!paths.is_empty()); let mut shrink_paths = self.shrink_paths.write().unwrap(); @@ -2722,14 +2665,6 @@ impl AccountsDb { self.base_working_path.clone() } - pub fn new_single_for_tests() -> Self { - AccountsDb::new_for_tests(Vec::new(), &ClusterType::Development) - } - - pub fn new_single_for_tests_with_caching() -> Self { - AccountsDb::new_for_tests_with_caching(Vec::new(), &ClusterType::Development) - } - fn next_id(&self) -> AppendVecId { let next_id = self.next_id.fetch_add(1, Ordering::AcqRel); assert!(next_id != AppendVecId::MAX, "We've run out of storage ids!"); @@ -4047,7 +3982,6 @@ impl AccountsDb { ShrinkCollect { slot, capacity: *capacity, - aligned_total_bytes, unrefed_pubkeys, alive_accounts, alive_total_bytes, @@ -4114,7 +4048,10 @@ impl AccountsDb { self.shrink_collect::>(store, &unique_accounts, &self.shrink_stats); // This shouldn't happen if alive_bytes/approx_stored_count are accurate - if Self::should_not_shrink(shrink_collect.aligned_total_bytes, shrink_collect.capacity) { + if Self::should_not_shrink( + shrink_collect.alive_total_bytes as u64, + shrink_collect.capacity, + ) { self.shrink_stats .skipped_shrink .fetch_add(1, Ordering::Relaxed); @@ -4130,20 +4067,20 @@ impl AccountsDb { let total_accounts_after_shrink = shrink_collect.alive_accounts.len(); debug!( - "shrinking: slot: {}, accounts: ({} => {}) bytes: ({} ; aligned to: {}) original: {}", + "shrinking: slot: {}, accounts: ({} => {}) bytes: {} original: {}", slot, shrink_collect.total_starting_accounts, total_accounts_after_shrink, shrink_collect.alive_total_bytes, - shrink_collect.aligned_total_bytes, shrink_collect.capacity, ); let mut stats_sub = ShrinkStatsSub::default(); let mut rewrite_elapsed = Measure::start("rewrite_elapsed"); - if shrink_collect.aligned_total_bytes > 0 { - let (shrink_in_progress, time_us) = - measure_us!(self.get_store_for_shrink(slot, shrink_collect.aligned_total_bytes)); + if shrink_collect.alive_total_bytes > 0 { + let (shrink_in_progress, time_us) = measure_us!( + self.get_store_for_shrink(slot, shrink_collect.alive_total_bytes as u64) + ); stats_sub.create_and_insert_store_elapsed_us = time_us; // here, we're writing back alive_accounts. That should be an atomic operation @@ -4421,15 +4358,6 @@ impl AccountsDb { .get_all_less_than(slot) } - fn get_prior_root(&self, slot: Slot) -> Option { - self.accounts_index - .roots_tracker - .read() - .unwrap() - .alive_roots - .get_prior(slot) - } - /// return all slots that are more than one epoch old and thus could already be an ancient append vec /// or which could need to be combined into a new or existing ancient append vec /// offset is used to combine newer slots than we normally would. This is designed to be used for testing. 
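Combining the doc comment above with the config note earlier in this diff ("Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset'))"), the cutoff works out as follows; names here are illustrative:

/// Slots at or below the returned value are old enough to be combined
/// into ancient append vecs; a larger `offset` pulls the cutoff forward.
fn ancient_slot_cutoff(max_slot: u64, slots_per_epoch: u64, offset: u64) -> u64 {
    max_slot.saturating_sub(slots_per_epoch.saturating_sub(offset))
}

// offset = 0               -> cutoff sits one full epoch behind the tip
// offset = slots_per_epoch -> every rooted slot up to the tip qualifies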
@@ -4662,8 +4590,10 @@ impl AccountsDb { } let mut stats_sub = ShrinkStatsSub::default(); - let (mut shrink_in_progress, create_and_insert_store_elapsed_us) = - measure_us!(current_ancient.create_if_necessary(slot, self)); + let mut bytes_remaining_to_write = shrink_collect.alive_total_bytes; + let (mut shrink_in_progress, create_and_insert_store_elapsed_us) = measure_us!( + current_ancient.create_if_necessary(slot, self, shrink_collect.alive_total_bytes) + ); stats_sub.create_and_insert_store_elapsed_us = create_and_insert_store_elapsed_us; let available_bytes = current_ancient.append_vec().accounts.remaining_bytes(); // split accounts in 'slot' into: @@ -4685,8 +4615,10 @@ impl AccountsDb { let mut rewrite_elapsed = Measure::start("rewrite_elapsed"); // write what we can to the current ancient storage - stats_sub.store_accounts_timing = + let (store_accounts_timing, bytes_written) = current_ancient.store_ancient_accounts(self, &to_store, StorageSelector::Primary); + stats_sub.store_accounts_timing = store_accounts_timing; + bytes_remaining_to_write = bytes_remaining_to_write.saturating_sub(bytes_written as usize); // handle accounts from 'slot' which did not fit into the current ancient append vec if to_store.has_overflow() { @@ -4694,8 +4626,14 @@ impl AccountsDb { // Assert: it cannot be the case that we already had an ancient append vec at this slot and // yet that ancient append vec does not have room for the accounts stored at this slot currently assert_ne!(slot, current_ancient.slot()); - let (shrink_in_progress_overflow, time_us) = - measure_us!(current_ancient.create_ancient_append_vec(slot, self)); + + // Now we create an ancient append vec at `slot` to store the overflows. + let (shrink_in_progress_overflow, time_us) = measure_us!(current_ancient + .create_ancient_append_vec( + slot, + self, + to_store.get_bytes(StorageSelector::Overflow) + )); stats_sub.create_and_insert_store_elapsed_us += time_us; // We cannot possibly be shrinking the original slot that created an ancient append vec // AND not have enough room in the ancient append vec at that slot @@ -4707,10 +4645,16 @@ impl AccountsDb { shrink_in_progress = Some(shrink_in_progress_overflow); // write the overflow accounts to the next ancient storage - let timing = + let (store_accounts_timing, bytes_written) = current_ancient.store_ancient_accounts(self, &to_store, StorageSelector::Overflow); - stats_sub.store_accounts_timing.accumulate(&timing); + bytes_remaining_to_write = + bytes_remaining_to_write.saturating_sub(bytes_written as usize); + + stats_sub + .store_accounts_timing + .accumulate(&store_accounts_timing); } + assert_eq!(bytes_remaining_to_write, 0); rewrite_elapsed.stop(); stats_sub.rewrite_elapsed_us = rewrite_elapsed.as_us(); @@ -5751,7 +5695,7 @@ impl AccountsDb { fn has_space_available(&self, slot: Slot, size: u64) -> bool { let store = self.storage.get_slot_storage_entry(slot).unwrap(); if store.status() == AccountStorageStatus::Available - && (store.accounts.capacity() - store.accounts.len() as u64) > size + && store.accounts.remaining_bytes() >= size { return true; } @@ -6309,6 +6253,14 @@ impl AccountsDb { .unwrap_or_default(); let data_len = (data_len + STORE_META_OVERHEAD) as u64; if !self.has_space_available(slot, data_len) { + info!( + "write_accounts_to_storage, no space: {}, {}, {}, {}, {}", + storage.accounts.capacity(), + storage.accounts.remaining_bytes(), + data_len, + infos.len(), + accounts_and_meta_to_store.len() + ); let special_store_size = std::cmp::max(data_len * 2, self.file_size); 
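Two sizing rules from this hunk, isolated for readability (get_ancient_append_vec_capacity and remaining_bytes are the crate's own; the functions below are illustrative):

fn ancient_store_size(default_capacity: u64, min_bytes: usize) -> u64 {
    // grow past the default only when one slot's alive bytes demand it
    default_capacity.max(min_bytes as u64)
}

fn bytes_written(remaining_before: u64, remaining_after: u64) -> u64 {
    // measured as the drop in the storage's remaining capacity, which the
    // new code reconciles against the u64-aligned size of the input batch
    remaining_before.saturating_sub(remaining_after)
}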
if self .try_recycle_and_insert_store(slot, special_store_size, std::u64::MAX) @@ -6567,30 +6519,6 @@ impl AccountsDb { } } - let mut filler_accounts = 0; - if self.filler_accounts_enabled() { - let slots_remaining = self.filler_account_slots_remaining.load(Ordering::Acquire); - if slots_remaining > 0 { - // figure out - let pr = self.get_prior_root(slot); - - if let Some(prior_root) = pr { - let filler_account_slots = - std::cmp::min(slot.saturating_sub(prior_root), slots_remaining); - self.filler_account_slots_remaining - .fetch_sub(filler_account_slots, Ordering::Release); - let filler_accounts_per_slot = - self.filler_accounts_per_slot.load(Ordering::Acquire); - filler_accounts = filler_account_slots * filler_accounts_per_slot; - - // keep space for filler accounts - let addl_size = filler_accounts - * (aligned_stored_size(self.filler_accounts_config.size) as u64); - total_size += addl_size; - } - } - } - let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec) = iter_items .iter() .filter_map(|iter_item| { @@ -6640,25 +6568,6 @@ impl AccountsDb { StoreReclaims::Default, ); - if filler_accounts > 0 { - // add extra filler accounts at the end of the append vec - let (account, hash) = self.get_filler_account(&Rent::default()); - let mut accounts = Vec::with_capacity(filler_accounts as usize); - let mut hashes = Vec::with_capacity(filler_accounts as usize); - let pubkeys = self.get_filler_account_pubkeys(filler_accounts as usize); - pubkeys.iter().for_each(|key| { - accounts.push((key, &account)); - hashes.push(hash); - }); - self.store_accounts_frozen( - (slot, &accounts[..]), - Some(hashes), - &flushed_store, - None, - StoreReclaims::Ignore, - ); - } - // If the above sizing function is correct, just one AppendVec is enough to hold // all the data for the slot assert!(self.storage.get_slot_storage_entry(slot).is_some()); @@ -6988,7 +6897,6 @@ impl AccountsDb { max_slot: Slot, config: &CalcAccountsHashConfig<'_>, ) -> Result<(AccountsHash, u64), AccountsHashVerificationError> { - use AccountsHashVerificationError::*; let mut collect = Measure::start("collect"); let keys: Vec<_> = self .accounts_index @@ -7016,9 +6924,6 @@ impl AccountsDb { let result: Vec = pubkeys .iter() .filter_map(|pubkey| { - if self.is_filler_account(pubkey) { - return None; - } if let AccountIndexGetResult::Found(lock, index) = self.accounts_index.get(pubkey, config.ancestors, Some(max_slot)) { @@ -7044,7 +6949,7 @@ impl AccountsDb { let mut loaded_hash = loaded_account.loaded_hash(); let balance = loaded_account.lamports(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); - if (config.check_hash || hash_is_missing) && !self.is_filler_account(pubkey) { + if config.check_hash || hash_is_missing { let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { @@ -7087,7 +6992,7 @@ impl AccountsDb { "{} mismatched account hash(es) found", mismatch_found.load(Ordering::Relaxed) ); - return Err(MismatchedAccountsHash); + return Err(AccountsHashVerificationError::MismatchedAccountsHash); } scan.stop(); @@ -7635,12 +7540,13 @@ impl AccountsDb { bins: usize, bin_range: &Range, config: &CalcAccountsHashConfig<'_>, - filler_account_suffix: Option<&Pubkey>, ) -> Result, AccountsHashVerificationError> { + assert!(bin_range.start < bins); + assert!(bin_range.end <= bins); + assert!(bin_range.start < bin_range.end); let _guard = self.active_stats.activate(ActiveStatItem::HashScan); let bin_calculator = PubkeyBinCalculator24::new(bins); - assert!(bin_range.start < bins && bin_range.end 
<= bins && bin_range.start < bin_range.end); let mut time = Measure::start("scan all accounts"); stats.num_snapshot_storage = storages.storage_count(); stats.num_slots = storages.slot_count(); @@ -7654,7 +7560,6 @@ impl AccountsDb { bin_calculator: &bin_calculator, config, mismatch_found: mismatch_found.clone(), - filler_account_suffix, range, bin_range, sort_time: sort_time.clone(), @@ -7797,11 +7702,6 @@ impl AccountsDb { }; let accounts_hasher = AccountsHasher { - filler_account_suffix: if self.filler_accounts_config.count > 0 { - self.filler_account_suffix - } else { - None - }, zero_lamport_accounts: kind.zero_lamport_accounts(), dir_for_temp_cache_files: transient_accounts_hash_cache_path, active_stats: &self.active_stats, @@ -7815,7 +7715,6 @@ impl AccountsDb { PUBKEY_BINS_FOR_CALCULATING_HASHES, &bounds, config, - accounts_hasher.filler_account_suffix.as_ref(), )?; let cache_hash_data_files = cache_hash_data_file_references @@ -7873,7 +7772,6 @@ impl AccountsDb { base: Option<(Slot, /*capitalization*/ u64)>, config: VerifyAccountsHashAndLamportsConfig, ) -> Result<(), AccountsHashVerificationError> { - use AccountsHashVerificationError::*; let calc_config = CalcAccountsHashConfig { use_bg_thread_pool: config.use_bg_thread_pool, check_hash: false, @@ -7897,14 +7795,14 @@ impl AccountsDb { )?; let found_incremental_accounts_hash = self .get_incremental_accounts_hash(slot) - .ok_or(MissingAccountsHash)?; + .ok_or(AccountsHashVerificationError::MissingAccountsHash)?; if calculated_incremental_accounts_hash != found_incremental_accounts_hash { warn!( "mismatched incremental accounts hash for slot {slot}: \ {calculated_incremental_accounts_hash:?} (calculated) != {found_incremental_accounts_hash:?} (expected)" ); if hash_mismatch_is_error { - return Err(MismatchedAccountsHash); + return Err(AccountsHashVerificationError::MismatchedAccountsHash); } } } else { @@ -7922,18 +7820,22 @@ impl AccountsDb { "Mismatched total lamports: {} calculated: {}", total_lamports, calculated_lamports ); - return Err(MismatchedTotalLamports(calculated_lamports, total_lamports)); + return Err(AccountsHashVerificationError::MismatchedTotalLamports( + calculated_lamports, + total_lamports, + )); } - let (found_accounts_hash, _) = - self.get_accounts_hash(slot).ok_or(MissingAccountsHash)?; + let (found_accounts_hash, _) = self + .get_accounts_hash(slot) + .ok_or(AccountsHashVerificationError::MissingAccountsHash)?; if calculated_accounts_hash != found_accounts_hash { warn!( "Mismatched accounts hash for slot {slot}: \ {calculated_accounts_hash:?} (calculated) != {found_accounts_hash:?} (expected)" ); if hash_mismatch_is_error { - return Err(MismatchedAccountsHash); + return Err(AccountsHashVerificationError::MismatchedAccountsHash); } } } @@ -8044,11 +7946,6 @@ impl AccountsDb { hashes.retain(|k| k.0 != ignore); } - if self.filler_accounts_enabled() { - // filler accounts must be added to 'dirty_keys' above but cannot be used to calculate hash - hashes.retain(|(pubkey, _hash)| !self.is_filler_account(pubkey)); - } - let accounts_delta_hash = AccountsDeltaHash(AccountsHasher::accumulate_account_hashes(hashes)); accumulate.stop(); @@ -8186,26 +8083,24 @@ impl AccountsDb { } } - fn should_not_shrink(aligned_bytes: u64, total_bytes: u64) -> bool { - aligned_bytes + PAGE_SIZE > total_bytes + fn should_not_shrink(alive_bytes: u64, total_bytes: u64) -> bool { + alive_bytes + PAGE_SIZE > total_bytes } fn is_shrinking_productive(slot: Slot, store: &Arc) -> bool { let alive_count = store.count(); let stored_count = 
store.approx_stored_count(); - let alive_bytes = store.alive_bytes(); + let alive_bytes = store.alive_bytes() as u64; let total_bytes = store.capacity(); - let aligned_bytes = Self::page_align(alive_bytes as u64); - if Self::should_not_shrink(aligned_bytes, total_bytes) { + if Self::should_not_shrink(alive_bytes, total_bytes) { trace!( - "shrink_slot_forced ({}): not able to shrink at all: alive/stored: ({} / {}) ({}b / {}b) save: {}", + "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {} ({}b / {}b) save: {}", slot, alive_count, stored_count, - aligned_bytes, total_bytes, - total_bytes.saturating_sub(aligned_bytes), + total_bytes.saturating_sub(alive_bytes), ); return false; } @@ -9102,91 +8997,6 @@ impl AccountsDb { } } - fn filler_unique_id_bytes() -> usize { - std::mem::size_of::() - } - - fn filler_rent_partition_prefix_bytes() -> usize { - std::mem::size_of::() - } - - fn filler_prefix_bytes() -> usize { - Self::filler_unique_id_bytes() + Self::filler_rent_partition_prefix_bytes() - } - - pub fn is_filler_account_helper( - pubkey: &Pubkey, - filler_account_suffix: Option<&Pubkey>, - ) -> bool { - let offset = Self::filler_prefix_bytes(); - filler_account_suffix - .as_ref() - .map(|filler_account_suffix| { - pubkey.as_ref()[offset..] == filler_account_suffix.as_ref()[offset..] - }) - .unwrap_or_default() - } - - /// true if 'pubkey' is a filler account - pub fn is_filler_account(&self, pubkey: &Pubkey) -> bool { - Self::is_filler_account_helper(pubkey, self.filler_account_suffix.as_ref()) - } - - /// true if it is possible that there are filler accounts present - pub fn filler_accounts_enabled(&self) -> bool { - self.filler_account_suffix.is_some() - } - - /// return 'AccountSharedData' and a hash for a filler account - fn get_filler_account(&self, rent: &Rent) -> (AccountSharedData, AccountHash) { - let string = "FiLLERACCoUNTooooooooooooooooooooooooooooooo"; - let hash = AccountHash(Hash::from_str(string).unwrap()); - let owner = Pubkey::from_str(string).unwrap(); - let space = self.filler_accounts_config.size; - let rent_exempt_reserve = rent.minimum_balance(space); - let lamports = rent_exempt_reserve; - let mut account = AccountSharedData::new(lamports, space, &owner); - // just non-zero rent epoch. filler accounts are rent-exempt - let dummy_rent_epoch = 2; - account.set_rent_epoch(dummy_rent_epoch); - (account, hash) - } - - fn get_filler_account_pubkeys(&self, count: usize) -> Vec { - (0..count) - .map(|_| { - let subrange = solana_sdk::pubkey::new_rand(); - self.get_filler_account_pubkey(&subrange) - }) - .collect() - } - - fn get_filler_account_pubkey(&self, subrange: &Pubkey) -> Pubkey { - // pubkey begins life as entire filler 'suffix' pubkey - let mut key = self.filler_account_suffix.unwrap(); - let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes(); - // first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes - key.as_mut()[0..rent_prefix_bytes] - .copy_from_slice(&subrange.as_ref()[0..rent_prefix_bytes]); - key - } - - /// filler accounts are space-holding accounts which are ignored by hash calculations and rent. - /// They are designed to allow a validator to run against a network successfully while simulating having many more accounts present. - /// All filler accounts share a common pubkey suffix. The suffix is randomly generated per validator on startup. - /// The filler accounts are added to each slot in the snapshot after index generation. 
- /// The accounts added in a slot are setup to have pubkeys such that rent will be collected from them before (or when?) their slot becomes an epoch old. - /// Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully. - pub fn maybe_add_filler_accounts(&self, epoch_schedule: &EpochSchedule, slot: Slot) { - if self.filler_accounts_config.count == 0 { - return; - } - - self.init_gradual_filler_accounts( - epoch_schedule.get_slots_in_epoch(epoch_schedule.get_epoch(slot)), - ); - } - pub fn generate_index( &self, limit_load_slot_count_from_snapshot: Option, @@ -9200,17 +9010,17 @@ impl AccountsDb { slots.truncate(limit); // get rid of the newer slots and keep just the older } let max_slot = slots.last().cloned().unwrap_or_default(); - let schedule = genesis_config.epoch_schedule; + let schedule = &genesis_config.epoch_schedule; let rent_collector = RentCollector::new( schedule.get_epoch(max_slot), - schedule, + schedule.clone(), genesis_config.slots_per_year(), - genesis_config.rent, + genesis_config.rent.clone(), ); let accounts_data_len = AtomicU64::new(0); let rent_paying_accounts_by_partition = - Mutex::new(RentPayingAccountsByPartition::new(&schedule)); + Mutex::new(RentPayingAccountsByPartition::new(schedule)); // pass == 0 always runs and generates the index // pass == 1 only runs if verify == true. @@ -9598,7 +9408,7 @@ impl AccountsDb { store.count(), ); { - let mut count_and_status = store.count_and_status.write().unwrap(); + let mut count_and_status = store.count_and_status.lock_write(); assert_eq!(count_and_status.0, 0); count_and_status.0 = entry.count; } @@ -9611,7 +9421,7 @@ impl AccountsDb { ); } else { trace!("id: {} clearing count", id); - store.count_and_status.write().unwrap().0 = 0; + store.count_and_status.lock_write().0 = 0; } } storage_size_storages_time.stop(); @@ -9628,7 +9438,7 @@ impl AccountsDb { " slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {} (recycled: {:?})", entry.slot(), entry.append_vec_id(), - *entry.count_and_status.read().unwrap(), + entry.count_and_status.read(), entry.approx_store_count.load(Ordering::Relaxed), entry.accounts.len(), entry.accounts.capacity(), @@ -9666,7 +9476,7 @@ impl AccountsDb { " slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}", slot, entry.append_vec_id(), - *entry.count_and_status.read().unwrap(), + entry.count_and_status.read(), entry.approx_store_count.load(Ordering::Relaxed), entry.accounts.len(), entry.accounts.capacity(), @@ -9709,60 +9519,9 @@ pub(crate) enum UpdateIndexThreadSelection { PoolWithThreshold, } -#[cfg(test)] -impl AccountsDb { - pub fn new_with_config_for_tests( - paths: Vec, - cluster_type: &ClusterType, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_config( - paths, - cluster_type, - account_indexes, - shrink_ratio, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ) - } - - pub fn new_sized(paths: Vec, file_size: u64) -> Self { - AccountsDb { - file_size, - ..AccountsDb::new(paths, &ClusterType::Development) - } - } - - pub fn new_sized_caching(paths: Vec, file_size: u64) -> Self { - AccountsDb { - file_size, - ..AccountsDb::new(paths, &ClusterType::Development) - } - } - - pub fn new_sized_no_extra_stores(paths: Vec, file_size: u64) -> Self { - AccountsDb { - file_size, - ..AccountsDb::new(paths, &ClusterType::Development) - } - } - - pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option { - let 
ancestors = vec![(slot, 1)].into_iter().collect(); - let result = self.accounts_index.get(pubkey, Some(&ancestors), None); - result.map(|(list, index)| list.slot_list()[index].1.store_id()) - } -} - // These functions/fields are only usable from a dev context (i.e. tests and benches) #[cfg(feature = "dev-context-only-utils")] impl AccountsDb { - pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self { - Self::new_for_tests(paths, cluster_type) - } - pub fn load_without_fixed_root( &self, ancestors: &Ancestors, @@ -10025,6 +9784,7 @@ pub mod tests { accounts_index::{ tests::*, AccountSecondaryIndexesIncludeExclude, ReadAccountMapEntry, RefCount, }, + ancient_append_vecs, append_vec::{test_utils::TempFile, AppendVecStoredAccountMeta}, cache_hash_data::CacheHashDataFile, inline_spl_token, @@ -10061,6 +9821,12 @@ pub mod tests { } impl AccountsDb { + pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> { + let ancestors = vec![(slot, 1)].into_iter().collect(); + let result = self.accounts_index.get(pubkey, Some(&ancestors), None); + result.map(|(list, index)| list.slot_list()[index].1.store_id()) + } + fn scan_snapshot_stores( &self, storage: &SortedStorages, @@ -10081,7 +9847,6 @@ pub mod tests { check_hash, ..CalcAccountsHashConfig::default() }, - None, ) .map(|references| { references @@ -10202,6 +9967,47 @@ pub mod tests { } } + #[test] + fn test_create_ancient_append_vec() { + let ancient_append_vec_size = ancient_append_vecs::get_ancient_append_vec_capacity(); + let db = AccountsDb::new_single_for_tests(); + + { + // create an ancient appendvec from a small appendvec; the resulting + // ancient appendvec should have the ideal ancient appendvec + // capacity. + let mut current_ancient = CurrentAncientAppendVec::default(); + let slot0 = 0; + + // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense + let _existing_append_vec = db.create_and_insert_store(slot0, 1000, "test"); + let _ = current_ancient.create_ancient_append_vec(slot0, &db, 0); + assert_eq!( + current_ancient.append_vec().capacity(), + ancient_append_vec_size + ); + } + + { + // create an ancient appendvec from a large appendvec (bigger than + // the ideal ancient_append_vec_size); the resulting ancient appendvec + // should match the larger requested size.
+ let mut current_ancient = CurrentAncientAppendVec::default(); + let slot1 = 1; + // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense + let _existing_append_vec = db.create_and_insert_store(slot1, 1000, "test"); + let _ = current_ancient.create_ancient_append_vec( + slot1, + &db, + 2 * ancient_append_vec_size as usize, + ); + assert_eq!( + current_ancient.append_vec().capacity(), + 2 * ancient_append_vec_size + ); + } + } + #[test] fn test_maybe_unref_accounts_already_in_ancient() { let db = AccountsDb::new_single_for_tests(); @@ -10248,7 +10054,7 @@ pub mod tests { // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense let _existing_append_vec = db.create_and_insert_store(slot0, 1000, "test"); { - let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot0, &db); + let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot0, &db, 0); } let mut ancient_slot_pubkeys = AncientSlotPubkeys::default(); assert!(ancient_slot_pubkeys.inner.is_none()); @@ -10263,7 +10069,7 @@ pub mod tests { // different slot than current_ancient, so update 'ancient_slot_pubkeys' // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense let _existing_append_vec = db.create_and_insert_store(slot1, 1000, "test"); - let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot1, &db); + let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot1, &db, 0); let slot2 = 2; ancient_slot_pubkeys.maybe_unref_accounts_already_in_ancient( slot2, @@ -10432,9 +10238,7 @@ pub mod tests { } #[test] - #[should_panic( - expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end" - )] + #[should_panic(expected = "bin_range.start < bins")] fn test_accountsdb_scan_snapshot_stores_illegal_range_start() { let mut stats = HashStats::default(); let bounds = Range { start: 2, end: 2 }; @@ -10445,9 +10249,7 @@ pub mod tests { .unwrap(); } #[test] - #[should_panic( - expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end" - )] + #[should_panic(expected = "bin_range.end <= bins")] fn test_accountsdb_scan_snapshot_stores_illegal_range_end() { let mut stats = HashStats::default(); let bounds = Range { start: 1, end: 3 }; @@ -10459,9 +10261,7 @@ pub mod tests { } #[test] - #[should_panic( - expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end" - )] + #[should_panic(expected = "bin_range.start < bin_range.end")] fn test_accountsdb_scan_snapshot_stores_illegal_range_inverse() { let mut stats = HashStats::default(); let bounds = Range { start: 1, end: 0 }; @@ -10938,7 +10738,7 @@ pub mod tests { expected[0].push(raw_expected[index]); } let mut result2 = (0..range).map(|_| Vec::default()).collect::>(); - if let Some(m) = result.get(0) { + if let Some(m) = result.first() { m.load_all(&mut result2, bin, &PubkeyBinCalculator24::new(bins)); } else { result2 = vec![]; @@ -10990,7 +10790,7 @@ pub mod tests { solana_logger::setup(); let (storages, _size, _slot_expected) = sample_storage(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let result = db .calculate_accounts_hash_from_storages( &CalcAccountsHashConfig::default(), @@ -11007,7 +10807,7 @@ pub mod tests { fn test_accountsdb_calculate_accounts_hash_from_storages() { solana_logger::setup(); - let db = 
AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let (storages, raw_expected) = sample_storages_and_accounts(&db); let expected_hash = AccountsHasher::compute_merkle_root_loop(raw_expected.clone(), MERKLE_FANOUT, |item| { @@ -11263,7 +11063,7 @@ pub mod tests { sample_storage_with_entries_id(tf, write_version, slot, pubkey, 0, mark_alive, None) } - fn sample_storage_with_entries_id( + fn sample_storage_with_entries_id_fill_percentage( tf: &TempFile, write_version: StoredMetaWriteVersion, slot: Slot, @@ -11271,11 +11071,17 @@ pub mod tests { id: AppendVecId, mark_alive: bool, account_data_size: Option, + fill_percentage: u64, ) -> Arc { let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); - let size: usize = aligned_stored_size(account_data_size.unwrap_or(123) as usize); - let mut data = AccountStorageEntry::new(&paths[0], slot, id, size as u64); - let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, (1024 * 1024).max(size))); + let file_size = account_data_size.unwrap_or(123) * 100 / fill_percentage; + let size_aligned: usize = aligned_stored_size(file_size as usize); + let mut data = AccountStorageEntry::new(&paths[0], slot, id, size_aligned as u64); + let av = AccountsFile::AppendVec(AppendVec::new( + &tf.path, + true, + (1024 * 1024).max(size_aligned), + )); data.accounts = av; let arc = Arc::new(data); @@ -11283,6 +11089,27 @@ pub mod tests { arc } + fn sample_storage_with_entries_id( + tf: &TempFile, + write_version: StoredMetaWriteVersion, + slot: Slot, + pubkey: &Pubkey, + id: AppendVecId, + mark_alive: bool, + account_data_size: Option, + ) -> Arc { + sample_storage_with_entries_id_fill_percentage( + tf, + write_version, + slot, + pubkey, + id, + mark_alive, + account_data_size, + 100, + ) + } + #[test] fn test_accountsdb_scan_multiple_account_storage_no_bank_one_slot() { solana_logger::setup(); @@ -11362,7 +11189,7 @@ pub mod tests { #[test] fn test_accountsdb_add_root() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); @@ -11378,7 +11205,7 @@ pub mod tests { #[test] fn test_accountsdb_latest_ancestor() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); @@ -11414,7 +11241,7 @@ pub mod tests { #[test] fn test_accountsdb_latest_ancestor_with_root() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); @@ -11440,7 +11267,7 @@ pub mod tests { #[test] fn test_accountsdb_root_one_slot() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); @@ -11493,7 +11320,7 @@ pub mod tests { #[test] fn test_accountsdb_add_root_many() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let mut pubkeys: Vec = vec![]; db.create_account(&mut pubkeys, 0, 100, 0, 0); @@ -11578,7 +11405,7 @@ pub mod tests { let key = Pubkey::default(); // 1 token in the "root", i.e. 
db zero - let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db0 = AccountsDb::new_single_for_tests(); let account0 = AccountSharedData::new(1, 0, &key); db0.store_for_tests(0, &[(&key, &account0)]); @@ -11603,7 +11430,7 @@ pub mod tests { fn run_test_remove_unrooted_slot(is_cached: bool) { let unrooted_slot = 9; let unrooted_bank_id = 9; - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); let ancestors = vec![(unrooted_slot, 1)].into_iter().collect(); @@ -11675,7 +11502,7 @@ pub mod tests { #[test] fn test_account_one() { let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap(); - let db = AccountsDb::new(paths, &ClusterType::Development); + let db = AccountsDb::new_for_tests(paths, &ClusterType::Development); let mut pubkeys: Vec = vec![]; db.create_account(&mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -11690,7 +11517,7 @@ pub mod tests { #[test] fn test_account_many() { let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap(); - let db = AccountsDb::new(paths, &ClusterType::Development); + let db = AccountsDb::new_for_tests(paths, &ClusterType::Development); let mut pubkeys: Vec = vec![]; db.create_account(&mut pubkeys, 0, 100, 0, 0); db.check_accounts(&pubkeys, 0, 100, 1); @@ -11710,7 +11537,10 @@ pub mod tests { fn test_account_grow_many() { let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap(); let size = 4096; - let accounts = AccountsDb::new_sized(paths, size); + let accounts = AccountsDb { + file_size: size, + ..AccountsDb::new_for_tests(paths, &ClusterType::Development) + }; let mut keys = vec![]; for i in 0..9 { let key = solana_sdk::pubkey::new_rand(); @@ -11826,7 +11656,7 @@ pub mod tests { //This test is pedantic //A slot is purged when a non root bank is cleaned up. If a slot is behind root but it is //not root, it means we are retaining dead banks. 
- let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); //store an account @@ -11878,7 +11708,7 @@ pub mod tests { fn test_clean_zero_lamport_and_dead_slot() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); @@ -11942,7 +11772,7 @@ pub mod tests { fn test_clean_multiple_zero_lamport_decrements_index_ref_count() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let zero_lamport_account = @@ -11990,7 +11820,7 @@ pub mod tests { fn test_clean_zero_lamport_and_old_roots() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); let zero_lamport_account = @@ -12035,7 +11865,7 @@ pub mod tests { fn test_clean_old_with_normal_account() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); //store an account @@ -12063,7 +11893,7 @@ pub mod tests { fn test_clean_old_with_zero_lamport_account() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); @@ -12097,12 +11927,10 @@ pub mod tests { fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { solana_logger::setup(); - let mut accounts = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - spl_token_mint_index_enabled(), - AccountShrinkThreshold::default(), - ); + let mut accounts = AccountsDb { + account_indexes: spl_token_mint_index_enabled(), + ..AccountsDb::new_single_for_tests() + }; let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); @@ -12240,7 +12068,7 @@ pub mod tests { fn test_clean_max_slot_zero_lamport_account() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -12285,7 +12113,7 @@ pub mod tests { fn test_uncleaned_roots_with_account() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 0, 
AccountSharedData::default().owner()); //store an account @@ -12305,7 +12133,7 @@ pub mod tests { fn test_uncleaned_roots_with_no_account() { solana_logger::setup(); - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); @@ -12473,7 +12301,10 @@ pub mod tests { let min_file_bytes = std::mem::size_of::() + std::mem::size_of::(); - let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64)); + let db = Arc::new(AccountsDb { + file_size: min_file_bytes as u64, + ..AccountsDb::new_single_for_tests() + }); db.add_root(slot); let thread_hdls: Vec<_> = (0..num_threads) @@ -12512,7 +12343,7 @@ pub mod tests { #[test] fn test_accountsdb_scan_accounts() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let key0 = solana_sdk::pubkey::new_rand(); let account0 = AccountSharedData::new(1, 0, &key); @@ -12586,7 +12417,7 @@ pub mod tests { #[test] fn test_store_large_account() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let data_len = DEFAULT_FILE_SIZE as usize + 7; @@ -12701,7 +12532,7 @@ pub mod tests { #[test] fn test_bank_hash_stats() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let some_data_len = 5; @@ -12729,7 +12560,7 @@ pub mod tests { #[test] fn test_calculate_accounts_hash_check_hash_mismatch() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -12792,7 +12623,7 @@ pub mod tests { #[test] fn test_calculate_accounts_hash_check_hash() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -12832,9 +12663,8 @@ pub mod tests { #[test] fn test_verify_accounts_hash() { - use AccountsHashVerificationError::*; solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -12864,7 +12694,7 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 1, None, config.clone()), - Err(MissingAccountsHash) + Err(AccountsHashVerificationError::MissingAccountsHash) ); db.set_accounts_hash( @@ -12874,16 +12704,15 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 1, None, config), - Err(MismatchedAccountsHash) + Err(AccountsHashVerificationError::MismatchedAccountsHash) ); } #[test] fn test_verify_bank_capitalization() { for pass in 0..2 { - use AccountsHashVerificationError::*; solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -12928,7 +12757,7 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 10, None, config), - Err(MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10 + 
Err(AccountsHashVerificationError::MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10 ); } } @@ -12936,7 +12765,7 @@ pub mod tests { #[test] fn test_verify_accounts_hash_no_account() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let some_slot: Slot = 0; let ancestors = vec![(some_slot, 0)].into_iter().collect(); @@ -12960,9 +12789,8 @@ pub mod tests { #[test] fn test_verify_accounts_hash_bad_account_hash() { - use AccountsHashVerificationError::*; solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let some_data_len = 0; @@ -12995,14 +12823,17 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 1, None, config), - Err(MismatchedAccountsHash) + Err(AccountsHashVerificationError::MismatchedAccountsHash) ); } #[test] fn test_storage_finder() { solana_logger::setup(); - let db = AccountsDb::new_sized(Vec::new(), 16 * 1024); + let db = AccountsDb { + file_size: 16 * 1024, + ..AccountsDb::new_single_for_tests() + }; let key = solana_sdk::pubkey::new_rand(); let lamports = 100; let data_len = 8190; @@ -13014,13 +12845,13 @@ pub mod tests { #[test] fn test_get_snapshot_storages_empty() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); assert!(db.get_snapshot_storages(..=0).0.is_empty()); } #[test] fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account = AccountSharedData::new(1, 0, &key); @@ -13039,7 +12870,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_only_non_empty() { for pass in 0..2 { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account = AccountSharedData::new(1, 0, &key); @@ -13062,7 +12893,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_only_roots() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account = AccountSharedData::new(1, 0, &key); @@ -13078,7 +12909,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_exclude_empty() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account = AccountSharedData::new(1, 0, &key); @@ -13098,7 +12929,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_with_base_slot() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account = AccountSharedData::new(1, 0, &key); @@ -13113,7 +12944,7 @@ pub mod tests { #[test] #[should_panic(expected = "double remove of account in slot: 0/store: 0!!")] fn test_storage_remove_account_double_remove() { - let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new_single_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); accounts.store_for_tests(0, &[(&pubkey, &account)]); @@ -13144,7 +12975,10 @@ pub mod tests { let zero_lamport_account = 
AccountSharedData::new(zero_lamport, data_size, &owner); let mut current_slot = 0; - let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size); + let accounts = AccountsDb { + file_size: store_size, + ..AccountsDb::new_single_for_tests() + }; // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; @@ -13744,12 +13578,7 @@ pub mod tests { #[test] fn test_store_clean_after_shrink() { solana_logger::setup(); - let accounts = AccountsDb::new_with_config_for_tests( - vec![], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts = AccountsDb::new_single_for_tests(); let epoch_schedule = EpochSchedule::default(); let account = AccountSharedData::new(1, 16 * 4096, &Pubkey::default()); @@ -13793,7 +13622,10 @@ pub mod tests { #[test] fn test_store_reuse() { solana_logger::setup(); - let accounts = AccountsDb::new_sized_caching(vec![], 4096); + let accounts = AccountsDb { + file_size: 4096, + ..AccountsDb::new_single_for_tests() + }; let size = 100; let num_accounts: usize = 100; @@ -13859,7 +13691,7 @@ pub mod tests { #[test] #[should_panic(expected = "We've run out of storage ids!")] fn test_wrapping_append_vec_id() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -13886,7 +13718,7 @@ pub mod tests { #[should_panic(expected = "We've run out of storage ids!")] fn test_reuse_append_vec_id() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -13912,7 +13744,7 @@ pub mod tests { #[test] fn test_zero_lamport_new_root_not_cleaned() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let account_key = Pubkey::new_unique(); let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -13937,7 +13769,7 @@ pub mod tests { #[test] fn test_store_load_cached() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); let slot = 0; @@ -13969,7 +13801,7 @@ pub mod tests { #[test] fn test_store_flush_load_cached() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); let slot = 0; @@ -13996,7 +13828,7 @@ pub mod tests { #[test] fn test_flush_accounts_cache() { - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); let unrooted_slot = 4; @@ -14061,7 +13893,7 @@ pub mod tests { } fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) { - let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new_single_for_tests(); db.write_cache_limit_bytes = Some(max_cache_slots() as u64); let space = 1; // # data bytes per account. 
write cache counts data len let account0 = AccountSharedData::new(1, space, &Pubkey::default()); @@ -14116,12 +13948,7 @@ pub mod tests { #[test] fn test_read_only_accounts_cache() { - let db = Arc::new(AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let db = Arc::new(AccountsDb::new_single_for_tests()); let account_key = Pubkey::new_unique(); let zero_lamport_account = @@ -14161,12 +13988,7 @@ pub mod tests { #[test] fn test_account_matches_owners() { - let db = Arc::new(AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let db = Arc::new(AccountsDb::new_single_for_tests()); let owners: Vec = (0..2).map(|_| Pubkey::new_unique()).collect(); @@ -14258,12 +14080,7 @@ pub mod tests { #[test] fn test_flush_cache_clean() { - let db = Arc::new(AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let db = Arc::new(AccountsDb::new_single_for_tests()); let account_key = Pubkey::new_unique(); let zero_lamport_account = @@ -14307,12 +14124,7 @@ pub mod tests { #[test] fn test_flush_cache_dont_clean_zero_lamport_account() { - let db = Arc::new(AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let db = Arc::new(AccountsDb::new_single_for_tests()); let zero_lamport_account_key = Pubkey::new_unique(); let other_account_key = Pubkey::new_unique(); @@ -14451,12 +14263,7 @@ pub mod tests { #[test] fn test_scan_flush_accounts_cache_then_clean_drop() { - let db = Arc::new(AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let db = Arc::new(AccountsDb::new_single_for_tests()); let account_key = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); let zero_lamport_account = @@ -14566,12 +14373,7 @@ pub mod tests { #[test] fn test_alive_bytes() { - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let slot: Slot = 0; let num_keys = 10; @@ -14610,12 +14412,7 @@ pub mod tests { scan_slot: Option, write_cache_limit_bytes: Option, ) -> (Arc, Vec, Vec, Option) { - let mut accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let mut accounts_db = AccountsDb::new_single_for_tests(); accounts_db.write_cache_limit_bytes = write_cache_limit_bytes; let accounts_db = Arc::new(accounts_db); @@ -15017,12 +14814,7 @@ pub mod tests { } fn run_test_shrink_unref(do_intra_cache_clean: bool) { - let db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let db = AccountsDb::new_single_for_tests(); let epoch_schedule = EpochSchedule::default(); let account_key1 = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); @@ -15092,7 +14884,7 @@ pub mod tests { #[test] fn test_partial_clean() { solana_logger::setup(); - let db = 
AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let account_key1 = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); @@ -15277,12 +15069,7 @@ pub mod tests { fn do_test_load_account_and_cache_flush_race(with_retry: bool) { solana_logger::setup(); - let mut db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let mut db = AccountsDb::new_single_for_tests(); db.load_delay = RACY_SLEEP_MS; let db = Arc::new(db); let pubkey = Arc::new(Pubkey::new_unique()); @@ -15350,12 +15137,7 @@ pub mod tests { } fn do_test_load_account_and_shrink_race(with_retry: bool) { - let mut db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let mut db = AccountsDb::new_single_for_tests(); let epoch_schedule = EpochSchedule::default(); db.load_delay = RACY_SLEEP_MS; let db = Arc::new(db); @@ -15416,12 +15198,7 @@ pub mod tests { #[test] fn test_cache_flush_delayed_remove_unrooted_race() { - let mut db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let mut db = AccountsDb::new_single_for_tests(); db.load_delay = RACY_SLEEP_MS; let db = Arc::new(db); let slot = 10; @@ -15486,12 +15263,7 @@ pub mod tests { #[test] fn test_cache_flush_remove_unrooted_race_multiple_slots() { - let db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let db = AccountsDb::new_single_for_tests(); let db = Arc::new(db); let num_cached_slots = 100; @@ -15621,7 +15393,7 @@ pub mod tests { #[test] fn test_collect_uncleaned_slots_up_to_slot() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let slot1 = 11; let slot2 = 222; @@ -15651,7 +15423,7 @@ pub mod tests { #[test] fn test_remove_uncleaned_slots_and_collect_pubkeys() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let slot1 = 11; let slot2 = 222; @@ -15709,7 +15481,7 @@ pub mod tests { #[test] fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() { solana_logger::setup(); - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let slot1 = 11; let slot2 = 222; @@ -15912,7 +15684,7 @@ pub mod tests { // fake out the store count to avoid the assert for (_, store) in accounts.storage.iter() { store.alive_bytes.store(0, Ordering::Release); - let mut count_and_status = store.count_and_status.write().unwrap(); + let mut count_and_status = store.count_and_status.lock_write(); count_and_status.0 = 0; } @@ -15931,14 +15703,14 @@ pub mod tests { ); for (_, store) in accounts.storage.iter() { - assert_eq!(store.count_and_status.read().unwrap().0, 0); + assert_eq!(store.count_and_status.read().0, 0); assert_eq!(store.alive_bytes.load(Ordering::Acquire), 0); } accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { 
assert_eq!(store.append_vec_id(), 0); - assert_eq!(store.count_and_status.read().unwrap().0, count); + assert_eq!(store.count_and_status.read().0, count); assert_eq!(store.alive_bytes.load(Ordering::Acquire), 2); } } @@ -17093,7 +16865,7 @@ pub mod tests { append_vec.append_vec_id() ); - let _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db); + let _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0); assert_eq!(current_ancient.slot(), slot); assert_eq!(current_ancient.append_vec_id(), append_vec.append_vec_id()); } @@ -17105,13 +16877,13 @@ pub mod tests { let _existing_append_vec = db.create_and_insert_store(slot2, 1000, "test"); let mut current_ancient = CurrentAncientAppendVec::default(); - let mut _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db); + let mut _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0); let id = current_ancient.append_vec_id(); assert_eq!(current_ancient.slot(), slot2); assert!(is_ancient(¤t_ancient.append_vec().accounts)); let slot3 = 3; // should do nothing - let _shrink_in_progress = current_ancient.create_if_necessary(slot3, &db); + let _shrink_in_progress = current_ancient.create_if_necessary(slot3, &db, 0); assert_eq!(current_ancient.slot(), slot2); assert_eq!(current_ancient.append_vec_id(), id); assert!(is_ancient(¤t_ancient.append_vec().accounts)); @@ -17125,7 +16897,7 @@ pub mod tests { let _existing_append_vec = db.create_and_insert_store(slot2, 1000, "test"); { - let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot2, &db); + let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot2, &db, 0); } let id = current_ancient.append_vec_id(); assert_eq!(current_ancient.slot(), slot2); @@ -17134,7 +16906,7 @@ pub mod tests { // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense let _existing_append_vec = db.create_and_insert_store(slot3, 1000, "test"); - let mut _shrink_in_progress = current_ancient.create_ancient_append_vec(slot3, &db); + let mut _shrink_in_progress = current_ancient.create_ancient_append_vec(slot3, &db, 0); assert_eq!(current_ancient.slot(), slot3); assert!(is_ancient(¤t_ancient.append_vec().accounts)); assert_ne!(current_ancient.append_vec_id(), id); @@ -17343,17 +17115,6 @@ pub mod tests { let alive_total_one_account = 136 + space; if alive { - assert_eq!( - shrink_collect.aligned_total_bytes, - PAGE_SIZE - * if account_count >= 100 { - 4 - } else if account_count >= 50 { - 2 - } else { - 1 - } - ); let mut expected_alive_total_bytes = alive_total_one_account * normal_account_count; if append_opposite_zero_lamport_account { @@ -17365,13 +17126,11 @@ pub mod tests { expected_alive_total_bytes ); } else if append_opposite_alive_account { - assert_eq!(shrink_collect.aligned_total_bytes, 4096); assert_eq!( shrink_collect.alive_total_bytes, alive_total_one_account ); } else { - assert_eq!(shrink_collect.aligned_total_bytes, 0); assert_eq!(shrink_collect.alive_total_bytes, 0); } // expected_capacity is determined by what size append vec gets created when the write cache is flushed to an append vec. 
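// Illustrative sketch (not part of the patch): throughout this diff,
// `create_if_necessary` and `create_ancient_append_vec` take a new
// minimum-bytes argument (the `0`s and overflow byte counts passed above).
// A simplified model of the sizing rule the tests in this section exercise:
// the created ancient storage gets the ideal capacity unless the caller
// needs more bytes, in which case the larger request wins. The names
// `ideal_capacity` and `ancient_capacity_for` are hypothetical, not the
// AccountsDb API, and the real code may additionally align the result.
const fn ideal_capacity() -> u64 {
    // matches the 128MB constant this patch introduces in
    // get_ancient_append_vec_capacity()
    128 * 1024 * 1024
}

fn ancient_capacity_for(min_bytes: u64) -> u64 {
    ideal_capacity().max(min_bytes)
}

#[test]
fn capacity_grows_only_for_oversized_requests() {
    // small request: ideal size, as in test_create_ancient_append_vec's first case
    assert_eq!(ancient_capacity_for(0), ideal_capacity());
    // oversized request: the request wins, as in the second case
    assert_eq!(
        ancient_capacity_for(2 * ideal_capacity()),
        2 * ideal_capacity()
    );
}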
@@ -17488,6 +17247,152 @@ pub mod tests { ); } + #[test] + fn test_shrink_ancient_overflow_with_min_size() { + solana_logger::setup(); + + let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity(); + let num_normal_slots = 2; + + // build an ancient append vec at slot 'ancient_slot' with one `fat` + // account that's larger than the ideal ancient append vec size, to + // simulate an *oversized* append vec for shrinking. + let account_size = (1.5 * ideal_av_size as f64) as u64; + let (db, ancient_slot) = get_one_ancient_append_vec_and_others_with_account_size( + true, + num_normal_slots, + Some(account_size), + ); + + let max_slot_inclusive = ancient_slot + (num_normal_slots as Slot); + let initial_accounts = get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)); + + let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap(); + + // assert that min_size, which is about 1.5 * ideal_av_size, kicked in, + // so the ancient append vec capacity exceeds ideal_av_size + assert!(ancient.capacity() > ideal_av_size); + + // combine 1 normal append vec into the existing oversized ancient append vec. + db.combine_ancient_slots( + (ancient_slot..max_slot_inclusive).collect(), + CAN_RANDOMLY_SHRINK_FALSE, + ); + + compare_all_accounts( + &initial_accounts, + &get_all_accounts(&db, ancient_slot..max_slot_inclusive), + ); + + // the append vec at max_slot_inclusive-1 should NOT have been removed + // since the append vec is already oversized and we created an ancient + // append vec there. + let ancient2 = db + .storage + .get_slot_storage_entry(max_slot_inclusive - 1) + .unwrap(); + assert!(is_ancient(&ancient2.accounts)); + assert!(ancient2.capacity() > ideal_av_size); // min_size kicked in, which causes the appendvec to be larger than ideal_av_size + + // Combine normal append vec(s) into the existing ancient append vec. This + // will overflow the original ancient append vec because the oversized + // ancient append vec is already full. + db.combine_ancient_slots( + (ancient_slot..=max_slot_inclusive).collect(), + CAN_RANDOMLY_SHRINK_FALSE, + ); + + compare_all_accounts( + &initial_accounts, + &get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)), + );
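// Illustrative sketch (not part of the patch): the second combine pass above
// stops merging a slot's accounts into the current ancient storage once its
// remaining bytes cannot hold them, which is why the oversized storages
// checked below survive intact. A minimal model of that fit decision;
// `fits_in_current_ancient` and `remaining_bytes` are hypothetical
// stand-ins, not the AccountsDb API.
fn fits_in_current_ancient(required_bytes: u64, remaining_bytes: u64) -> bool {
    required_bytes <= remaining_bytes
}

#[test]
fn oversized_ancient_storage_accepts_nothing_more() {
    // an ancient append vec created with a large min_bytes is already full,
    // so effectively no bytes remain for further combining
    let remaining_bytes = 0;
    assert!(!fits_in_current_ancient(4096, remaining_bytes));
    // a freshly created ideal-size ancient append vec, by contrast, has room
    assert!(fits_in_current_ancient(4096, 128 * 1024 * 1024));
}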
+ // Nothing should be combined because the append vecs are oversized. + // min_size kicked in, which causes the appendvecs to be larger than ideal_av_size. + let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap(); + assert!(is_ancient(&ancient.accounts)); + assert!(ancient.capacity() > ideal_av_size); + + let ancient2 = db + .storage + .get_slot_storage_entry(max_slot_inclusive - 1) + .unwrap(); + assert!(is_ancient(&ancient2.accounts)); + assert!(ancient2.capacity() > ideal_av_size); + + let ancient3 = db + .storage + .get_slot_storage_entry(max_slot_inclusive) + .unwrap(); + assert!(is_ancient(&ancient3.accounts)); + assert!(ancient3.capacity() > ideal_av_size); + } + + #[test] + fn test_shrink_overflow_too_much() { + let num_normal_slots = 2; + let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity(); + let fat_account_size = (1.5 * ideal_av_size as f64) as u64; + + // Prepare 4 appendvecs to combine: [small, big, small, small] + let account_data_sizes = vec![100, fat_account_size, 100, 100]; + let (db, slot1) = create_db_with_storages_and_index_with_customized_account_size_per_slot( + true, + num_normal_slots + 1, + account_data_sizes, + ); + let storage = db.get_storage_for_slot(slot1).unwrap(); + let created_accounts = db.get_unique_accounts_from_storage(&storage); + + // Adjust alive_ratio for slot2 to test that it is shrinkable and is a + // candidate for squashing into the previous ancient append vec. + // However, because this appendvec is `oversized`, it can't be + // squashed into the ancient append vec at the previous slot (it exceeds + // the size limit). Therefore, a new "oversized" ancient append vec is + // created at slot2 as the overflow. This is what the "min_bytes" parameter + // of `fn create_ancient_append_vec` is used for. + let slot2 = slot1 + 1; + let storage2 = db.storage.get_slot_storage_entry(slot2).unwrap(); + let original_cap_slot2 = storage2.accounts.capacity(); + storage2 + .accounts + .set_current_len_for_tests(original_cap_slot2 as usize); + + // Combine appendvecs into an ancient append vec. + let slots_to_combine: Vec<Slot> = (slot1..slot1 + (num_normal_slots + 1) as Slot).collect(); + db.combine_ancient_slots(slots_to_combine, CAN_RANDOMLY_SHRINK_FALSE); + + // slot2 is too big to fit into the ideal ancient append vec at slot1, so slot2 won't be merged into slot1. + // slot1 will have its own ancient append vec. + assert!(db.storage.get_slot_storage_entry(slot1).is_some()); + let ancient = db.get_storage_for_slot(slot1).unwrap(); + assert!(is_ancient(&ancient.accounts)); + assert_eq!(ancient.capacity(), ideal_av_size); + + let after_store = db.get_storage_for_slot(slot1).unwrap(); + let GetUniqueAccountsResult { + stored_accounts: after_stored_accounts, + capacity: after_capacity, + } = db.get_unique_accounts_from_storage(&after_store); + assert!(created_accounts.capacity <= after_capacity); + assert_eq!(created_accounts.stored_accounts.len(), 1); + assert_eq!(after_stored_accounts.len(), 1); + + // slot2, even after shrinking, is still oversized. Therefore, an + // ancient append vec exists at slot2.
+ let storage2_after = db.storage.get_slot_storage_entry(slot2).unwrap(); + assert!(is_ancient(&storage2_after.accounts)); + assert!(storage2_after.capacity() > ideal_av_size); + let after_store = db.get_storage_for_slot(slot2).unwrap(); + let GetUniqueAccountsResult { + stored_accounts: after_stored_accounts, + capacity: after_capacity, + } = db.get_unique_accounts_from_storage(&after_store); + assert!(created_accounts.capacity <= after_capacity); + assert_eq!(created_accounts.stored_accounts.len(), 1); + assert_eq!(after_stored_accounts.len(), 1); + } + #[test] fn test_shrink_ancient_overflow() { solana_logger::setup(); @@ -17770,6 +17675,55 @@ pub mod tests { storage.remove_account(num_bytes, reset_accounts); } + pub(crate) fn create_storages_and_update_index_with_customized_account_size_per_slot( + db: &AccountsDb, + tf: Option<&TempFile>, + starting_slot: Slot, + num_slots: usize, + alive: bool, + account_data_sizes: Vec, + ) { + if num_slots == 0 { + return; + } + assert!(account_data_sizes.len() == num_slots + 1); + let local_tf = (tf.is_none()).then(|| { + crate::append_vec::test_utils::get_append_vec_path("create_storages_and_update_index") + }); + let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap()); + + let write_version1 = 0; + let starting_id = db + .storage + .iter() + .map(|storage| storage.1.append_vec_id()) + .max() + .unwrap_or(999); + for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) { + let id = starting_id + (i as AppendVecId); + let pubkey1 = solana_sdk::pubkey::new_rand(); + let storage = sample_storage_with_entries_id_fill_percentage( + tf, + write_version1, + starting_slot + (i as Slot), + &pubkey1, + id, + alive, + Some(*account_data_size), + 50, + ); + insert_store(db, Arc::clone(&storage)); + } + + let storage = db.get_storage_for_slot(starting_slot).unwrap(); + let created_accounts = db.get_unique_accounts_from_storage(&storage); + assert_eq!(created_accounts.stored_accounts.len(), 1); + + if alive { + populate_index(db, starting_slot..(starting_slot + (num_slots as Slot) + 1)); + } + } + pub(crate) fn create_storages_and_update_index( db: &AccountsDb, tf: Option<&TempFile>, @@ -17839,11 +17793,41 @@ pub mod tests { (db, slot1) } - fn get_one_ancient_append_vec_and_others( + pub(crate) fn create_db_with_storages_and_index_with_customized_account_size_per_slot( + alive: bool, + num_slots: usize, + account_data_size: Vec, + ) -> (AccountsDb, Slot) { + solana_logger::setup(); + + let db = AccountsDb::new_single_for_tests(); + + // create a single append vec with a single account in a slot + // add the pubkey to index if alive + // call combine_ancient_slots with the slot + // verify we create an ancient appendvec that has alive accounts and does not have dead accounts + + let slot1 = 1; + create_storages_and_update_index_with_customized_account_size_per_slot( + &db, + None, + slot1, + num_slots, + alive, + account_data_size, + ); + + let slot1 = slot1 as Slot; + (db, slot1) + } + + fn get_one_ancient_append_vec_and_others_with_account_size( alive: bool, num_normal_slots: usize, + account_data_size: Option, ) -> (AccountsDb, Slot) { - let (db, slot1) = create_db_with_storages_and_index(alive, num_normal_slots + 1, None); + let (db, slot1) = + create_db_with_storages_and_index(alive, num_normal_slots + 1, account_data_size); let storage = db.get_storage_for_slot(slot1).unwrap(); let created_accounts = db.get_unique_accounts_from_storage(&storage); @@ -17857,7 +17841,7 @@ pub mod tests { capacity: after_capacity, } = 
db.get_unique_accounts_from_storage(&after_store); if alive { - assert_ne!(created_accounts.capacity, after_capacity); + assert!(created_accounts.capacity <= after_capacity); } else { assert_eq!(created_accounts.capacity, after_capacity); } @@ -17868,6 +17852,13 @@ pub mod tests { (db, slot1) } + fn get_one_ancient_append_vec_and_others( + alive: bool, + num_normal_slots: usize, + ) -> (AccountsDb, Slot) { + get_one_ancient_append_vec_and_others_with_account_size(alive, num_normal_slots, None) + } + #[test] fn test_handle_dropped_roots_for_ancient() { solana_logger::setup(); @@ -18079,8 +18070,7 @@ pub mod tests { #[test] fn test_calculate_incremental_accounts_hash() { - let accounts_db = - AccountsDb::new_for_tests_with_caching(Vec::new(), &ClusterType::Development); + let accounts_db = AccountsDb::new_single_for_tests(); let owner = Pubkey::new_unique(); let mut accounts: Vec<_> = (0..10) diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index 1efd678873f620..34bd3d7b52a02f 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -328,7 +328,7 @@ pub mod tests { #[test] fn test_notify_account_at_accounts_update() { - let mut accounts = AccountsDb::new_single_for_tests_with_caching(); + let mut accounts = AccountsDb::new_single_for_tests(); let notifier = GeyserTestPlugin::default(); diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 77f1717a9ca259..97c761616e7ce3 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -4,7 +4,7 @@ use { StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, }, accounts_hash::AccountHash, - append_vec::{AppendVec, AppendVecError, MatchAccountOwnerError}, + append_vec::{AppendVec, AppendVecError}, storable_accounts::StorableAccounts, tiered_storage::error::TieredStorageError, }, @@ -40,6 +40,14 @@ pub enum AccountsFileError { TieredStorageError(#[from] TieredStorageError), } +#[derive(Error, Debug, PartialEq, Eq)] +pub enum MatchAccountOwnerError { + #[error("The account owner does not match with the provided list")] + NoMatch, + #[error("Unable to load the account")] + UnableToLoad, +} + pub type Result<T> = std::result::Result<T, AccountsFileError>; #[derive(Debug)] diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 7631ea694635b8..78662a04157744 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -469,7 +469,6 @@ impl CumulativeOffsets { #[derive(Debug)] pub struct AccountsHasher<'a> { - pub filler_account_suffix: Option<Pubkey>, pub zero_lamport_accounts: ZeroLamportAccounts, /// The directory where temporary cache files are put pub dir_for_temp_cache_files: PathBuf, @@ -495,11 +494,6 @@ struct ItemLocation<'a> { } impl<'a> AccountsHasher<'a> { - /// true if it is possible that there are filler accounts present - pub fn filler_accounts_enabled(&self) -> bool { - self.filler_account_suffix.is_some() - } - pub fn calculate_hash(hashes: Vec<Vec<Hash>>) -> (Hash, usize) { let cumulative_offsets = CumulativeOffsets::from_raw(&hashes); @@ -1151,7 +1145,6 @@ impl<'a> AccountsHasher<'a> { }; let mut overall_sum = 0; - let filler_accounts_enabled = self.filler_accounts_enabled(); while let Some(pointer) = working_set.pop() { let key = &sorted_data_by_pubkey[pointer.slot_group_index][pointer.offset].pubkey; @@ -1166,13 +1159,10 @@ impl<'a> AccountsHasher<'a> { // add lamports and get hash if item.lamports != 0 { - // do not include filler accounts in the hash - if !(filler_accounts_enabled && self.is_filler_account(&item.pubkey)) { - overall_sum = Self::checked_cast_for_capitalization( - item.lamports as u128 + overall_sum as u128, - ); - hashes.write(&item.hash.0); - } + overall_sum = Self::checked_cast_for_capitalization( + item.lamports as u128 + overall_sum as u128, + ); + hashes.write(&item.hash.0); } else { // if lamports == 0, check if they should be included if self.zero_lamport_accounts == ZeroLamportAccounts::Included { @@ -1196,13 +1186,6 @@ impl<'a> AccountsHasher<'a> { (hashes, overall_sum) } - fn is_filler_account(&self, pubkey: &Pubkey) -> bool { - crate::accounts_db::AccountsDb::is_filler_account_helper( - pubkey, - self.filler_account_suffix.as_ref(), - ) - } - /// input: /// vec: group of slot data, ordered by Slot (low to high) /// vec: [..] - items found in that slot range Sorted by: Pubkey, higher Slot, higher Write version (if pubkey =) @@ -1247,7 +1230,7 @@ pub enum ZeroLamportAccounts { pub struct AccountHash(pub Hash); // Ensure the newtype wrapper never changes size from the underlying Hash -// This also ensures there are no padding bytes, which is requried to safely implement Pod +// This also ensures there are no padding bytes, which is required to safely implement Pod const _: () = assert!(std::mem::size_of::<AccountHash>() == std::mem::size_of::<Hash>()); /// Hash of accounts @@ -1343,7 +1326,6 @@ mod tests { impl<'a> AccountsHasher<'a> { fn new(dir_for_temp_cache_files: PathBuf) -> Self { Self { - filler_account_suffix: None, zero_lamport_accounts: ZeroLamportAccounts::Excluded, dir_for_temp_cache_files, active_stats: &ACTIVE_STATS, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index a93669d186cb9a..4566a8923924d2 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -867,7 +867,11 @@ pub struct AccountsToStore<'a> { /// if 'accounts' contains more items than can be contained in the primary storage, then we have to split these accounts.
/// 'index_first_item_overflow' specifies the index of the first item in 'accounts' that will go into the overflow storage index_first_item_overflow: usize, - pub slot: Slot, + slot: Slot, + /// bytes required to store primary accounts + bytes_primary: usize, + /// bytes required to store overflow accounts + bytes_overflow: usize, } impl<'a> AccountsToStore<'a> { @@ -880,8 +884,11 @@ impl<'a> AccountsToStore<'a> { slot: Slot, ) -> Self { let num_accounts = accounts.len(); + let mut bytes_primary = alive_total_bytes; + let mut bytes_overflow = 0; // index of the first account that doesn't fit in the current append vec let mut index_first_item_overflow = num_accounts; // assume all fit + let initial_available_bytes = available_bytes as usize; if alive_total_bytes > available_bytes as usize { // not all the alive bytes fit, so we have to find how many accounts fit within available_bytes for (i, account) in accounts.iter().enumerate() { @@ -891,6 +898,9 @@ } else if index_first_item_overflow == num_accounts { // the # of accounts we have so far seen is the most that will fit in the current ancient append vec index_first_item_overflow = i; + bytes_primary = + initial_available_bytes.saturating_sub(available_bytes as usize); + bytes_overflow = alive_total_bytes.saturating_sub(bytes_primary); break; } } @@ -899,6 +909,8 @@ accounts, index_first_item_overflow, slot, + bytes_primary, + bytes_overflow, } } @@ -907,6 +919,14 @@ self.index_first_item_overflow < self.accounts.len() } + /// return # required bytes for the given selector + pub fn get_bytes(&self, selector: StorageSelector) -> usize { + match selector { + StorageSelector::Primary => self.bytes_primary, + StorageSelector::Overflow => self.bytes_overflow, + } + } + /// get the accounts to store in the given 'storage' pub fn get(&self, storage: StorageSelector) -> &[&'a StoredAccountMeta<'a>] { let range = match storage { @@ -915,16 +935,33 @@ }; &self.accounts[range] } + + pub fn slot(&self) -> Slot { + self.slot + } } /// capacity of an ancient append vec -pub fn get_ancient_append_vec_capacity() -> u64 { +#[allow(clippy::assertions_on_constants, dead_code)] +pub const fn get_ancient_append_vec_capacity() -> u64 { + // There is a trade-off in selecting the ancient append vec size: smaller non-ancient append vecs get + // combined into large ancient append vecs, so too small a size results in too many ancient append vec + // memory-mapped files, while too large a size makes them difficult to clean and shrink. Hence, we choose + // approximately 128MB for the ancient append vec size. + const RESULT: u64 = 128 * 1024 * 1024; + use crate::append_vec::MAXIMUM_APPEND_VEC_FILE_SIZE; - // smaller than max by a bit just in case - // some functions add slop on allocation - // The bigger an append vec is, the more unwieldy it becomes to shrink, create, write. - // 1/10 of max is a reasonable size in practice. - MAXIMUM_APPEND_VEC_FILE_SIZE / 10 - 2048 + const _: () = assert!( + RESULT < MAXIMUM_APPEND_VEC_FILE_SIZE, + "ancient append vec size should be less than the maximum append vec size" + ); + const PAGE_SIZE: u64 = 4 * 1024; + const _: () = assert!( + RESULT % PAGE_SIZE == 0, + "ancient append vec size should be a multiple of PAGE_SIZE" + ); + + RESULT } /// is this a max-size append vec designed to be used as an ancient append vec?
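[editor note: the following is a minimal standalone sketch, not part of the patch, illustrating the bytes_primary/bytes_overflow accounting that AccountsToStore::new performs above; `account_sizes` and `available_bytes` are hypothetical stand-ins for the stored-account iteration in the real code]

```rust
// Sketch of the primary/overflow split: walk accounts in order, consume
// available bytes until one does not fit, then everything from that point
// on is overflow, mirroring the saturating arithmetic in the diff.
fn split_bytes(account_sizes: &[usize], available_bytes: usize) -> (usize, usize) {
    let alive_total_bytes: usize = account_sizes.iter().sum();
    let mut remaining = available_bytes;
    let mut bytes_primary = alive_total_bytes; // assume everything fits
    for size in account_sizes {
        if *size <= remaining {
            remaining -= *size;
        } else {
            // first account that does not fit: primary bytes are what we consumed
            bytes_primary = available_bytes - remaining;
            break;
        }
    }
    let bytes_overflow = alive_total_bytes.saturating_sub(bytes_primary);
    (bytes_primary, bytes_overflow)
}

fn main() {
    // three 100-byte accounts into 250 available bytes: two fit, one overflows
    assert_eq!(split_bytes(&[100, 100, 100], 250), (200, 100));
    // everything fits: all bytes are primary, none overflow
    assert_eq!(split_bytes(&[100, 100], 500), (200, 0));
}
```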
@@ -2040,6 +2077,9 @@ pub mod tests { accounts_to_store.has_overflow() ); assert!(accounts.is_empty()); + + assert_eq!(accounts_to_store.get_bytes(selector), account_size); + assert_eq!(accounts_to_store.get_bytes(get_opposite(&selector)), 0); } } fn get_opposite(selector: &StorageSelector) -> StorageSelector { @@ -2051,10 +2091,7 @@ pub mod tests { #[test] fn test_get_ancient_append_vec_capacity() { - assert_eq!( - get_ancient_append_vec_capacity(), - crate::append_vec::MAXIMUM_APPEND_VEC_FILE_SIZE / 10 - 2048 - ); + assert_eq!(get_ancient_append_vec_capacity(), 128 * 1024 * 1024); } #[test] @@ -3261,7 +3298,6 @@ pub mod tests { // irrelevant fields slot: 0, capacity: 0, - aligned_total_bytes: 0, alive_accounts: ShrinkCollectAliveSeparatedByRefs { one_ref: AliveAccounts::default(), many_refs_this_is_newest_alive: AliveAccounts::default(), diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index bd789aa3092ad9..782abee7f2a9ff 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -2,7 +2,7 @@ //! //! For more information, see: //! -//! +//! use { crate::{ @@ -10,7 +10,7 @@ use { AccountMeta, StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion, }, - accounts_file::{AccountsFileError, Result, ALIGN_BOUNDARY_OFFSET}, + accounts_file::{AccountsFileError, MatchAccountOwnerError, Result, ALIGN_BOUNDARY_OFFSET}, accounts_hash::AccountHash, storable_accounts::StorableAccounts, u64_align, @@ -96,14 +96,6 @@ impl<'append_vec> Iterator for AppendVecAccountsIter<'append_vec> { } } -#[derive(Error, Debug, PartialEq, Eq)] -pub enum MatchAccountOwnerError { - #[error("The account owner does not match with the provided list")] - NoMatch, - #[error("Unable to load the account")] - UnableToLoad, -} - /// References to account data stored elsewhere. Getting an `Account` requires cloning /// (see `StoredAccountMeta::clone_account()`). 
#[derive(PartialEq, Eq, Debug)] @@ -327,7 +319,8 @@ impl AppendVec { /// how many more bytes can be stored in this append vec pub fn remaining_bytes(&self) -> u64 { - (self.capacity()).saturating_sub(self.len() as u64) + self.capacity() + .saturating_sub(u64_align!(self.len()) as u64) } pub fn len(&self) -> usize { @@ -1002,10 +995,36 @@ pub mod tests { let av = AppendVec::new(&path.path, true, sz); assert_eq!(av.capacity(), sz64); assert_eq!(av.remaining_bytes(), sz64); + + // append first account, a u64-aligned account (136 bytes) + let mut av_len = 0; let account = create_test_account(0); av.append_account_test(&account).unwrap(); + av_len += STORE_META_OVERHEAD; assert_eq!(av.capacity(), sz64); assert_eq!(av.remaining_bytes(), sz64 - (STORE_META_OVERHEAD as u64)); + assert_eq!(av.len(), av_len); + + // append second account, a *not* u64-aligned account (137 bytes) + let account = create_test_account(1); + let account_storage_len = STORE_META_OVERHEAD + 1; + av_len += account_storage_len; + av.append_account_test(&account).unwrap(); + assert_eq!(av.capacity(), sz64); + assert_eq!(av.len(), av_len); + let alignment_bytes = u64_align!(av_len) - av_len; // bytes used for alignment (7 bytes) + assert_eq!(alignment_bytes, 7); + assert_eq!(av.remaining_bytes(), sz64 - u64_align!(av_len) as u64); + + // append third account, a *not* u64-aligned account (137 bytes) + let account = create_test_account(1); + av.append_account_test(&account).unwrap(); + let account_storage_len = STORE_META_OVERHEAD + 1; + av_len += alignment_bytes; // bytes used for alignment at the end of previous account + av_len += account_storage_len; + assert_eq!(av.capacity(), sz64); + assert_eq!(av.len(), av_len); + assert_eq!(av.remaining_bytes(), sz64 - u64_align!(av_len) as u64); } #[test] diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 1bf8ecd3d27149..61cfcdaccb194a 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -6,7 +6,6 @@ extern crate lazy_static; pub mod account_info; pub mod account_overrides; -pub mod account_rent_state; pub mod account_storage; pub mod accounts; pub mod accounts_cache; diff --git a/accounts-db/src/partitioned_rewards.rs b/accounts-db/src/partitioned_rewards.rs index 9d012a71a4a018..652a907d3a7bb6 100644 --- a/accounts-db/src/partitioned_rewards.rs +++ b/accounts-db/src/partitioned_rewards.rs @@ -2,7 +2,6 @@ //! use solana_sdk::clock::Slot; -#[allow(dead_code)] #[derive(Debug)] /// Configuration options for partitioned epoch rewards. /// This struct allows various forms of testing, especially prior to feature activation.
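[editor note: a small sketch of the alignment arithmetic behind the remaining_bytes() change above; the accounts-db u64_align! macro rounds a length up to the next multiple of 8 and is reproduced here as a plain function, purely for illustration]

```rust
// Round a length up to the next multiple of 8 bytes (what u64_align! does).
const fn u64_align(len: usize) -> usize {
    (len + 7) & !7
}

fn remaining_bytes(capacity: u64, len: usize) -> u64 {
    // The next append starts at the aligned offset, so the unaligned tail
    // bytes of the last stored account are not actually available.
    capacity.saturating_sub(u64_align(len) as u64)
}

fn main() {
    // 137 bytes in use: 7 padding bytes get consumed by alignment,
    // so a 1024-byte file has 880 bytes left, not 887.
    assert_eq!(u64_align(137), 144);
    assert_eq!(remaining_bytes(1024, 137), 880);
}
```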
@@ -53,7 +52,6 @@ pub enum TestPartitionedEpochRewards { }, } -#[allow(dead_code)] impl PartitionedEpochRewardsConfig { pub fn new(test: TestPartitionedEpochRewards) -> Self { match test { diff --git a/accounts-db/src/rent_collector.rs b/accounts-db/src/rent_collector.rs index cea0a07c9883b3..1a72cac88308b3 100644 --- a/accounts-db/src/rent_collector.rs +++ b/accounts-db/src/rent_collector.rs @@ -111,10 +111,9 @@ impl RentCollector { &self, address: &Pubkey, account: &mut AccountSharedData, - filler_account_suffix: Option<&Pubkey>, set_exempt_rent_epoch_max: bool, ) -> CollectedInfo { - match self.calculate_rent_result(address, account, filler_account_suffix) { + match self.calculate_rent_result(address, account) { RentResult::Exempt => { if set_exempt_rent_epoch_max { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); @@ -151,19 +150,13 @@ impl RentCollector { &self, address: &Pubkey, account: &impl ReadableAccount, - filler_account_suffix: Option<&Pubkey>, ) -> RentResult { if account.rent_epoch() == RENT_EXEMPT_RENT_EPOCH || account.rent_epoch() > self.epoch { // potentially rent paying account (or known and already marked exempt) // Maybe collect rent later, leave account alone for now. return RentResult::NoRentCollectionNow; } - if !self.should_collect_rent(address, account) - || crate::accounts_db::AccountsDb::is_filler_account_helper( - address, - filler_account_suffix, - ) - { + if !self.should_collect_rent(address, account) { // easy to determine this account should not consider having rent collected from it return RentResult::Exempt; } @@ -230,12 +223,7 @@ mod tests { ) -> CollectedInfo { // initialize rent_epoch as created at this epoch account.set_rent_epoch(self.epoch); - self.collect_from_existing_account( - address, - account, - /*filler_account_suffix:*/ None, - set_exempt_rent_epoch_max, - ) + self.collect_from_existing_account(address, account, set_exempt_rent_epoch_max) } } @@ -246,7 +234,7 @@ mod tests { let mut account = AccountSharedData::default(); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::NoRentCollectionNow ); { @@ -255,7 +243,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -265,7 +252,7 @@ mod tests { account.set_executable(true); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::Exempt ); { @@ -278,7 +265,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -288,7 +274,7 @@ mod tests { account.set_executable(false); assert_matches!( - rent_collector.calculate_rent_result(&incinerator::id(), &account, None), + rent_collector.calculate_rent_result(&incinerator::id(), &account), RentResult::Exempt ); { @@ -301,7 +287,6 @@ mod tests { rent_collector.collect_from_existing_account( &incinerator::id(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -309,49 +294,44 @@ mod tests { assert_eq!(account_clone, account_expected); } - // try a few combinations of rent collector rent epoch and collecting rent with and without filler accounts specified (but we aren't a filler) - let filler_account = solana_sdk::pubkey::new_rand(); - - for filler_accounts in 
[None, Some(&filler_account)] { - for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { - rent_collector.epoch = rent_epoch; - account.set_lamports(10); - account.set_rent_epoch(1); - let new_rent_epoch_expected = rent_collector.epoch + 1; - assert!( - matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, filler_accounts), - RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + // try a few combinations of rent collector rent epoch and collecting rent + for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { + rent_collector.epoch = rent_epoch; + account.set_lamports(10); + account.set_rent_epoch(1); + let new_rent_epoch_expected = rent_collector.epoch + 1; + assert!( + matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + ), + "{:?}", + rent_collector.calculate_rent_result(&Pubkey::default(), &account) + ); + + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector.collect_from_existing_account( + &Pubkey::default(), + &mut account_clone, + set_exempt_rent_epoch_max ), - "{:?}", - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None,) + CollectedInfo { + rent_amount: rent_due_expected, + account_data_len_reclaimed: 0 + } ); - - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - filler_accounts, - set_exempt_rent_epoch_max - ), - CollectedInfo { - rent_amount: rent_due_expected, - account_data_len_reclaimed: 0 - } - ); - let mut account_expected = account.clone(); - account_expected.set_lamports(account.lamports() - rent_due_expected); - account_expected.set_rent_epoch(new_rent_epoch_expected); - assert_eq!(account_clone, account_expected); - } + let mut account_expected = account.clone(); + account_expected.set_lamports(account.lamports() - rent_due_expected); + account_expected.set_rent_epoch(new_rent_epoch_expected); + assert_eq!(account_clone, account_expected); } } // enough lamports to make us exempt account.set_lamports(1_000_000); - let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account, None); + let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); assert!( matches!(result, RentResult::Exempt), "{result:?}, set_exempt_rent_epoch_max: {set_exempt_rent_epoch_max}", @@ -366,7 +346,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -379,7 +358,7 @@ mod tests { // We don't calculate rent amount vs data if the rent_epoch is already in the future. 
account.set_rent_epoch(1_000_000); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::NoRentCollectionNow ); { @@ -388,42 +367,12 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() ); assert_eq!(account_clone, account); } - - // filler accounts are exempt - account.set_rent_epoch(1); - account.set_lamports(10); - assert_matches!( - rent_collector.calculate_rent_result( - &filler_account, - &account, - Some(&filler_account), - ), - RentResult::Exempt - ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &filler_account, - &mut account_clone, - Some(&filler_account), - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } } } @@ -464,7 +413,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &solana_sdk::pubkey::new_rand(), &mut existing_account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert!(existing_account.lamports() < old_lamports); @@ -502,7 +450,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), huge_lamports); @@ -519,7 +466,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); @@ -546,7 +492,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), 0); @@ -573,7 +518,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &Pubkey::new_unique(), &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); diff --git a/accounts-db/src/rolling_bit_field/iterators.rs b/accounts-db/src/rolling_bit_field/iterators.rs index dd075037ee119c..f32f47a0bea8e7 100644 --- a/accounts-db/src/rolling_bit_field/iterators.rs +++ b/accounts-db/src/rolling_bit_field/iterators.rs @@ -33,9 +33,7 @@ impl Iterator for RollingBitFieldOnesIter<'_> { // Then iterate over the bit vec loop { // If there are no more bits in the range, then we've iterated over everything and are done - let Some(bit) = self.bit_range.next() else { - return None; - }; + let bit = self.bit_range.next()?; if self.rolling_bit_field.contains_assume_in_range(&bit) { break Some(bit); diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 829b0cb033b4f5..40f5c8f8d26077 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -8,6 +8,7 @@ pub mod hot; pub mod index; pub mod meta; pub mod mmap_utils; +pub mod owners; pub mod readable; pub mod writer; diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index e0fa8b4b136b3b..869036251d9b21 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -4,7 +4,7 @@ use { 
crate::tiered_storage::{footer::AccountBlockFormat, meta::AccountMetaOptionalFields}, std::{ - io::{Cursor, Read, Write}, + io::{Cursor, Read, Result as IoResult, Write}, mem, }, }; @@ -53,11 +53,31 @@ impl ByteBlockWriter { self.len } + /// Write plain ol' data to the internal buffer of the ByteBlockWriter instance + /// + /// Prefer this over `write_type()`, as it prevents some undefined behavior. + pub fn write_pod<T: bytemuck::NoUninit>(&mut self, value: &T) -> IoResult<usize> { + // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. + unsafe { self.write_type(value) } + } + /// Write the specified typed instance to the internal buffer of /// the ByteBlockWriter instance. - pub fn write_type(&mut self, value: &T) -> std::io::Result<usize> { + /// + /// Prefer `write_pod()` when possible, because `write_type()` may cause + /// undefined behavior if `value` contains uninitialized bytes. + /// + /// # Safety + /// + /// Caller must ensure casting T to bytes is safe. + /// Refer to the Safety sections in std::slice::from_raw_parts() + /// and bytemuck's Pod and NoUninit for more information. + pub unsafe fn write_type<T>(&mut self, value: &T) -> IoResult<usize> { let size = mem::size_of::<T>(); let ptr = value as *const _ as *const u8; + // SAFETY: The caller ensures that `value` contains no uninitialized bytes, + // we ensure the size is safe by querying T directly, + // and Rust ensures all values are at least byte-aligned. let slice = unsafe { std::slice::from_raw_parts(ptr, size) }; self.write(slice)?; Ok(size) @@ -65,18 +85,18 @@ impl ByteBlockWriter { /// Write all the Some fields of the specified AccountMetaOptionalFields. /// - /// Note that the existance of each optional field is stored separately in + /// Note that the existence of each optional field is stored separately in /// AccountMetaFlags. pub fn write_optional_fields( &mut self, opt_fields: &AccountMetaOptionalFields, - ) -> std::io::Result<usize> { + ) -> IoResult<usize> { let mut size = 0; if let Some(rent_epoch) = opt_fields.rent_epoch { - size += self.write_type(&rent_epoch)?; + size += self.write_pod(&rent_epoch)?; } if let Some(hash) = opt_fields.account_hash { - size += self.write_type(&hash)?; + size += self.write_pod(&hash)?; } debug_assert_eq!(size, opt_fields.size()); @@ -86,7 +106,7 @@ impl ByteBlockWriter { /// Write the specified typed bytes to the internal buffer of the /// ByteBlockWriter instance. - pub fn write(&mut self, buf: &[u8]) -> std::io::Result<()> { + pub fn write(&mut self, buf: &[u8]) -> IoResult<()> { match &mut self.encoder { ByteBlockEncoder::Raw(cursor) => cursor.write_all(buf)?, ByteBlockEncoder::Lz4(lz4_encoder) => lz4_encoder.write_all(buf)?, @@ -97,7 +117,7 @@ impl ByteBlockWriter { /// Flush the internal byte buffer that collects all the previous writes /// into an encoded byte array. - pub fn finish(self) -> std::io::Result<Vec<u8>> { + pub fn finish(self) -> IoResult<Vec<u8>> { match self.encoder { ByteBlockEncoder::Raw(cursor) => Ok(cursor.into_inner()), ByteBlockEncoder::Lz4(lz4_encoder) => { @@ -112,18 +132,40 @@ impl ByteBlockWriter { /// The util struct for reading byte blocks. pub struct ByteBlockReader; +/// Reads the raw part of the input byte_block, at the specified offset, as type T. +/// +/// Returns None if `offset` + size_of::<T>() exceeds the size of the input byte_block. +/// +/// Type T must be plain ol' data to ensure no undefined behavior. +pub fn read_pod<T: bytemuck::AnyBitPattern>(byte_block: &[u8], offset: usize) -> Option<&T> { + // SAFETY: Since T is AnyBitPattern, it is safe to cast bytes to T. + unsafe { read_type(byte_block, offset) } +} + /// Reads the raw part of the input byte_block at the specified offset /// as type T. /// /// If `offset` + size_of::<T>() exceeds the size of the input byte_block, /// then None will be returned. -pub fn read_type<T>(byte_block: &[u8], offset: usize) -> Option<&T> { +/// +/// Prefer `read_pod()` when possible, because `read_type()` may cause +/// undefined behavior. +/// +/// # Safety +/// +/// Caller must ensure casting bytes to T is safe. +/// Refer to the Safety sections in std::slice::from_raw_parts() +/// and bytemuck's Pod and AnyBitPattern for more information. +pub unsafe fn read_type<T>(byte_block: &[u8], offset: usize) -> Option<&T> { let (next, overflow) = offset.overflowing_add(std::mem::size_of::<T>()); if overflow || next > byte_block.len() { return None; } let ptr = byte_block[offset..].as_ptr() as *const T; debug_assert!(ptr as usize % std::mem::align_of::<T>() == 0); + // SAFETY: The caller ensures it is safe to cast bytes to T, + // we ensure the size is safe by querying T directly, + // and we just checked above to ensure the ptr is aligned for T. Some(unsafe { &*ptr }) } @@ -134,7 +176,7 @@ impl ByteBlockReader { /// /// Note that calling this function with AccountBlockFormat::AlignedRaw encoding /// will result in panic as the input is already decoded. - pub fn decode(encoding: AccountBlockFormat, input: &[u8]) -> std::io::Result<Vec<u8>> { + pub fn decode(encoding: AccountBlockFormat, input: &[u8]) -> IoResult<Vec<u8>> { match encoding { AccountBlockFormat::Lz4 => { let mut decoder = lz4::Decoder::new(input).unwrap(); @@ -169,7 +211,7 @@ mod tests { let mut writer = ByteBlockWriter::new(format); let value: u32 = 42; - writer.write_type(&value).unwrap(); + writer.write_pod(&value).unwrap(); assert_eq!(writer.raw_len(), mem::size_of::<u32>()); let buffer = writer.finish().unwrap(); @@ -231,12 +273,14 @@ let test_data3 = [33u8; 300]; // Write the above meta and data in an interleaving way. - writer.write_type(&test_metas[0]).unwrap(); - writer.write_type(&test_data1).unwrap(); - writer.write_type(&test_metas[1]).unwrap(); - writer.write_type(&test_data2).unwrap(); - writer.write_type(&test_metas[2]).unwrap(); - writer.write_type(&test_data3).unwrap(); + unsafe { + writer.write_type(&test_metas[0]).unwrap(); + writer.write_type(&test_data1).unwrap(); + writer.write_type(&test_metas[1]).unwrap(); + writer.write_type(&test_data2).unwrap(); + writer.write_type(&test_metas[2]).unwrap(); + writer.write_type(&test_data3).unwrap(); + } assert_eq!( writer.raw_len(), mem::size_of::<TestMetaStruct>() * 3 @@ -346,13 +390,13 @@ let mut offset = 0; for opt_fields in &opt_fields_vec { if let Some(expected_rent_epoch) = opt_fields.rent_epoch { - let rent_epoch = read_type::<Epoch>(&decoded_buffer, offset).unwrap(); + let rent_epoch = read_pod::<Epoch>(&decoded_buffer, offset).unwrap(); assert_eq!(*rent_epoch, expected_rent_epoch); verified_count += 1; offset += std::mem::size_of::<Epoch>(); } if let Some(expected_hash) = opt_fields.account_hash { - let hash = read_type::<AccountHash>(&decoded_buffer, offset).unwrap(); + let hash = read_pod::<AccountHash>(&decoded_buffer, offset).unwrap(); assert_eq!(hash, &expected_hash); verified_count += 1; offset += std::mem::size_of::<AccountHash>(); diff --git a/accounts-db/src/tiered_storage/error.rs b/accounts-db/src/tiered_storage/error.rs index 822b8bcde4810b..e0c8ffa5ca482d 100644 --- a/accounts-db/src/tiered_storage/error.rs +++ b/accounts-db/src/tiered_storage/error.rs @@ -1,4 +1,4 @@ -use {std::path::PathBuf, thiserror::Error}; +use {super::footer::SanitizeFooterError, std::path::PathBuf, thiserror::Error}; #[derive(Error, Debug)] pub enum TieredStorageError { @@ -16,4 +16,19 @@ pub enum TieredStorageError { #[error("Unsupported: the feature is not yet supported")] Unsupported(), + + #[error("invalid footer size: {0}, expected: {1}")] + InvalidFooterSize(u64, u64), + + #[error("invalid footer version: {0}")] + InvalidFooterVersion(u64), + + #[error("footer is unsanitary: {0}")] + SanitizeFooter(#[from] SanitizeFooterError), + + #[error("OffsetOutOfBounds: offset {0} is larger than the supported size {1}")] + OffsetOutOfBounds(usize, usize), + + #[error("OffsetAlignmentError: offset {0} must be multiple of {1}")] + OffsetAlignmentError(usize, usize), } diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 0799c1eec8610a..51801c6133e1f7 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -1,8 +1,11 @@ -use std::{ - fs::{File, OpenOptions}, - io::{Read, Seek, SeekFrom, Write}, - mem, - path::Path, +use { + bytemuck::{AnyBitPattern, NoUninit}, + std::{ + fs::{File, OpenOptions}, + io::{Read, Result as IoResult, Seek, SeekFrom, Write}, + mem, + path::Path, + }, }; #[derive(Debug)] @@ -15,17 +18,16 @@ impl TieredStorageFile { .read(true) .create(false) .open(&file_path) - .unwrap_or_else(|e| { + .unwrap_or_else(|err| { panic!( - "[TieredStorageError] Unable to open {:?} as read-only: {:?}", + "[TieredStorageError] Unable to open {} as read-only: {err}", file_path.as_ref().display(), - e ); }), ) } - pub fn new_writable(file_path: impl AsRef<Path>) -> Result<Self, std::io::Error> { + pub fn new_writable(file_path: impl AsRef<Path>) -> IoResult<Self> { Ok(Self( OpenOptions::new() .create_new(true) @@ -34,39 +36,72 @@ impl TieredStorageFile { )) } - pub fn write_type<T>(&self, value: &T) -> Result<usize, std::io::Error> { + /// Writes `value` to the file. + /// + /// `value` must be plain ol' data.
+ pub fn write_pod<T: NoUninit>(&self, value: &T) -> IoResult<usize> { + // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. + unsafe { self.write_type(value) } + } + + /// Writes `value` to the file. + /// + /// Prefer `write_pod` when possible, because `write_type()` may cause + /// undefined behavior if `value` contains uninitialized bytes. + /// + /// # Safety + /// + /// Caller must ensure casting T to bytes is safe. + /// Refer to the Safety sections in std::slice::from_raw_parts() + /// and bytemuck's Pod and NoUninit for more information. + pub unsafe fn write_type<T>(&self, value: &T) -> IoResult<usize> { let ptr = value as *const _ as *const u8; - let slice = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::<T>()) }; - (&self.0).write_all(slice)?; + let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::<T>()) }; + self.write_bytes(bytes) - Ok(std::mem::size_of::<T>()) + /// Reads a value of type `T` from the file. + /// + /// Type T must be plain ol' data. + pub fn read_pod<T: AnyBitPattern>(&self, value: &mut T) -> IoResult<()> { + // SAFETY: Since T is AnyBitPattern, it is safe to cast bytes to T. + unsafe { self.read_type(value) } } - pub fn read_type<T>(&self, value: &mut T) -> Result<(), std::io::Error> { + /// Reads a value of type `T` from the file. + /// + /// Prefer `read_pod()` when possible, because `read_type()` may cause + /// undefined behavior. + /// + /// # Safety + /// + /// Caller must ensure casting bytes to T is safe. + /// Refer to the Safety sections in std::slice::from_raw_parts() + /// and bytemuck's Pod and AnyBitPattern for more information. + pub unsafe fn read_type<T>(&self, value: &mut T) -> IoResult<()> { let ptr = value as *mut _ as *mut u8; - let slice = unsafe { std::slice::from_raw_parts_mut(ptr, mem::size_of::<T>()) }; - (&self.0).read_exact(slice)?; - - Ok(()) + // SAFETY: The caller ensures it is safe to cast bytes to T, + // we ensure the size is safe by querying T directly, + // and Rust ensures ptr is aligned. + let bytes = unsafe { std::slice::from_raw_parts_mut(ptr, mem::size_of::<T>()) }; + self.read_bytes(bytes) } - pub fn seek(&self, offset: u64) -> Result<u64, std::io::Error> { + pub fn seek(&self, offset: u64) -> IoResult<u64> { (&self.0).seek(SeekFrom::Start(offset)) } - pub fn seek_from_end(&self, offset: i64) -> Result<u64, std::io::Error> { + pub fn seek_from_end(&self, offset: i64) -> IoResult<u64> { (&self.0).seek(SeekFrom::End(offset)) } - pub fn write_bytes(&self, bytes: &[u8]) -> Result<usize, std::io::Error> { + pub fn write_bytes(&self, bytes: &[u8]) -> IoResult<usize> { (&self.0).write_all(bytes)?; Ok(bytes.len()) } - pub fn read_bytes(&self, buffer: &mut [u8]) -> Result<(), std::io::Error> { - (&self.0).read_exact(buffer)?; - - Ok(()) + pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { + (&self.0).read_exact(buffer) } } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index 7763d8d5622a0a..d5c8176ecfff6f 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,11 +1,17 @@ use { crate::tiered_storage::{ - error::TieredStorageError, file::TieredStorageFile, index::IndexBlockFormat, - mmap_utils::get_type, TieredStorageResult as TsResult, + error::TieredStorageError, + file::TieredStorageFile, + index::IndexBlockFormat, + mmap_utils::{get_pod, get_type}, + TieredStorageResult, }, + bytemuck::{Pod, Zeroable}, memmap2::Mmap, + num_enum::TryFromPrimitiveError, solana_sdk::{hash::Hash, pubkey::Pubkey}, std::{mem, path::Path}, + thiserror::Error, }; pub const FOOTER_FORMAT_VERSION: u64 = 1; @@ -22,10 +28,13 @@ pub const FOOTER_TAIL_SIZE: usize = 24; /// The ending 8 bytes of a valid tiered account storage file. pub const FOOTER_MAGIC_NUMBER: u64 = 0x502A2AB5; // SOLALABS -> SOLANA LABS -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] #[repr(C)] pub struct TieredStorageMagicNumber(pub u64); +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::<TieredStorageMagicNumber>() == 8); + impl Default for TieredStorageMagicNumber { fn default() -> Self { Self(FOOTER_MAGIC_NUMBER) } } @@ -86,7 +95,7 @@ pub enum OwnersBlockFormat { LocalIndex = 0, } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] #[repr(C)] pub struct TieredStorageFooter { // formats /// The format of the account meta entry. pub account_meta_format: AccountMetaFormat, /// The format of the owners block. pub owners_block_format: OwnersBlockFormat, /// The format of the account index block. pub index_block_format: IndexBlockFormat, /// The format of the account block. pub account_block_format: AccountBlockFormat, @@ -122,7 +131,7 @@ pub struct TieredStorageFooter { /// The offset pointing to the first byte of the account index block. pub index_block_offset: u64, /// The offset pointing to the first byte of the owners block. - pub owners_offset: u64, + pub owners_block_offset: u64, // account range /// The smallest account address in this file. pub min_account_address: Pubkey, /// The largest account address in this file. pub max_account_address: Pubkey, /// A hash that represents a tiered accounts file for consistency check. pub hash: Hash, + /// The format version of the tiered accounts file. + pub format_version: u64, // The below fields belong to footer tail. // The sum of their sizes should match FOOTER_TAIL_SIZE. /// The size of the footer including the magic number. pub footer_size: u64, - /// The format version of the tiered accounts file. - pub format_version: u64, // This field is persisted in the storage but not in this struct. // The number should match FOOTER_MAGIC_NUMBER. // pub magic_number: u64, } +// It is undefined behavior to read/write uninitialized bytes. +// The `Pod` marker trait indicates there are no uninitialized bytes. +// In order to safely guarantee a type is POD, it cannot have any padding. +const _: () = assert!( + std::mem::size_of::<TieredStorageFooter>() + == std::mem::size_of::<AccountMetaFormat>() + + std::mem::size_of::<OwnersBlockFormat>() + + std::mem::size_of::<IndexBlockFormat>() + + std::mem::size_of::<AccountBlockFormat>() + + std::mem::size_of::<u32>() // account_entry_count + + std::mem::size_of::<u32>() // account_meta_entry_size + + std::mem::size_of::<u64>() // account_block_size + + std::mem::size_of::<u32>() // owner_count + + std::mem::size_of::<u32>() // owner_entry_size + + std::mem::size_of::<u64>() // index_block_offset + + std::mem::size_of::<u64>() // owners_block_offset + + std::mem::size_of::<Pubkey>() // min_account_address + + std::mem::size_of::<Pubkey>() // max_account_address + + std::mem::size_of::<Hash>() // hash + + std::mem::size_of::<u64>() // format_version + + std::mem::size_of::<u64>(), // footer_size + "TieredStorageFooter cannot have any padding" +); + impl Default for TieredStorageFooter { fn default() -> Self { Self { @@ -157,39 +190,50 @@ impl Default for TieredStorageFooter { owner_count: 0, owner_entry_size: 0, index_block_offset: 0, - owners_offset: 0, + owners_block_offset: 0, hash: Hash::new_unique(), min_account_address: Pubkey::default(), max_account_address: Pubkey::default(), - footer_size: FOOTER_SIZE as u64, format_version: FOOTER_FORMAT_VERSION, + footer_size: FOOTER_SIZE as u64, } } } impl TieredStorageFooter { - pub fn new_from_path(path: impl AsRef<Path>) -> TsResult<Self> { + pub fn new_from_path(path: impl AsRef<Path>) -> TieredStorageResult<Self> { let file = TieredStorageFile::new_readonly(path); Self::new_from_footer_block(&file) } - pub fn write_footer_block(&self, file: &TieredStorageFile) -> TsResult<()> { - file.write_type(self)?; - file.write_type(&TieredStorageMagicNumber::default())?; + pub fn write_footer_block(&self, file: &TieredStorageFile) -> TieredStorageResult<()> { + // SAFETY: The footer does not contain any uninitialized bytes. + unsafe { file.write_type(self)? }; + file.write_pod(&TieredStorageMagicNumber::default())?; Ok(()) } - pub fn new_from_footer_block(file: &TieredStorageFile) -> TsResult<Self> { - let mut footer_size: u64 = 0; + pub fn new_from_footer_block(file: &TieredStorageFile) -> TieredStorageResult<Self> { + file.seek_from_end(-(FOOTER_TAIL_SIZE as i64))?; + let mut footer_version: u64 = 0; - let mut magic_number = TieredStorageMagicNumber(0); + file.read_pod(&mut footer_version)?; + if footer_version != FOOTER_FORMAT_VERSION { + return Err(TieredStorageError::InvalidFooterVersion(footer_version)); + } - file.seek_from_end(-(FOOTER_TAIL_SIZE as i64))?; - file.read_type(&mut footer_size)?; - file.read_type(&mut footer_version)?; - file.read_type(&mut magic_number)?; + let mut footer_size: u64 = 0; + file.read_pod(&mut footer_size)?; + if footer_size != FOOTER_SIZE as u64 { + return Err(TieredStorageError::InvalidFooterSize( + footer_size, + FOOTER_SIZE as u64, + )); + } + let mut magic_number = TieredStorageMagicNumber::zeroed(); + file.read_pod(&mut magic_number)?; if magic_number != TieredStorageMagicNumber::default() { return Err(TieredStorageError::MagicNumberMismatch( TieredStorageMagicNumber::default().0, @@ -199,17 +243,31 @@ impl TieredStorageFooter { let mut footer = Self::default(); file.seek_from_end(-(footer_size as i64))?; - file.read_type(&mut footer)?; + // SAFETY: We sanitize the footer to ensure all the bytes are + // actually safe to interpret as a TieredStorageFooter. + unsafe { file.read_type(&mut footer)? 
}; + Self::sanitize(&footer)?; Ok(footer) } - pub fn new_from_mmap(map: &Mmap) -> TsResult<&TieredStorageFooter> { - let offset = map.len().saturating_sub(FOOTER_TAIL_SIZE); - let (footer_size, offset) = get_type::<u64>(map, offset)?; - let (_footer_version, offset) = get_type::<u64>(map, offset)?; - let (magic_number, _offset) = get_type::<TieredStorageMagicNumber>(map, offset)?; + pub fn new_from_mmap(mmap: &Mmap) -> TieredStorageResult<&TieredStorageFooter> { + let offset = mmap.len().saturating_sub(FOOTER_TAIL_SIZE); + + let (footer_version, offset) = get_pod::<u64>(mmap, offset)?; + if *footer_version != FOOTER_FORMAT_VERSION { + return Err(TieredStorageError::InvalidFooterVersion(*footer_version)); + } + + let (&footer_size, offset) = get_pod::<u64>(mmap, offset)?; + if footer_size != FOOTER_SIZE as u64 { + return Err(TieredStorageError::InvalidFooterSize( + footer_size, + FOOTER_SIZE as u64, + )); + } + let (magic_number, _offset) = get_pod::<TieredStorageMagicNumber>(mmap, offset)?; if *magic_number != TieredStorageMagicNumber::default() { return Err(TieredStorageError::MagicNumberMismatch( TieredStorageMagicNumber::default().0, @@ -217,11 +275,66 @@ )); } - let (footer, _offset) = - get_type::<TieredStorageFooter>(map, map.len().saturating_sub(*footer_size as usize))?; + let footer_offset = mmap.len().saturating_sub(footer_size as usize); + // SAFETY: We sanitize the footer to ensure all the bytes are + // actually safe to interpret as a TieredStorageFooter. + let (footer, _offset) = unsafe { get_type::<TieredStorageFooter>(mmap, footer_offset)? }; + Self::sanitize(footer)?; Ok(footer) } + + /// Sanitizes the footer + /// + /// Since the various formats only have specific valid values, they must be sanitized + /// prior to use. This ensures the formats are valid to interpret as (rust) enums. + fn sanitize(footer: &Self) -> Result<(), SanitizeFooterError> { + let account_meta_format_u16 = + unsafe { &*(&footer.account_meta_format as *const _ as *const u16) }; + let owners_block_format_u16 = + unsafe { &*(&footer.owners_block_format as *const _ as *const u16) }; + let index_block_format_u16 = + unsafe { &*(&footer.index_block_format as *const _ as *const u16) }; + let account_block_format_u16 = + unsafe { &*(&footer.account_block_format as *const _ as *const u16) }; + + _ = AccountMetaFormat::try_from(*account_meta_format_u16) + .map_err(SanitizeFooterError::InvalidAccountMetaFormat)?; + _ = OwnersBlockFormat::try_from(*owners_block_format_u16) + .map_err(SanitizeFooterError::InvalidOwnersBlockFormat)?; + _ = IndexBlockFormat::try_from(*index_block_format_u16) + .map_err(SanitizeFooterError::InvalidIndexBlockFormat)?; + _ = AccountBlockFormat::try_from(*account_block_format_u16) + .map_err(SanitizeFooterError::InvalidAccountBlockFormat)?; + + // Since we just sanitized the formats within the footer, + // it is now safe to read them as (rust) enums. + // + // from https://doc.rust-lang.org/reference/items/enumerations.html#casting: + // > If an enumeration is unit-only (with no tuple and struct variants), + // > then its discriminant can be directly accessed with a numeric cast; + // + // from https://doc.rust-lang.org/reference/items/enumerations.html#pointer-casting: + // > If the enumeration specifies a primitive representation, + // > then the discriminant may be reliably accessed via unsafe pointer casting + Ok(()) + } +} + +/// Errors that can happen while sanitizing the footer +#[derive(Error, Debug)] +pub enum SanitizeFooterError { + #[error("invalid account meta format: {0}")] + InvalidAccountMetaFormat(#[from] TryFromPrimitiveError<AccountMetaFormat>), + + #[error("invalid owners block format: {0}")] + InvalidOwnersBlockFormat(#[from] TryFromPrimitiveError<OwnersBlockFormat>), + + #[error("invalid index block format: {0}")] + InvalidIndexBlockFormat(#[from] TryFromPrimitiveError<IndexBlockFormat>), + + #[error("invalid account block format: {0}")] + InvalidAccountBlockFormat(#[from] TryFromPrimitiveError<AccountBlockFormat>), } #[cfg(test)] @@ -241,7 +354,7 @@ mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - index_block_format: IndexBlockFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 24, account_block_size: 4096, owner_count: 250, owner_entry_size: 32, index_block_offset: 1069600, - owners_offset: 1081200, + owners_block_offset: 1081200, hash: Hash::new_unique(), min_account_address: Pubkey::default(), max_account_address: Pubkey::new_unique(), - footer_size: FOOTER_SIZE as u64, format_version: FOOTER_FORMAT_VERSION, + footer_size: FOOTER_SIZE as u64, }; // Persist the expected footer.
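[editor note: a minimal sketch, not part of the patch, of the sanitize-before-cast pattern used for the footer's format fields above; `Format` is a hypothetical stand-in for the real *Format enums, which derive num_enum's TryFromPrimitive]

```rust
// A #[repr(u16)] enum only has a few valid discriminants, so bytes read from
// disk must be validated before the in-place value is treated as the enum.
#[repr(u16)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Format {
    LocalIndex = 0,
}

impl TryFrom<u16> for Format {
    type Error = u16;
    fn try_from(value: u16) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(Format::LocalIndex),
            invalid => Err(invalid), // reject any discriminant we do not know
        }
    }
}

fn main() {
    // only after this check succeeds is a pointer cast of the raw bytes sound
    assert_eq!(Format::try_from(0), Ok(Format::LocalIndex));
    assert_eq!(Format::try_from(0xBAD0), Err(0xBAD0));
}
```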
@@ -286,11 +399,82 @@ mod tests { assert_eq!(offset_of!(TieredStorageFooter, owner_count), 0x18); assert_eq!(offset_of!(TieredStorageFooter, owner_entry_size), 0x1C); assert_eq!(offset_of!(TieredStorageFooter, index_block_offset), 0x20); - assert_eq!(offset_of!(TieredStorageFooter, owners_offset), 0x28); + assert_eq!(offset_of!(TieredStorageFooter, owners_block_offset), 0x28); assert_eq!(offset_of!(TieredStorageFooter, min_account_address), 0x30); assert_eq!(offset_of!(TieredStorageFooter, max_account_address), 0x50); assert_eq!(offset_of!(TieredStorageFooter, hash), 0x70); - assert_eq!(offset_of!(TieredStorageFooter, footer_size), 0x90); - assert_eq!(offset_of!(TieredStorageFooter, format_version), 0x98); + assert_eq!(offset_of!(TieredStorageFooter, format_version), 0x90); + assert_eq!(offset_of!(TieredStorageFooter, footer_size), 0x98); + } + + #[test] + fn test_sanitize() { + // test: all good + { + let footer = TieredStorageFooter::default(); + let result = TieredStorageFooter::sanitize(&footer); + assert!(result.is_ok()); + } + + // test: bad account meta format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write( + &mut footer.account_meta_format as *mut _ as *mut u16, + 0xBAD0, + ); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidAccountMetaFormat(_)) + )); + } + + // test: bad owners block format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write( + &mut footer.owners_block_format as *mut _ as *mut u16, + 0xBAD0, + ); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidOwnersBlockFormat(_)) + )); + } + + // test: bad index block format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write(&mut footer.index_block_format as *mut _ as *mut u16, 0xBAD0); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidIndexBlockFormat(_)) + )); + } + + // test: bad account block format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write( + &mut footer.account_block_format as *mut _ as *mut u16, + 0xBAD0, + ); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidAccountBlockFormat(_)) + )); + } } } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index f2efc1a966ca11..1ae19bb1cd7d35 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -1,23 +1,27 @@ -#![allow(dead_code)] //! The account meta and related structs for hot accounts. 
use { crate::{ + account_storage::meta::StoredAccountMeta, + accounts_file::MatchAccountOwnerError, accounts_hash::AccountHash, tiered_storage::{ byte_block, footer::{ AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, }, - index::{AccountOffset, IndexBlockFormat}, + index::{AccountOffset, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, - mmap_utils::get_type, - TieredStorageFormat, TieredStorageResult, + mmap_utils::{get_pod, get_slice}, + owners::{OwnerOffset, OwnersBlock}, + readable::TieredReadableAccount, + TieredStorageError, TieredStorageFormat, TieredStorageResult, }, }, + bytemuck::{Pod, Zeroable}, memmap2::{Mmap, MmapOptions}, modular_bitfield::prelude::*, - solana_sdk::stake_history::Epoch, + solana_sdk::{pubkey::Pubkey, stake_history::Epoch}, std::{fs::OpenOptions, option::Option, path::Path}, }; @@ -25,7 +29,7 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { meta_entry_size: std::mem::size_of::<HotAccountMeta>(), account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - index_block_format: IndexBlockFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, account_block_format: AccountBlockFormat::AlignedRaw, }; @@ -33,11 +37,21 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { const MAX_HOT_PADDING: u8 = 7; /// The maximum allowed value for the owner index of a hot account. -const MAX_HOT_OWNER_INDEX: u32 = (1 << 29) - 1; +const MAX_HOT_OWNER_OFFSET: OwnerOffset = OwnerOffset((1 << 29) - 1); + +/// The byte alignment for hot accounts. This alignment serves dual purposes. +/// First, it allows hot accounts to be directly accessed when the underlying +/// file is mmapped. In addition, as all hot accounts are aligned, it allows +/// each hot accounts file to handle more accounts with the same number of +/// bytes in HotAccountOffset. +pub(crate) const HOT_ACCOUNT_ALIGNMENT: usize = 8; + +/// The maximum supported offset for hot accounts storage. +const MAX_HOT_ACCOUNT_OFFSET: usize = u32::MAX as usize * HOT_ACCOUNT_ALIGNMENT; #[bitfield(bits = 32)] #[repr(C)] -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Pod, Zeroable)] struct HotMetaPackedFields { /// A hot account entry consists of the following elements: /// @@ -50,12 +64,52 @@ struct HotMetaPackedFields { /// in its hot account entry. padding: B3, /// The index to the owner of a hot account inside an AccountsFile. - owner_index: B29, + owner_offset: B29, +} + +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::<HotMetaPackedFields>() == 4); + +/// The offset to access a hot account. +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Pod, Zeroable)] +pub struct HotAccountOffset(u32); + +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::<HotAccountOffset>() == 4); + +impl AccountOffset for HotAccountOffset {} + +impl HotAccountOffset { + /// Creates a new AccountOffset instance + pub fn new(offset: usize) -> TieredStorageResult<Self> { + if offset > MAX_HOT_ACCOUNT_OFFSET { + return Err(TieredStorageError::OffsetOutOfBounds( + offset, + MAX_HOT_ACCOUNT_OFFSET, + )); + } + + // Hot accounts are aligned based on HOT_ACCOUNT_ALIGNMENT.
+ if offset % HOT_ACCOUNT_ALIGNMENT != 0 { + return Err(TieredStorageError::OffsetAlignmentError( + offset, + HOT_ACCOUNT_ALIGNMENT, + )); + } + + Ok(HotAccountOffset((offset / HOT_ACCOUNT_ALIGNMENT) as u32)) + } + + /// Returns the offset to the account. + fn offset(&self) -> usize { + self.0 as usize * HOT_ACCOUNT_ALIGNMENT + } } /// The storage and in-memory representation of the metadata entry for a /// hot account. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Pod, Zeroable)] #[repr(C)] pub struct HotAccountMeta { /// The balance of this account. @@ -66,6 +120,9 @@ pub struct HotAccountMeta { flags: AccountMetaFlags, } +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::<HotAccountMeta>() == 8 + 4 + 4); + impl TieredAccountMeta for HotAccountMeta { /// Construct a HotAccountMeta instance. fn new() -> Self { @@ -93,11 +150,11 @@ } /// A builder function that initializes the owner's index. - fn with_owner_index(mut self, owner_index: u32) -> Self { - if owner_index > MAX_HOT_OWNER_INDEX { - panic!("owner_index exceeds MAX_HOT_OWNER_INDEX"); + fn with_owner_offset(mut self, owner_offset: OwnerOffset) -> Self { + if owner_offset > MAX_HOT_OWNER_OFFSET { + panic!("owner_offset exceeds MAX_HOT_OWNER_OFFSET"); } - self.packed_fields.set_owner_index(owner_index); + self.packed_fields.set_owner_offset(owner_offset.0); self } @@ -126,8 +183,8 @@ } /// Returns the index to the accounts' owner in the current AccountsFile. - fn owner_index(&self) -> u32 { - self.packed_fields.owner_index() + fn owner_offset(&self) -> OwnerOffset { + OwnerOffset(self.packed_fields.owner_offset()) } /// Returns the AccountMetaFlags of the current meta. @@ -150,7 +207,7 @@ .then(|| { let offset = self.optional_fields_offset(account_block) + AccountMetaOptionalFields::rent_epoch_offset(self.flags()); - byte_block::read_type::<Epoch>(account_block, offset).copied() + byte_block::read_pod::<Epoch>(account_block, offset).copied() }) .flatten() } @@ -163,7 +220,7 @@ .then(|| { let offset = self.optional_fields_offset(account_block) + AccountMetaOptionalFields::account_hash_offset(self.flags()); - byte_block::read_type::<AccountHash>(account_block, offset) + byte_block::read_pod::<AccountHash>(account_block, offset) }) .flatten() } @@ -202,11 +259,11 @@ impl HotStorageReader { pub fn new_from_path(path: impl AsRef<Path>) -> TieredStorageResult<Self> { let file = OpenOptions::new().read(true).open(path)?; let mmap = unsafe { MmapOptions::new().map(&file)? }; - // Here we are cloning the footer as accessing any data in a + // Here we are copying the footer, as accessing any data in a // TieredStorage instance requires accessing its Footer. // This can help improve cache locality and reduce the overhead // of indirection associated with memory-mapped accesses. - let footer = TieredStorageFooter::new_from_mmap(&mmap)?.clone(); + let footer = *TieredStorageFooter::new_from_mmap(&mmap)?; Ok(Self { mmap, footer }) } @@ -225,11 +282,152 @@ /// Returns the account meta located at the specified offset.
fn get_account_meta_from_offset( &self, - account_offset: AccountOffset, + account_offset: HotAccountOffset, ) -> TieredStorageResult<&HotAccountMeta> { - let (meta, _) = get_type::<HotAccountMeta>(&self.mmap, account_offset.block)?; + let offset = account_offset.offset(); + + assert!( + offset.saturating_add(std::mem::size_of::<HotAccountMeta>()) + <= self.footer.index_block_offset as usize, + "reading HotAccountOffset ({}) would exceed accounts blocks offset boundary ({}).", + offset, + self.footer.index_block_offset, + ); + let (meta, _) = get_pod::<HotAccountMeta>(&self.mmap, offset)?; Ok(meta) } + + /// Returns the offset to the account given the specified index. + fn get_account_offset( + &self, + index_offset: IndexOffset, + ) -> TieredStorageResult<HotAccountOffset> { + self.footer + .index_block_format + .get_account_offset::<HotAccountOffset>(&self.mmap, &self.footer, index_offset) + } + + /// Returns the address of the account associated with the specified index. + fn get_account_address(&self, index: IndexOffset) -> TieredStorageResult<&Pubkey> { + self.footer + .index_block_format + .get_account_address(&self.mmap, &self.footer, index) + } + + /// Returns the address of the account owner given the specified + /// owner_offset. + fn get_owner_address(&self, owner_offset: OwnerOffset) -> TieredStorageResult<&Pubkey> { + OwnersBlock::get_owner_address(&self.mmap, &self.footer, owner_offset) + } + + /// Returns Ok(index_of_matching_owner) if the account owner at + /// `account_offset` is one of the pubkeys in `owners`. + /// + /// Returns Err(MatchAccountOwnerError::NoMatch) if the account has 0 + /// lamports or the owner is not one of the pubkeys in `owners`. + /// + /// Returns Err(MatchAccountOwnerError::UnableToLoad) if there is any internal + /// error that prevents the data from loading, including when `account_offset` + /// causes a data overrun. + pub fn account_matches_owners( + &self, + account_offset: HotAccountOffset, + owners: &[&Pubkey], + ) -> Result<usize, MatchAccountOwnerError> { + let account_meta = self + .get_account_meta_from_offset(account_offset) + .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; + + if account_meta.lamports() == 0 { + Err(MatchAccountOwnerError::NoMatch) + } else { + let account_owner = self + .get_owner_address(account_meta.owner_offset()) + .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; + + owners + .iter() + .position(|candidate| &account_owner == candidate) + .ok_or(MatchAccountOwnerError::NoMatch) + } + } + + /// Returns the size of the account block based on its account offset + /// and index offset. + /// + /// The account block size information is omitted in the hot accounts file + /// as it can be derived by comparing the offset of the next hot account + /// meta in the index block. + fn get_account_block_size( + &self, + account_offset: HotAccountOffset, + index_offset: IndexOffset, + ) -> TieredStorageResult<usize> { + // the offset that points to the hot account meta. + let account_meta_offset = account_offset.offset(); + + // Obtain the ending offset of the account block. If the current + // account is the last account, then the ending offset is the + // index_block_offset. + let account_block_ending_offset = + if index_offset.0.saturating_add(1) == self.footer.account_entry_count { + self.footer.index_block_offset as usize + } else { + self.get_account_offset(IndexOffset(index_offset.0.saturating_add(1)))? + .offset() + }; + + // With the ending offset, minus the starting offset (i.e., + // the account meta offset) and the HotAccountMeta size, the remainder + // is the account block size (account data + optional fields). + Ok(account_block_ending_offset + .saturating_sub(account_meta_offset) + .saturating_sub(std::mem::size_of::<HotAccountMeta>())) + } + + /// Returns the account block that contains the account associated with + /// the specified index given the offset to the account meta and its index. + fn get_account_block( + &self, + account_offset: HotAccountOffset, + index_offset: IndexOffset, + ) -> TieredStorageResult<&[u8]> { + let (data, _) = get_slice( + &self.mmap, + account_offset.offset() + std::mem::size_of::<HotAccountMeta>(), + self.get_account_block_size(account_offset, index_offset)?, + )?; + + Ok(data) + } + + /// Returns the account located at the specified index offset. + pub fn get_account( + &self, + index_offset: IndexOffset, + ) -> TieredStorageResult<Option<(StoredAccountMeta<'_>, usize)>> { + if index_offset.0 >= self.footer.account_entry_count { + return Ok(None); + } + + let account_offset = self.get_account_offset(index_offset)?; + + let meta = self.get_account_meta_from_offset(account_offset)?; + let address = self.get_account_address(index_offset)?; + let owner = self.get_owner_address(meta.owner_offset())?; + let account_block = self.get_account_block(account_offset, index_offset)?; + + Ok(Some(( + StoredAccountMeta::Hot(TieredReadableAccount { + meta, + address, + owner, + index: index_offset.0 as usize, + account_block, + }), + index_offset.0.saturating_add(1) as usize, + ))) + } } #[cfg(test)] @@ -244,12 +442,13 @@ FOOTER_SIZE, }, hot::{HotAccountMeta, HotStorageReader}, - index::{AccountOffset, IndexBlockFormat}, + index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, }, + assert_matches::assert_matches, memoffset::offset_of, - rand::Rng, - solana_sdk::{hash::Hash, pubkey::Pubkey, stake_history::Epoch}, + rand::{seq::SliceRandom, Rng}, + solana_sdk::{account::ReadableAccount, hash::Hash, pubkey::Pubkey, stake_history::Epoch}, tempfile::TempDir, }; @@ ... #[test] fn test_packed_fields() { const TEST_PADDING: u8 = 7; - const TEST_OWNER_INDEX: u32 = 0x1fff_ef98; + const TEST_OWNER_OFFSET: u32 = 0x1fff_ef98; let mut packed_fields = HotMetaPackedFields::default(); packed_fields.set_padding(TEST_PADDING); - packed_fields.set_owner_index(TEST_OWNER_INDEX); + packed_fields.set_owner_offset(TEST_OWNER_OFFSET); assert_eq!(packed_fields.padding(), TEST_PADDING); - assert_eq!(packed_fields.owner_index(), TEST_OWNER_INDEX); + assert_eq!(packed_fields.owner_offset(), TEST_OWNER_OFFSET); } #[test] fn test_packed_fields_max_values() { let mut packed_fields = HotMetaPackedFields::default(); packed_fields.set_padding(MAX_HOT_PADDING); - packed_fields.set_owner_index(MAX_HOT_OWNER_INDEX); + packed_fields.set_owner_offset(MAX_HOT_OWNER_OFFSET.0); assert_eq!(packed_fields.padding(), MAX_HOT_PADDING); - assert_eq!(packed_fields.owner_index(), MAX_HOT_OWNER_INDEX); + assert_eq!(packed_fields.owner_offset(), MAX_HOT_OWNER_OFFSET.0); } #[test] fn test_hot_meta_max_values() { let meta = HotAccountMeta::new() .with_account_data_padding(MAX_HOT_PADDING) - .with_owner_index(MAX_HOT_OWNER_INDEX); + .with_owner_offset(MAX_HOT_OWNER_OFFSET); assert_eq!(meta.account_data_padding(), MAX_HOT_PADDING); - assert_eq!(meta.owner_index(), MAX_HOT_OWNER_INDEX); + assert_eq!(meta.owner_offset(), MAX_HOT_OWNER_OFFSET); + } + + #[test] + fn test_max_hot_account_offset() { + assert_matches!(HotAccountOffset::new(0), Ok(_)); + assert_matches!(HotAccountOffset::new(MAX_HOT_ACCOUNT_OFFSET), Ok(_)); + } + + #[test] + fn 
test_max_hot_account_offset_out_of_bounds() { + assert_matches!( + HotAccountOffset::new(MAX_HOT_ACCOUNT_OFFSET + HOT_ACCOUNT_ALIGNMENT), + Err(TieredStorageError::OffsetOutOfBounds(_, _)) + ); + } + + #[test] + fn test_max_hot_account_offset_alignment_error() { + assert_matches!( + HotAccountOffset::new(HOT_ACCOUNT_ALIGNMENT - 1), + Err(TieredStorageError::OffsetAlignmentError(_, _)) + ); } #[test] @@ -298,16 +519,16 @@ pub mod tests { } #[test] - #[should_panic(expected = "owner_index exceeds MAX_HOT_OWNER_INDEX")] - fn test_hot_meta_owner_index_exceeds_limit() { - HotAccountMeta::new().with_owner_index(MAX_HOT_OWNER_INDEX + 1); + #[should_panic(expected = "owner_offset exceeds MAX_HOT_OWNER_OFFSET")] + fn test_hot_meta_owner_offset_exceeds_limit() { + HotAccountMeta::new().with_owner_offset(OwnerOffset(MAX_HOT_OWNER_OFFSET.0 + 1)); } #[test] fn test_hot_account_meta() { const TEST_LAMPORTS: u64 = 2314232137; const TEST_PADDING: u8 = 5; - const TEST_OWNER_INDEX: u32 = 0x1fef_1234; + const TEST_OWNER_OFFSET: OwnerOffset = OwnerOffset(0x1fef_1234); const TEST_RENT_EPOCH: Epoch = 7; let optional_fields = AccountMetaOptionalFields { @@ -319,12 +540,12 @@ pub mod tests { let meta = HotAccountMeta::new() .with_lamports(TEST_LAMPORTS) .with_account_data_padding(TEST_PADDING) - .with_owner_index(TEST_OWNER_INDEX) + .with_owner_offset(TEST_OWNER_OFFSET) .with_flags(&flags); assert_eq!(meta.lamports(), TEST_LAMPORTS); assert_eq!(meta.account_data_padding(), TEST_PADDING); - assert_eq!(meta.owner_index(), TEST_OWNER_INDEX); + assert_eq!(meta.owner_offset(), TEST_OWNER_OFFSET); assert_eq!(*meta.flags(), flags); } @@ -334,7 +555,7 @@ pub mod tests { let padding = [0u8; 5]; const TEST_LAMPORT: u64 = 2314232137; - const OWNER_INDEX: u32 = 0x1fef_1234; + const OWNER_OFFSET: u32 = 0x1fef_1234; const TEST_RENT_EPOCH: Epoch = 7; let optional_fields = AccountMetaOptionalFields { @@ -346,17 +567,20 @@ pub mod tests { let expected_meta = HotAccountMeta::new() .with_lamports(TEST_LAMPORT) .with_account_data_padding(padding.len().try_into().unwrap()) - .with_owner_index(OWNER_INDEX) + .with_owner_offset(OwnerOffset(OWNER_OFFSET)) .with_flags(&flags); let mut writer = ByteBlockWriter::new(AccountBlockFormat::AlignedRaw); - writer.write_type(&expected_meta).unwrap(); - writer.write_type(&account_data).unwrap(); - writer.write_type(&padding).unwrap(); + writer.write_pod(&expected_meta).unwrap(); + // SAFETY: These values are POD, so they are safe to write. 
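+        // (write_type() is the unchecked counterpart of write_pod(),
+        // presumably mirroring the get_pod()/get_type() split in
+        // mmap_utils; hence the unsafe block below.)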
+        unsafe {
+            writer.write_type(&account_data).unwrap();
+            writer.write_type(&padding).unwrap();
+        }
         writer.write_optional_fields(&optional_fields).unwrap();
         let buffer = writer.finish().unwrap();

-        let meta = byte_block::read_type::<HotAccountMeta>(&buffer, 0).unwrap();
+        let meta = byte_block::read_pod::<HotAccountMeta>(&buffer, 0).unwrap();
         assert_eq!(expected_meta, *meta);
         assert!(meta.flags().has_rent_epoch());
         assert!(meta.flags().has_account_hash());
@@ -386,7 +610,7 @@ pub mod tests {
         let expected_footer = TieredStorageFooter {
             account_meta_format: AccountMetaFormat::Hot,
             owners_block_format: OwnersBlockFormat::LocalIndex,
-            index_block_format: IndexBlockFormat::AddressAndOffset,
+            index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly,
             account_block_format: AccountBlockFormat::AlignedRaw,
             account_entry_count: 300,
             account_meta_entry_size: 16,
@@ -394,7 +618,7 @@
             owner_count: 250,
             owner_entry_size: 32,
             index_block_offset: 1069600,
-            owners_offset: 1081200,
+            owners_block_offset: 1081200,
             hash: Hash::new_unique(),
             min_account_address: Pubkey::default(),
             max_account_address: Pubkey::new_unique(),
@@ -428,12 +652,12 @@
             .map(|_| {
                 HotAccountMeta::new()
                     .with_lamports(rng.gen_range(0..u64::MAX))
-                    .with_owner_index(rng.gen_range(0..NUM_ACCOUNTS))
+                    .with_owner_offset(OwnerOffset(rng.gen_range(0..NUM_ACCOUNTS)))
             })
             .collect();
         let account_offsets: Vec<_>;

-        let footer = TieredStorageFooter {
+        let mut footer = TieredStorageFooter {
             account_meta_format: AccountMetaFormat::Hot,
             account_entry_count: NUM_ACCOUNTS,
             ..TieredStorageFooter::default()
         };
@@ -446,12 +670,13 @@
                 .iter()
                 .map(|meta| {
                     let prev_offset = current_offset;
-                    current_offset += file.write_type(meta).unwrap();
-                    AccountOffset { block: prev_offset }
+                    current_offset += file.write_pod(meta).unwrap();
+                    HotAccountOffset::new(prev_offset).unwrap()
                 })
                 .collect();
             // while the test only focuses on account metas, writing a footer
             // here is necessary to make it a valid tiered-storage file.
+            footer.index_block_offset = current_offset as u64;
             footer.write_footer_block(&file).unwrap();
         }

@@ -461,6 +686,344 @@
             let meta = hot_storage.get_account_meta_from_offset(*offset).unwrap();
             assert_eq!(meta, expected_meta);
         }
+        assert_eq!(&footer, hot_storage.footer());
     }
+
+    #[test]
+    #[should_panic(expected = "would exceed accounts blocks offset boundary")]
+    fn test_get_account_meta_from_offset_out_of_bounds() {
+        // Generate a new temp path that is guaranteed to NOT already have a file.
+        let temp_dir = TempDir::new().unwrap();
+        let path = temp_dir
+            .path()
+            .join("test_get_account_meta_from_offset_out_of_bounds");
+
+        let footer = TieredStorageFooter {
+            account_meta_format: AccountMetaFormat::Hot,
+            index_block_offset: 160,
+            ..TieredStorageFooter::default()
+        };
+
+        {
+            let file = TieredStorageFile::new_writable(&path).unwrap();
+            footer.write_footer_block(&file).unwrap();
+        }
+
+        let hot_storage = HotStorageReader::new_from_path(&path).unwrap();
+        let offset = HotAccountOffset::new(footer.index_block_offset as usize).unwrap();
+        // Read from index_block_offset, an offset that does not belong to
+        // the account blocks; expect an assert failure here.
+        hot_storage.get_account_meta_from_offset(offset).unwrap();
+    }
+
+    #[test]
+    fn test_hot_storage_get_account_offset_and_address() {
+        // Generate a new temp path that is guaranteed to NOT already have a file.
+ let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_hot_storage_get_account_offset_and_address"); + const NUM_ACCOUNTS: u32 = 10; + let mut rng = rand::thread_rng(); + + let addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_ACCOUNTS as usize) + .collect(); + + let index_writer_entries: Vec<_> = addresses + .iter() + .map(|address| AccountIndexWriterEntry { + address, + offset: HotAccountOffset::new( + rng.gen_range(0..u32::MAX) as usize * HOT_ACCOUNT_ALIGNMENT, + ) + .unwrap(), + }) + .collect(); + + let mut footer = TieredStorageFooter { + account_meta_format: AccountMetaFormat::Hot, + account_entry_count: NUM_ACCOUNTS, + // Set index_block_offset to 0 as we didn't write any account + // meta/data in this test + index_block_offset: 0, + ..TieredStorageFooter::default() + }; + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + + let cursor = footer + .index_block_format + .write_index_block(&file, &index_writer_entries) + .unwrap(); + footer.owners_block_offset = cursor as u64; + footer.write_footer_block(&file).unwrap(); + } + + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + for (i, index_writer_entry) in index_writer_entries.iter().enumerate() { + let account_offset = hot_storage + .get_account_offset(IndexOffset(i as u32)) + .unwrap(); + assert_eq!(account_offset, index_writer_entry.offset); + + let account_address = hot_storage + .get_account_address(IndexOffset(i as u32)) + .unwrap(); + assert_eq!(account_address, index_writer_entry.address); + } + } + + #[test] + fn test_hot_storage_get_owner_address() { + // Generate a new temp path that is guaranteed to NOT already have a file. + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_hot_storage_get_owner_address"); + const NUM_OWNERS: usize = 10; + + let addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_OWNERS) + .collect(); + + let footer = TieredStorageFooter { + account_meta_format: AccountMetaFormat::Hot, + // meta/data nor index block in this test + owners_block_offset: 0, + ..TieredStorageFooter::default() + }; + + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + + OwnersBlock::write_owners_block(&file, &addresses).unwrap(); + + // while the test only focuses on account metas, writing a footer + // here is necessary to make it a valid tiered-storage file. + footer.write_footer_block(&file).unwrap(); + } + + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + for (i, address) in addresses.iter().enumerate() { + assert_eq!( + hot_storage + .get_owner_address(OwnerOffset(i as u32)) + .unwrap(), + address, + ); + } + } + + #[test] + fn test_account_matches_owners() { + // Generate a new temp path that is guaranteed to NOT already have a file. 
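+        // The test below proceeds in three phases: candidates that all
+        // match, candidates that all fail to match, and a shuffled mix of
+        // the two.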
+        let temp_dir = TempDir::new().unwrap();
+        let path = temp_dir.path().join("test_account_matches_owners");
+        const NUM_OWNERS: u32 = 10;
+
+        let owner_addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique)
+            .take(NUM_OWNERS as usize)
+            .collect();
+
+        const NUM_ACCOUNTS: u32 = 30;
+        let mut rng = rand::thread_rng();
+
+        let hot_account_metas: Vec<_> = std::iter::repeat_with({
+            || {
+                HotAccountMeta::new()
+                    .with_lamports(rng.gen_range(1..u64::MAX))
+                    .with_owner_offset(OwnerOffset(rng.gen_range(0..NUM_OWNERS)))
+            }
+        })
+        .take(NUM_ACCOUNTS as usize)
+        .collect();
+        let mut footer = TieredStorageFooter {
+            account_meta_format: AccountMetaFormat::Hot,
+            account_entry_count: NUM_ACCOUNTS,
+            owner_count: NUM_OWNERS,
+            ..TieredStorageFooter::default()
+        };
+        let account_offsets: Vec<_>;
+
+        {
+            let file = TieredStorageFile::new_writable(&path).unwrap();
+            let mut current_offset = 0;
+
+            account_offsets = hot_account_metas
+                .iter()
+                .map(|meta| {
+                    let prev_offset = current_offset;
+                    current_offset += file.write_pod(meta).unwrap();
+                    HotAccountOffset::new(prev_offset).unwrap()
+                })
+                .collect();
+            footer.index_block_offset = current_offset as u64;
+            // Typically, the owners block is stored after the index block,
+            // but since we don't write an index block in this test, we set
+            // owners_block_offset to the end of the accounts blocks.
+            footer.owners_block_offset = footer.index_block_offset;
+
+            OwnersBlock::write_owners_block(&file, &owner_addresses).unwrap();
+
+            // while the test only focuses on account metas, writing a footer
+            // here is necessary to make it a valid tiered-storage file.
+            footer.write_footer_block(&file).unwrap();
+        }
+
+        let hot_storage = HotStorageReader::new_from_path(&path).unwrap();
+
+        // First, verify whether we can find the expected owners.
+        let mut owner_candidates: Vec<_> = owner_addresses.iter().collect();
+        owner_candidates.shuffle(&mut rng);
+
+        for (account_offset, account_meta) in account_offsets.iter().zip(hot_account_metas.iter()) {
+            let index = hot_storage
+                .account_matches_owners(*account_offset, &owner_candidates)
+                .unwrap();
+            assert_eq!(
+                owner_candidates[index],
+                &owner_addresses[account_meta.owner_offset().0 as usize]
+            );
+        }
+
+        // Second, verify the MatchAccountOwnerError::NoMatch case
+        const NUM_UNMATCHED_OWNERS: usize = 20;
+        let unmatched_owners: Vec<_> = std::iter::repeat_with(Pubkey::new_unique)
+            .take(NUM_UNMATCHED_OWNERS)
+            .collect();
+        let unmatched_candidates: Vec<_> = unmatched_owners.iter().collect();
+
+        for account_offset in account_offsets.iter() {
+            assert_eq!(
+                hot_storage.account_matches_owners(*account_offset, &unmatched_candidates),
+                Err(MatchAccountOwnerError::NoMatch)
+            );
+        }
+
+        // Third, mix the two candidate lists and make sure we still find
+        // the matched owners.
+        owner_candidates.extend(unmatched_candidates);
+        owner_candidates.shuffle(&mut rng);
+
+        for (account_offset, account_meta) in account_offsets.iter().zip(hot_account_metas.iter()) {
+            let index = hot_storage
+                .account_matches_owners(*account_offset, &owner_candidates)
+                .unwrap();
+            assert_eq!(
+                owner_candidates[index],
+                &owner_addresses[account_meta.owner_offset().0 as usize]
+            );
+        }
+    }
+
+    // Returns the required number of padding bytes.
+    fn padding_bytes(data_len: usize) -> u8 {
+        ((HOT_ACCOUNT_ALIGNMENT - (data_len % HOT_ACCOUNT_ALIGNMENT)) % HOT_ACCOUNT_ALIGNMENT) as u8
+    }
+
+    #[test]
+    fn test_hot_storage_get_account() {
+        // Generate a new temp path that is guaranteed to NOT already have a file.
+ let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_hot_storage_get_account"); + + let mut rng = rand::thread_rng(); + + // create owners + const NUM_OWNERS: usize = 10; + let owners: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_OWNERS) + .collect(); + + // create account data + const NUM_ACCOUNTS: usize = 20; + let account_datas: Vec<_> = (0..NUM_ACCOUNTS) + .map(|i| vec![i as u8; rng.gen_range(0..4096)]) + .collect(); + + // create account metas that link to its data and owner + let account_metas: Vec<_> = (0..NUM_ACCOUNTS) + .map(|i| { + HotAccountMeta::new() + .with_lamports(rng.gen_range(0..u64::MAX)) + .with_owner_offset(OwnerOffset(rng.gen_range(0..NUM_OWNERS) as u32)) + .with_account_data_padding(padding_bytes(account_datas[i].len())) + }) + .collect(); + + // create account addresses + let addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_ACCOUNTS) + .collect(); + + let mut footer = TieredStorageFooter { + account_meta_format: AccountMetaFormat::Hot, + account_entry_count: NUM_ACCOUNTS as u32, + owner_count: NUM_OWNERS as u32, + ..TieredStorageFooter::default() + }; + + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + let mut current_offset = 0; + + // write accounts blocks + let padding_buffer = [0u8; HOT_ACCOUNT_ALIGNMENT]; + let index_writer_entries: Vec<_> = account_metas + .iter() + .zip(account_datas.iter()) + .zip(addresses.iter()) + .map(|((meta, data), address)| { + let prev_offset = current_offset; + current_offset += file.write_pod(meta).unwrap(); + current_offset += file.write_bytes(data).unwrap(); + current_offset += file + .write_bytes(&padding_buffer[0..padding_bytes(data.len()) as usize]) + .unwrap(); + AccountIndexWriterEntry { + address, + offset: HotAccountOffset::new(prev_offset).unwrap(), + } + }) + .collect(); + + // write index blocks + footer.index_block_offset = current_offset as u64; + current_offset += footer + .index_block_format + .write_index_block(&file, &index_writer_entries) + .unwrap(); + + // write owners block + footer.owners_block_offset = current_offset as u64; + OwnersBlock::write_owners_block(&file, &owners).unwrap(); + + footer.write_footer_block(&file).unwrap(); + } + + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + + for i in 0..NUM_ACCOUNTS { + let (stored_meta, next) = hot_storage + .get_account(IndexOffset(i as u32)) + .unwrap() + .unwrap(); + assert_eq!(stored_meta.lamports(), account_metas[i].lamports()); + assert_eq!(stored_meta.data().len(), account_datas[i].len()); + assert_eq!(stored_meta.data(), account_datas[i]); + assert_eq!( + *stored_meta.owner(), + owners[account_metas[i].owner_offset().0 as usize] + ); + assert_eq!(*stored_meta.pubkey(), addresses[i]); + + assert_eq!(i + 1, next); + } + // Make sure it returns None on NUM_ACCOUNTS to allow termination on + // while loop in actual accounts-db read case. 
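+        //
+        // A sketch of such a caller-side loop (hypothetical code):
+        //     let mut index_offset = IndexOffset(0);
+        //     while let Some((account, next)) =
+        //         hot_storage.get_account(index_offset).unwrap()
+        //     {
+        //         // ... consume `account` ...
+        //         index_offset = IndexOffset(next as u32);
+        //     }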
+ assert_matches!( + hot_storage.get_account(IndexOffset(NUM_ACCOUNTS as u32)), + Ok(None) + ); + } } diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index cd8b2a33c82529..279fdc33e51a52 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -1,36 +1,34 @@ use { crate::tiered_storage::{ - file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_type, + file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod, TieredStorageResult, }, + bytemuck::{Pod, Zeroable}, memmap2::Mmap, solana_sdk::pubkey::Pubkey, }; /// The in-memory struct for the writing index block. -/// The actual storage format of a tiered account index entry might be different -/// from this. #[derive(Debug)] -pub struct AccountIndexWriterEntry<'a> { +pub struct AccountIndexWriterEntry<'a, Offset: AccountOffset> { + /// The account address. pub address: &'a Pubkey, - pub block_offset: u64, - pub intra_block_offset: u64, + /// The offset to the account. + pub offset: Offset, } -/// The offset to an account stored inside its accounts block. -/// This struct is used to access the meta and data of an account by looking through -/// its accounts block. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct AccountOffset { - /// The offset to the accounts block that contains the account meta/data. - pub block: usize, -} +/// The offset to an account. +pub trait AccountOffset: Clone + Copy + Pod + Zeroable {} /// The offset to an account/address entry in the accounts index block. /// This can be used to obtain the AccountOffset and address by looking through /// the accounts index block. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct IndexOffset(usize); +#[repr(C)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Pod, Zeroable)] +pub struct IndexOffset(pub u32); + +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::() == 4); /// The index format of a tiered accounts file. #[repr(u16)] @@ -47,28 +45,31 @@ pub struct IndexOffset(usize); )] pub enum IndexBlockFormat { /// This format optimizes the storage size by storing only account addresses - /// and offsets. It skips storing the size of account data by storing account - /// block entries and index block entries in the same order. + /// and block offsets. It skips storing the size of account data by storing + /// account block entries and index block entries in the same order. #[default] - AddressAndOffset = 0, + AddressAndBlockOffsetOnly = 0, } +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::() == 2); + impl IndexBlockFormat { /// Persists the specified index_entries to the specified file and returns /// the total number of bytes written. pub fn write_index_block( &self, file: &TieredStorageFile, - index_entries: &[AccountIndexWriterEntry], + index_entries: &[AccountIndexWriterEntry], ) -> TieredStorageResult { match self { - Self::AddressAndOffset => { + Self::AddressAndBlockOffsetOnly => { let mut bytes_written = 0; for index_entry in index_entries { - bytes_written += file.write_type(index_entry.address)?; + bytes_written += file.write_pod(index_entry.address)?; } for index_entry in index_entries { - bytes_written += file.write_type(&index_entry.block_offset)?; + bytes_written += file.write_pod(&index_entry.offset)?; } Ok(bytes_written) } @@ -78,43 +79,65 @@ impl IndexBlockFormat { /// Returns the address of the account given the specified index. 
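    ///
    /// With `AddressAndBlockOffsetOnly`, the index block stores all account
    /// addresses first, followed by all block offsets (see
    /// `write_index_block` above), so the address of entry `i` is read from
    /// `index_block_offset + size_of::<Pubkey>() * i`.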
pub fn get_account_address<'a>( &self, - map: &'a Mmap, + mmap: &'a Mmap, footer: &TieredStorageFooter, - offset: IndexOffset, + index_offset: IndexOffset, ) -> TieredStorageResult<&'a Pubkey> { let offset = match self { - Self::AddressAndOffset => { - footer.index_block_offset as usize + std::mem::size_of::() * offset.0 + Self::AddressAndBlockOffsetOnly => { + debug_assert!(index_offset.0 < footer.account_entry_count); + footer.index_block_offset as usize + + std::mem::size_of::() * (index_offset.0 as usize) } }; - let (address, _) = get_type::(map, offset)?; + + debug_assert!( + offset.saturating_add(std::mem::size_of::()) + <= footer.owners_block_offset as usize, + "reading IndexOffset ({}) would exceed index block boundary ({}).", + offset, + footer.owners_block_offset, + ); + + let (address, _) = get_pod::(mmap, offset)?; Ok(address) } /// Returns the offset to the account given the specified index. - pub fn get_account_offset( + pub fn get_account_offset( &self, - map: &Mmap, + mmap: &Mmap, footer: &TieredStorageFooter, - offset: IndexOffset, - ) -> TieredStorageResult { - match self { - Self::AddressAndOffset => { - let offset = footer.index_block_offset as usize + index_offset: IndexOffset, + ) -> TieredStorageResult { + let offset = match self { + Self::AddressAndBlockOffsetOnly => { + debug_assert!(index_offset.0 < footer.account_entry_count); + footer.index_block_offset as usize + std::mem::size_of::() * footer.account_entry_count as usize - + offset.0 * std::mem::size_of::(); - let (account_block_offset, _) = get_type(map, offset)?; - Ok(AccountOffset { - block: *account_block_offset, - }) + + std::mem::size_of::() * index_offset.0 as usize } - } + }; + + debug_assert!( + offset.saturating_add(std::mem::size_of::()) + <= footer.owners_block_offset as usize, + "reading IndexOffset ({}) would exceed index block boundary ({}).", + offset, + footer.owners_block_offset, + ); + + let (account_offset, _) = get_pod::(mmap, offset)?; + + Ok(*account_offset) } /// Returns the size of one index entry. 
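    /// For `AddressAndBlockOffsetOnly` this is one `Pubkey` plus one block
    /// offset, matching the two arrays laid out by `write_index_block`.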
- pub fn entry_size(&self) -> usize { + pub fn entry_size(&self) -> usize { match self { - Self::AddressAndOffset => std::mem::size_of::() + std::mem::size_of::(), + Self::AddressAndBlockOffsetOnly => { + std::mem::size_of::() + std::mem::size_of::() + } } } } @@ -122,14 +145,21 @@ impl IndexBlockFormat { #[cfg(test)] mod tests { use { - super::*, crate::tiered_storage::file::TieredStorageFile, memmap2::MmapOptions, rand::Rng, - std::fs::OpenOptions, tempfile::TempDir, + super::*, + crate::tiered_storage::{ + file::TieredStorageFile, + hot::{HotAccountOffset, HOT_ACCOUNT_ALIGNMENT}, + }, + memmap2::MmapOptions, + rand::Rng, + std::fs::OpenOptions, + tempfile::TempDir, }; #[test] fn test_address_and_offset_indexer() { const ENTRY_COUNT: usize = 100; - let footer = TieredStorageFooter { + let mut footer = TieredStorageFooter { account_entry_count: ENTRY_COUNT as u32, ..TieredStorageFooter::default() }; @@ -143,33 +173,182 @@ mod tests { .iter() .map(|address| AccountIndexWriterEntry { address, - block_offset: rng.gen_range(128..2048), - intra_block_offset: 0, + offset: HotAccountOffset::new( + rng.gen_range(0..u32::MAX) as usize * HOT_ACCOUNT_ALIGNMENT, + ) + .unwrap(), }) .collect(); { let file = TieredStorageFile::new_writable(&path).unwrap(); - let indexer = IndexBlockFormat::AddressAndOffset; - indexer.write_index_block(&file, &index_entries).unwrap(); + let indexer = IndexBlockFormat::AddressAndBlockOffsetOnly; + let cursor = indexer.write_index_block(&file, &index_entries).unwrap(); + footer.owners_block_offset = cursor as u64; } - let indexer = IndexBlockFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndBlockOffsetOnly; let file = OpenOptions::new() .read(true) .create(false) .open(&path) .unwrap(); - let map = unsafe { MmapOptions::new().map(&file).unwrap() }; + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; for (i, index_entry) in index_entries.iter().enumerate() { let account_offset = indexer - .get_account_offset(&map, &footer, IndexOffset(i)) + .get_account_offset::(&mmap, &footer, IndexOffset(i as u32)) .unwrap(); - assert_eq!(index_entry.block_offset, account_offset.block as u64); + assert_eq!(index_entry.offset, account_offset); let address = indexer - .get_account_address(&map, &footer, IndexOffset(i)) + .get_account_address(&mmap, &footer, IndexOffset(i as u32)) .unwrap(); assert_eq!(index_entry.address, address); } } + + #[test] + #[should_panic(expected = "index_offset.0 < footer.account_entry_count")] + fn test_get_account_address_out_of_bounds() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_address_out_of_bounds"); + + let footer = TieredStorageFooter { + account_entry_count: 100, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, + ..TieredStorageFooter::default() + }; + + { + // we only write a footer here as the test should hit an assert + // failure before it actually reads the file. 
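+            // (Note: the boundary check in get_account_address() is a
+            // debug_assert!, so this test exercises it in debug builds,
+            // which is how `cargo test` runs by default.)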
+ let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new() + .read(true) + .create(false) + .open(&path) + .unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + footer + .index_block_format + .get_account_address(&mmap, &footer, IndexOffset(footer.account_entry_count)) + .unwrap(); + } + + #[test] + #[should_panic(expected = "would exceed index block boundary")] + fn test_get_account_address_exceeds_index_block_boundary() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_address_exceeds_index_block_boundary"); + + let footer = TieredStorageFooter { + account_entry_count: 100, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, + index_block_offset: 1024, + // only holds one index entry + owners_block_offset: 1024 + std::mem::size_of::() as u64, + ..TieredStorageFooter::default() + }; + + { + // we only write a footer here as the test should hit an assert + // failure before it actually reads the file. + let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new() + .read(true) + .create(false) + .open(&path) + .unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + // IndexOffset does not exceed the account_entry_count but exceeds + // the index block boundary. + footer + .index_block_format + .get_account_address(&mmap, &footer, IndexOffset(2)) + .unwrap(); + } + + #[test] + #[should_panic(expected = "index_offset.0 < footer.account_entry_count")] + fn test_get_account_offset_out_of_bounds() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_offset_out_of_bounds"); + + let footer = TieredStorageFooter { + account_entry_count: 100, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, + ..TieredStorageFooter::default() + }; + + { + // we only write a footer here as the test should hit an assert + // failure before we actually read the file. + let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new() + .read(true) + .create(false) + .open(&path) + .unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + footer + .index_block_format + .get_account_offset::( + &mmap, + &footer, + IndexOffset(footer.account_entry_count), + ) + .unwrap(); + } + + #[test] + #[should_panic(expected = "would exceed index block boundary")] + fn test_get_account_offset_exceeds_index_block_boundary() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_offset_exceeds_index_block_boundary"); + + let footer = TieredStorageFooter { + account_entry_count: 100, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, + index_block_offset: 1024, + // only holds one index entry + owners_block_offset: 1024 + std::mem::size_of::() as u64, + ..TieredStorageFooter::default() + }; + + { + // we only write a footer here as the test should hit an assert + // failure before we actually read the file. 
+ let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new() + .read(true) + .create(false) + .open(&path) + .unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + // IndexOffset does not exceed the account_entry_count but exceeds + // the index block boundary. + footer + .index_block_format + .get_account_offset::(&mmap, &footer, IndexOffset(2)) + .unwrap(); + } } diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 668c6ab93d8310..134b094bf66f1f 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -1,14 +1,16 @@ -#![allow(dead_code)] //! The account meta and related structs for the tiered storage. + use { - crate::accounts_hash::AccountHash, modular_bitfield::prelude::*, + crate::{accounts_hash::AccountHash, tiered_storage::owners::OwnerOffset}, + bytemuck::{Pod, Zeroable}, + modular_bitfield::prelude::*, solana_sdk::stake_history::Epoch, }; /// The struct that handles the account meta flags. #[bitfield(bits = 32)] #[repr(C)] -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Pod, Zeroable)] pub struct AccountMetaFlags { /// whether the account meta has rent epoch pub has_rent_epoch: bool, @@ -18,6 +20,9 @@ pub struct AccountMetaFlags { reserved: B30, } +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::() == 4); + /// A trait that allows different implementations of the account meta that /// support different tiers of the accounts storage. pub trait TieredAccountMeta: Sized { @@ -31,8 +36,8 @@ pub trait TieredAccountMeta: Sized { /// for the account data associated with the current meta. fn with_account_data_padding(self, padding: u8) -> Self; - /// A builder function that initializes the owner's index. - fn with_owner_index(self, index: u32) -> Self; + /// A builder function that initializes the owner offset. + fn with_owner_offset(self, owner_offset: OwnerOffset) -> Self; /// A builder function that initializes the account data size. /// The size here represents the logical data size without compression. @@ -48,8 +53,8 @@ pub trait TieredAccountMeta: Sized { /// Returns the number of padding bytes for the associated account data fn account_data_padding(&self) -> u8; - /// Returns the index to the accounts' owner in the current AccountsFile. - fn owner_index(&self) -> u32; + /// Returns the offset to the accounts' owner in the current AccountsFile. + fn owner_offset(&self) -> OwnerOffset; /// Returns the AccountMetaFlags of the current meta. fn flags(&self) -> &AccountMetaFlags; diff --git a/accounts-db/src/tiered_storage/mmap_utils.rs b/accounts-db/src/tiered_storage/mmap_utils.rs index a1e70a1e617949..610384efd271c4 100644 --- a/accounts-db/src/tiered_storage/mmap_utils.rs +++ b/accounts-db/src/tiered_storage/mmap_utils.rs @@ -2,12 +2,33 @@ use { crate::{accounts_file::ALIGN_BOUNDARY_OFFSET, u64_align}, log::*, memmap2::Mmap, + std::io::Result as IoResult, }; -pub fn get_type(map: &Mmap, offset: usize) -> std::io::Result<(&T, usize)> { - let (data, next) = get_slice(map, offset, std::mem::size_of::())?; +/// Borrows a value of type `T` from `mmap` +/// +/// Type T must be plain ol' data to ensure no undefined behavior. +pub fn get_pod(mmap: &Mmap, offset: usize) -> IoResult<(&T, usize)> { + // SAFETY: Since T is AnyBitPattern, it is safe to cast bytes to T. 
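+    // (AnyBitPattern means every possible bit pattern is a valid T, so no
+    // invalid value can be produced by reinterpreting the mmap bytes.)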
+    unsafe { get_type::<T>(mmap, offset) }
+}
+
+/// Borrows a value of type `T` from `mmap`
+///
+/// Prefer `get_pod()` when possible, because `get_type()` may cause undefined behavior.
+///
+/// # Safety
+///
+/// Caller must ensure casting bytes to T is safe.
+/// Refer to the Safety sections in std::slice::from_raw_parts()
+/// and bytemuck's Pod and AnyBitPattern for more information.
+pub unsafe fn get_type<T>(mmap: &Mmap, offset: usize) -> IoResult<(&T, usize)> {
+    let (data, next) = get_slice(mmap, offset, std::mem::size_of::<T>())?;
     let ptr = data.as_ptr() as *const T;
     debug_assert!(ptr as usize % std::mem::align_of::<T>() == 0);
+    // SAFETY: The caller ensures it is safe to cast bytes to T,
+    // we ensure the size is safe by querying T directly,
+    // and we just checked above to ensure the ptr is aligned for T.
     Ok((unsafe { &*ptr }, next))
 }

@@ -15,23 +36,25 @@ pub fn get_type<T>(map: &Mmap, offset: usize) -> std::io::Result<(&T, usize)> {
 /// doesn't overrun the internal buffer. Otherwise return an Error.
 /// Also return the offset of the first byte after the requested data that
 /// falls on a 64-byte boundary.
-pub fn get_slice(map: &Mmap, offset: usize, size: usize) -> std::io::Result<(&[u8], usize)> {
+pub fn get_slice(mmap: &Mmap, offset: usize, size: usize) -> IoResult<(&[u8], usize)> {
     let (next, overflow) = offset.overflowing_add(size);
-    if overflow || next > map.len() {
+    if overflow || next > mmap.len() {
         error!(
             "Requested offset {} and size {} while mmap only has length {}",
             offset,
             size,
-            map.len()
+            mmap.len()
         );
         return Err(std::io::Error::new(
             std::io::ErrorKind::AddrNotAvailable,
             "Requested offset and data length exceeds the mmap slice",
         ));
     }
-    let data = &map[offset..next];
+    let data = &mmap[offset..next];
     let next = u64_align!(next);
     let ptr = data.as_ptr();
+    // SAFETY: The Mmap ensures the bytes are safe to read, and we just checked
+    // to ensure we don't read past the end of the internal buffer.
     Ok((unsafe { std::slice::from_raw_parts(ptr, size) }, next))
 }
diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs
new file mode 100644
index 00000000000000..41e1f8a6715a3f
--- /dev/null
+++ b/accounts-db/src/tiered_storage/owners.rs
@@ -0,0 +1,101 @@
+use {
+    crate::tiered_storage::{
+        file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod,
+        TieredStorageResult,
+    },
+    memmap2::Mmap,
+    solana_sdk::pubkey::Pubkey,
+};
+
+/// The owners block holds a set of unique addresses of account owners,
+/// and each account meta has an owner_offset field for accessing
+/// its owner's address.
+#[derive(Debug)]
+pub struct OwnersBlock;
+
+/// The offset to an owner entry in the owners block.
+/// This is used to obtain the address of the account owner.
+///
+/// Note that since its internal type is u32, the maximum number of
+/// unique owners in one TieredStorageFile is 2^32.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
+pub struct OwnerOffset(pub u32);
+
+/// OwnersBlock is persisted as consecutive bytes of pubkeys without any
+/// metadata. Each account meta has an owner_offset field for accessing
+/// its owner's address in the OwnersBlock.
+impl OwnersBlock {
+    /// Persists the provided owners' addresses into the specified file.
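+    ///
+    /// Layout sketch: the block is simply the owners' pubkeys written
+    /// back to back, so the owner at OwnerOffset(i) is later read from
+    /// owners_block_offset + size_of::<Pubkey>() * i (see
+    /// get_owner_address below).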
+ pub fn write_owners_block( + file: &TieredStorageFile, + addresses: &[Pubkey], + ) -> TieredStorageResult { + let mut bytes_written = 0; + for address in addresses { + bytes_written += file.write_pod(address)?; + } + + Ok(bytes_written) + } + + /// Returns the owner address associated with the specified owner_offset + /// and footer inside the input mmap. + pub fn get_owner_address<'a>( + mmap: &'a Mmap, + footer: &TieredStorageFooter, + owner_offset: OwnerOffset, + ) -> TieredStorageResult<&'a Pubkey> { + let offset = footer.owners_block_offset as usize + + (std::mem::size_of::() * owner_offset.0 as usize); + let (pubkey, _) = get_pod::(mmap, offset)?; + + Ok(pubkey) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, crate::tiered_storage::file::TieredStorageFile, memmap2::MmapOptions, + std::fs::OpenOptions, tempfile::TempDir, + }; + + #[test] + fn test_owners_block() { + // Generate a new temp path that is guaranteed to NOT already have a file. + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_owners_block"); + const NUM_OWNERS: u32 = 10; + + let addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_OWNERS as usize) + .collect(); + + let footer = TieredStorageFooter { + // Set owners_block_offset to 0 as we didn't write any account + // meta/data nor index block. + owners_block_offset: 0, + ..TieredStorageFooter::default() + }; + + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + + OwnersBlock::write_owners_block(&file, &addresses).unwrap(); + + // while the test only focuses on account metas, writing a footer + // here is necessary to make it a valid tiered-storage file. + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new().read(true).open(path).unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + + for (i, address) in addresses.iter().enumerate() { + assert_eq!( + OwnersBlock::get_owner_address(&mmap, &footer, OwnerOffset(i as u32)).unwrap(), + address + ); + } + } +} diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index 7a6401d62d7a04..bcfe185856ace4 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -1,3 +1,9 @@ +// Re-exported since these have moved to `solana_sdk`. +#[deprecated( + since = "1.18.0", + note = "Please use `solana_sdk::inner_instruction` types instead" +)] +pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList}; use { crate::{ nonce_info::{NonceFull, NonceInfo, NoncePartial}, @@ -34,7 +40,6 @@ pub enum TransactionExecutionResult { Executed { details: TransactionExecutionDetails, programs_modified_by_tx: Box, - programs_updated_only_for_global_cache: Box, }, NotExecuted(TransactionError), } @@ -106,22 +111,6 @@ impl DurableNonceFee { } } -/// An ordered list of compiled instructions that were invoked during a -/// transaction instruction -pub type InnerInstructions = Vec; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct InnerInstruction { - pub instruction: CompiledInstruction, - /// Invocation stack height of this instruction. Instruction stack height - /// starts at 1 for transaction instructions. 
- pub stack_height: u8, -} - -/// A list of compiled instructions that were invoked during each instruction of -/// a transaction -pub type InnerInstructionsList = Vec; - /// Extract the InnerInstructionsList from a TransactionContext pub fn inner_instructions_list_from_instruction_trace( transaction_context: &TransactionContext, diff --git a/accounts-db/src/verify_accounts_hash_in_background.rs b/accounts-db/src/verify_accounts_hash_in_background.rs index d4676cfe128f81..f03e4e0482ce8e 100644 --- a/accounts-db/src/verify_accounts_hash_in_background.rs +++ b/accounts-db/src/verify_accounts_hash_in_background.rs @@ -67,7 +67,7 @@ impl VerifyAccountsHashInBackground { } let result = lock.take().unwrap().join().unwrap(); if !result { - panic!("initial hash verification failed: {result:?}"); + panic!("initial background accounts hash verification failed: {result}"); } // we never have to check again self.verification_complete(); @@ -139,7 +139,7 @@ pub mod tests { } #[test] - #[should_panic(expected = "initial hash verification failed")] + #[should_panic(expected = "initial background accounts hash verification failed")] fn test_panic() { let verify = Arc::new(VerifyAccountsHashInBackground::default()); start_thread_and_return(&verify, false, || {}); diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml index 44453a5e35d2e3..ed791d94499a0f 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -22,7 +22,7 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-perf = { workspace = true } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } diff --git a/banks-client/Cargo.toml b/banks-client/Cargo.toml index 6d5bf5b398e50c..b6739ea452d79c 100644 --- a/banks-client/Cargo.toml +++ b/banks-client/Cargo.toml @@ -22,7 +22,7 @@ tokio-serde = { workspace = true, features = ["bincode"] } [dev-dependencies] solana-banks-server = { workspace = true } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [lib] crate-type = ["lib"] diff --git a/banks-interface/src/lib.rs b/banks-interface/src/lib.rs index 93538e00c176d2..9e2e5092231ae4 100644 --- a/banks-interface/src/lib.rs +++ b/banks-interface/src/lib.rs @@ -8,6 +8,7 @@ use { commitment_config::CommitmentLevel, fee_calculator::FeeCalculator, hash::Hash, + inner_instruction::InnerInstructions, message::Message, pubkey::Pubkey, signature::Signature, @@ -37,6 +38,7 @@ pub struct TransactionSimulationDetails { pub logs: Vec, pub units_consumed: u64, pub return_data: Option, + pub inner_instructions: Option>, } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index a04d542108e923..1fcdce1ad436c5 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -194,11 +194,14 @@ fn simulate_transaction( post_simulation_accounts: _, units_consumed, return_data, - } = bank.simulate_transaction_unchecked(sanitized_transaction); + inner_instructions, + } = bank.simulate_transaction_unchecked(&sanitized_transaction, false); + let simulation_details = TransactionSimulationDetails { logs, units_consumed, return_data, + inner_instructions, }; BanksTransactionResultWithSimulation { 
result: Some(result), @@ -217,7 +220,7 @@ impl Banks for BanksServer { .root_bank() .get_blockhash_last_valid_block_height(blockhash) .unwrap(); - let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); + let signature = transaction.signatures.first().cloned().unwrap_or_default(); let info = TransactionInfo::new( signature, serialize(&transaction).unwrap(), diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 803e820a5bd576..cd40eb1c833c1c 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -44,6 +44,7 @@ thiserror = { workspace = true } [dev-dependencies] serial_test = { workspace = true } solana-local-cluster = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-test-validator = { workspace = true } tempfile = { workspace = true } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 029937f391cccd..bddce402ac6382 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -238,7 +238,7 @@ where // Move on to next chunk self.chunk_index = (self.chunk_index + 1) % self.account_chunks.source.len(); - // Switch directions after transfering for each "chunk" + // Switch directions after transferring for each "chunk" if self.chunk_index == 0 { self.reclaim_lamports_back_to_source_account = !self.reclaim_lamports_back_to_source_account; @@ -1160,17 +1160,17 @@ mod tests { }, }; - fn bank_with_all_features(genesis_config: &GenesisConfig) -> Bank { + fn bank_with_all_features(genesis_config: &GenesisConfig) -> Arc { let mut bank = Bank::new_for_tests(genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - bank + bank.wrap_with_bank_forks_for_tests().0 } #[test] fn test_bench_tps_bank_client() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let config = Config { id, @@ -1191,7 +1191,7 @@ mod tests { fn test_bench_tps_fund_keys() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; let lamports = 20; let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); @@ -1216,7 +1216,7 @@ mod tests { let fee_rate_governor = FeeRateGovernor::new(11, 0); genesis_config.fee_rate_governor = fee_rate_governor; let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; let lamports = 20; let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); @@ -1234,7 +1234,7 @@ mod tests { fn test_bench_tps_create_durable_nonce() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 10; let lamports = 10_000_000; diff --git a/bench-tps/src/send_batch.rs b/bench-tps/src/send_batch.rs index b6f1fe776ff7dc..5ea916530ca23d 100644 --- a/bench-tps/src/send_batch.rs +++ b/bench-tps/src/send_batch.rs @@ -248,9 +248,13 @@ where fn send(&self, client: &Arc) { let mut send_txs = Measure::start("send_and_clone_txs"); let batch: Vec<_> = self.iter().map(|(_keypair, tx)| 
tx.clone()).collect(); - client.send_batch(batch).expect("transfer"); + let result = client.send_batch(batch); send_txs.stop(); - debug!("send {} {}", self.len(), send_txs); + if result.is_err() { + debug!("Failed to send batch {result:?}"); + } else { + debug!("send {} {}", self.len(), send_txs); + } } fn verify( diff --git a/bloom/benches/bloom.rs b/bloom/benches/bloom.rs index 522b45ee6b8963..a0a9ed684a3423 100644 --- a/bloom/benches/bloom.rs +++ b/bloom/benches/bloom.rs @@ -5,7 +5,7 @@ use { bv::BitVec, fnv::FnvHasher, rand::Rng, - solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex}, + solana_bloom::bloom::{Bloom, BloomHashIndex, ConcurrentBloom}, solana_sdk::{ hash::{hash, Hash}, signature::Signature, @@ -128,7 +128,7 @@ fn bench_add_hash_atomic(bencher: &mut Bencher) { .collect(); let mut fail = 0; bencher.iter(|| { - let bloom: AtomicBloom<_> = Bloom::random(1287, 0.1, 7424).into(); + let bloom: ConcurrentBloom<_> = Bloom::random(1287, 0.1, 7424).into(); // Intentionally not using parallelism here, so that this and above // benchmark only compare the bit-vector ops. // For benchmarking the parallel code, change bellow for loop to: diff --git a/bloom/src/bloom.rs b/bloom/src/bloom.rs index d75301f6b61f23..0163b1b8a652bf 100644 --- a/bloom/src/bloom.rs +++ b/bloom/src/bloom.rs @@ -1,14 +1,19 @@ //! Simple Bloom Filter + use { bv::BitVec, fnv::FnvHasher, rand::{self, Rng}, serde::{Deserialize, Serialize}, - solana_sdk::sanitize::{Sanitize, SanitizeError}, + solana_sdk::{ + sanitize::{Sanitize, SanitizeError}, + timing::AtomicInterval, + }, std::{ cmp, fmt, hash::Hasher, marker::PhantomData, + ops::Deref, sync::atomic::{AtomicU64, Ordering}, }, }; @@ -141,16 +146,19 @@ impl> BloomHashIndex for T { } } -pub struct AtomicBloom { +/// Bloom filter that can be used concurrently. +/// Concurrent reads/writes are safe, but are not atomic at the struct level, +/// this means that reads may see partial writes. +pub struct ConcurrentBloom { num_bits: u64, keys: Vec, bits: Vec, _phantom: PhantomData, } -impl From> for AtomicBloom { +impl From> for ConcurrentBloom { fn from(bloom: Bloom) -> Self { - AtomicBloom { + ConcurrentBloom { num_bits: bloom.bits.len(), keys: bloom.keys, bits: bloom @@ -164,7 +172,7 @@ impl From> for AtomicBloom { } } -impl AtomicBloom { +impl ConcurrentBloom { fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) { let pos = key .hash_at_index(hash_index) @@ -199,15 +207,15 @@ impl AtomicBloom { }) } - pub fn clear_for_tests(&mut self) { + pub fn clear(&self) { self.bits.iter().for_each(|bit| { bit.store(0u64, Ordering::Relaxed); }); } } -impl From> for Bloom { - fn from(atomic_bloom: AtomicBloom) -> Self { +impl From> for Bloom { + fn from(atomic_bloom: ConcurrentBloom) -> Self { let bits: Vec<_> = atomic_bloom .bits .into_iter() @@ -225,6 +233,40 @@ impl From> for Bloom { } } +/// Wrapper around `ConcurrentBloom` and `AtomicInterval` so the bloom filter +/// can be cleared periodically. +pub struct ConcurrentBloomInterval { + interval: AtomicInterval, + bloom: ConcurrentBloom, +} + +// Directly allow all methods of `AtomicBloom` to be called on `AtomicBloomInterval`. +impl Deref for ConcurrentBloomInterval { + type Target = ConcurrentBloom; + fn deref(&self) -> &Self::Target { + &self.bloom + } +} + +impl ConcurrentBloomInterval { + /// Create a new filter with the given parameters. + /// See `Bloom::random` for details. 
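+    ///
+    /// Example with hypothetical parameters:
+    /// ```ignore
+    /// let filter = ConcurrentBloomInterval::new(100_000, 0.01, 1_000_000);
+    /// filter.add(&item);          // forwarded to ConcurrentBloom via Deref
+    /// filter.maybe_reset(1_000);  // clears at most once per second
+    /// ```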
+ pub fn new(num_items: usize, false_positive_rate: f64, max_bits: usize) -> Self { + let bloom = Bloom::random(num_items, false_positive_rate, max_bits); + Self { + interval: AtomicInterval::default(), + bloom: ConcurrentBloom::from(bloom), + } + } + + /// Reset the filter if the reset interval has elapsed. + pub fn maybe_reset(&self, reset_interval_ms: u64) { + if self.interval.should_update(reset_interval_ms) { + self.bloom.clear(); + } + } +} + #[cfg(test)] mod test { use { @@ -325,7 +367,7 @@ mod test { let hash_values: Vec<_> = std::iter::repeat_with(generate_random_hash) .take(1200) .collect(); - let bloom: AtomicBloom<_> = Bloom::::random(1287, 0.1, 7424).into(); + let bloom: ConcurrentBloom<_> = Bloom::::random(1287, 0.1, 7424).into(); assert_eq!(bloom.keys.len(), 3); assert_eq!(bloom.num_bits, 6168); assert_eq!(bloom.bits.len(), 97); @@ -360,7 +402,7 @@ mod test { let num_bits_set = bloom.num_bits_set; assert!(num_bits_set > 2000, "# bits set: {num_bits_set}"); // Round-trip with no inserts. - let bloom: AtomicBloom<_> = bloom.into(); + let bloom: ConcurrentBloom<_> = bloom.into(); assert_eq!(bloom.num_bits, 9731); assert_eq!(bloom.bits.len(), (9731 + 63) / 64); for hash_value in &hash_values { @@ -372,7 +414,7 @@ mod test { assert!(bloom.contains(hash_value)); } // Round trip, re-inserting the same hash values. - let bloom: AtomicBloom<_> = bloom.into(); + let bloom: ConcurrentBloom<_> = bloom.into(); hash_values.par_iter().for_each(|v| { bloom.add(v); }); @@ -389,7 +431,7 @@ mod test { let more_hash_values: Vec<_> = std::iter::repeat_with(generate_random_hash) .take(1000) .collect(); - let bloom: AtomicBloom<_> = bloom.into(); + let bloom: ConcurrentBloom<_> = bloom.into(); assert_eq!(bloom.num_bits, 9731); assert_eq!(bloom.bits.len(), (9731 + 63) / 64); more_hash_values.par_iter().for_each(|v| { diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 036743c214dc46..6eb755c8cccc6c 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -116,7 +116,6 @@ pub struct Bucket { /// true if this bucket was loaded (as opposed to created blank). /// When populating, we want to prioritize looking for data on disk that already matches as opposed to writing new data. - #[allow(dead_code)] reused_file_at_startup: bool, } diff --git a/bucket_map/src/bucket_api.rs b/bucket_map/src/bucket_api.rs index e5449a814a5be9..6677d6932e1e82 100644 --- a/bucket_map/src/bucket_api.rs +++ b/bucket_map/src/bucket_api.rs @@ -26,7 +26,6 @@ pub struct BucketApi { /// keeps track of which index file this bucket is currently using /// or at startup, which bucket file this bucket should initially use - #[allow(dead_code)] restartable_bucket: RestartableBucket, } diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index c81c6a1a7a3444..700cc22f25f694 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -219,8 +219,6 @@ impl BucketStorage { offset } - // temporary tag - #[allow(dead_code)] /// load and mmap the file that is this disk bucket if possible pub(crate) fn load_on_restart( path: PathBuf, diff --git a/bucket_map/src/restart.rs b/bucket_map/src/restart.rs index aae4d455fd8fe2..fd921401c653c9 100644 --- a/bucket_map/src/restart.rs +++ b/bucket_map/src/restart.rs @@ -79,7 +79,7 @@ impl RestartableBucket { bucket.random = random; } } - /// retreive the file_name and random that were used prior to the current restart. + /// retrieve the file_name and random that were used prior to the current restart. 
/// This was written into the restart file on the prior run by `set_file`. pub(crate) fn get(&self) -> Option<(u128, u64)> { self.restart.as_ref().map(|restart| { diff --git a/ci/README.md b/ci/README.md index 64af969ae9b292..45ebd39e1d4a94 100644 --- a/ci/README.md +++ b/ci/README.md @@ -73,7 +73,7 @@ sudo CUDA=1 ./setup-new-buildkite-agent/setup-new-machine.sh ### Configure Node for Buildkite-agent based CI -- Install `buildkite-agent` and set up it user environment with: +- Install `buildkite-agent` and set up its user environment with: ```bash sudo ./setup-new-buildkite-agent/setup-buildkite.sh ``` diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index 113b009aa4452e..ad12e1fc000a89 100755 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -48,7 +48,7 @@ affects() { # the worse (affected) return 0 fi - # Assume everyting needs to be tested when any Dockerfile changes + # Assume everything needs to be tested when any Dockerfile changes for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do if [[ ${pattern:0:1} = "!" ]]; then for file in "${affected_files[@]}"; do diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 8535905bfee4d0..fb6b6f90b50233 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -30,7 +30,7 @@ annotate() { fi } -# Assume everyting needs to be tested when this file or any Dockerfile changes +# Assume everything needs to be tested when this file or any Dockerfile changes mandatory_affected_files=() mandatory_affected_files+=(^ci/buildkite-pipeline.sh) mandatory_affected_files+=(^ci/docker-rust/Dockerfile) @@ -156,7 +156,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest "ci/test-docs.sh" 15 + command_step doctest ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -182,7 +182,7 @@ all_test_steps() { cargo-test-sbf$ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-sbf.sh" + - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" @@ -240,6 +240,8 @@ EOF ^ci/rust-version.sh \ ^ci/test-coverage.sh \ ^ci/test-bench.sh \ + ^ci/bench \ + .buildkite/scripts/build-bench.sh \ ; then .buildkite/scripts/build-bench.sh >> "$output_file" else diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index ede70e6229d5f8..eeb087d323ee9a 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -48,7 +48,7 @@ affects() { # the worse (affected) return 0 fi - # Assume everyting needs to be tested when any Dockerfile changes + # Assume everything needs to be tested when any Dockerfile changes for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do if [[ ${pattern:0:1} = "!" ]]; then for file in "${affected_files[@]}"; do @@ -146,7 +146,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest "ci/test-docs.sh" 15 + command_step doctest ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -168,7 +168,7 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-sbf.sh" + - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" diff --git a/ci/check-install-all.sh b/ci/check-install-all.sh new file mode 100755 index 00000000000000..0ffb52a370818c --- /dev/null +++ b/ci/check-install-all.sh @@ -0,0 +1,5 @@ +source scripts/spl-token-cli-version.sh +if [[ -z $splTokenCliVersion ]]; then + echo "On the stable channel, splTokenCliVersion must be set in scripts/spl-token-cli-version.sh" + exit 1 +fi diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index baf7e09632bac6..60d48cc22594f4 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.73.0 +FROM solanalabs/rust:1.75.0 ARG date ARG GRCOV_VERSION=v0.8.18 diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 8619d5e68e30a0..227d5f55d7753b 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG \ - RUST_VERSION=1.73.0 \ + RUST_VERSION=1.75.0 \ GOLANG_VERSION=1.21.3 \ NODE_MAJOR=18 \ SCCACHE_VERSION=v0.5.4 diff --git a/ci/docker-rust/README.md b/ci/docker-rust/README.md index 3984a649098c04..3f818476867be3 100644 --- a/ci/docker-rust/README.md +++ b/ci/docker-rust/README.md @@ -1,9 +1,9 @@ Docker image containing rust and some preinstalled packages used in CI. NOTE: Recreate rust-nightly docker image after this when updating the stable rust -version! Both of docker images must be updated in tandem. +version! Both docker images must be updated in tandem. -This image manually maintained: +This image is manually maintained: 1. Edit `Dockerfile` to match the desired rust version 1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. 1. 
Run `./build.sh` to publish the new image, if you are a member of the [Solana diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh index 3e674d92f4eb25..8108d13a061fd5 100755 --- a/ci/run-sanity.sh +++ b/ci/run-sanity.sh @@ -39,4 +39,5 @@ $solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" conf cp config/ledger/genesis.tar.bz2 config/snapshot-ledger $solana_ledger_tool copy --ledger config/ledger \ --target-db config/snapshot-ledger --starting-slot "$snapshot_slot" --ending-slot "$latest_slot" -$solana_ledger_tool verify --ledger config/snapshot-ledger +$solana_ledger_tool verify --ledger config/snapshot-ledger --block-verification-method blockstore-processor +$solana_ledger_tool verify --ledger config/snapshot-ledger --block-verification-method unified-scheduler diff --git a/ci/rust-version.sh b/ci/rust-version.sh index a38910accda10b..fcfed6bd961243 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -29,7 +29,7 @@ fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2023-10-05 + nightly_version=2023-11-16 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 3a4f15ec23d81f..a2022204df7ad4 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -84,4 +84,8 @@ _ scripts/cargo-for-all-lock-files.sh -- "+${rust_nightly}" fmt --all -- --check _ ci/do-audit.sh +if [[ -n $CI ]] && [[ $CHANNEL = "stable" ]]; then + _ ci/check-install-all.sh +fi + echo --- ok diff --git a/ci/test-stable.sh b/ci/test-stable.sh index f521a6c17c6417..40ee0ae2c40a74 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -107,7 +107,7 @@ test-stable-sbf) _ cargo test \ --manifest-path programs/sbf/Cargo.toml \ --no-default-features --features=sbf_c,sbf_rust assert_instruction_count \ - -- --nocapture &> "${sbf_target_path}"/deploy/instuction_counts.txt + -- --nocapture &> "${sbf_target_path}"/deploy/instruction_counts.txt sbf_dump_archive="sbf-dumps.tar.bz2" rm -f "$sbf_dump_archive" diff --git a/clap-utils/src/nonce.rs b/clap-utils/src/nonce.rs index 34c7011075c32c..514302e6f52e61 100644 --- a/clap-utils/src/nonce.rs +++ b/clap-utils/src/nonce.rs @@ -9,7 +9,7 @@ pub const NONCE_ARG: ArgConstant<'static> = ArgConstant { help: "Provide the nonce account to use when creating a nonced \n\ transaction. Nonced transactions are useful when a transaction \n\ requires a lengthy signing process. Learn more about nonced \n\ - transactions at https://docs.solana.com/offline-signing/durable-nonce", + transactions at https://docs.solanalabs.com/cli/examples/durable-nonce", }; pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant { diff --git a/clap-v3-utils/src/nonce.rs b/clap-v3-utils/src/nonce.rs index 7ea6d5b8fc7284..452c1f14e7c8bb 100644 --- a/clap-v3-utils/src/nonce.rs +++ b/clap-v3-utils/src/nonce.rs @@ -9,7 +9,7 @@ pub const NONCE_ARG: ArgConstant<'static> = ArgConstant { help: "Provide the nonce account to use when creating a nonced \n\ transaction. Nonced transactions are useful when a transaction \n\ requires a lengthy signing process. 
Learn more about nonced \n\ - transactions at https://docs.solana.com/offline-signing/durable-nonce", + transactions at https://docs.solanalabs.com/cli/examples/durable-nonce", }; pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant { diff --git a/cli-config/src/config.rs b/cli-config/src/config.rs index d3dc4bc0aacd59..3e9dfed01917bb 100644 --- a/cli-config/src/config.rs +++ b/cli-config/src/config.rs @@ -34,7 +34,7 @@ pub struct Config { /// /// For local testing, the typical value is `http://localhost:8899`. /// - /// [rpcdocs]: https://docs.solana.com/cluster/rpc-endpoints + /// [rpcdocs]: https://solana.com/docs/core/clusters pub json_rpc_url: String, /// The address to connect to for receiving event notifications. /// diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 8fe188ad7c970e..7e51a05786fcfa 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -2316,6 +2316,26 @@ impl fmt::Display for CliUpgradeableProgramClosed { } } +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliUpgradeableProgramExtended { + pub program_id: String, + pub additional_bytes: u32, +} +impl QuietDisplay for CliUpgradeableProgramExtended {} +impl VerboseDisplay for CliUpgradeableProgramExtended {} +impl fmt::Display for CliUpgradeableProgramExtended { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln!( + f, + "Extended Program Id {} by {} bytes", + &self.program_id, self.additional_bytes, + )?; + Ok(()) + } +} + #[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct CliUpgradeableBuffer { diff --git a/cli-output/src/display.rs b/cli-output/src/display.rs index a157cec8d3ede8..c3465e493b7cf1 100644 --- a/cli-output/src/display.rs +++ b/cli-output/src/display.rs @@ -708,7 +708,7 @@ pub fn new_spinner_progress_bar() -> ProgressBar { progress_bar.set_style( ProgressStyle::default_spinner() .template("{spinner:.green} {wide_msg}") - .expect("ProgresStyle::template direct input to be correct"), + .expect("ProgressStyle::template direct input to be correct"), ); progress_bar.enable_steady_tick(Duration::from_millis(100)); progress_bar diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 01d773ff9eaa4c..b9170ac79ab07c 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -59,6 +59,7 @@ assert_matches = { workspace = true } solana-streamer = { workspace = true } solana-test-validator = { workspace = true } tempfile = { workspace = true } +test-case = { workspace = true } [[bin]] name = "solana" diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs index a1be08a577c07f..0a968e1b74444e 100644 --- a/cli/src/address_lookup_table.rs +++ b/cli/src/address_lookup_table.rs @@ -80,10 +80,12 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_pubkey) .help( - "Lookup table authority address [default: the default configured keypair]. \ - WARNING: Cannot be used for creating a lookup table for a cluster running v1.11 - or earlier which requires the authority to sign for lookup table creation.", - ) + "Lookup table authority address \ + [default: the default configured keypair]. 
\ + WARNING: Cannot be used for creating a lookup table for \ + a cluster running v1.11 or earlier which requires the \ + authority to sign for lookup table creation.", + ), ) .arg( Arg::with_name("authority_signer") @@ -92,7 +94,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .conflicts_with("authority") .validator(is_valid_signer) - .help("Lookup table authority keypair [default: the default configured keypair].") + .help( + "Lookup table authority keypair \ + [default: the default configured keypair].", + ), ) .arg( Arg::with_name("payer") @@ -100,8 +105,11 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("PAYER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Account that will pay rent fees for the created lookup table [default: the default configured keypair]") - ) + .help( + "Account that will pay rent fees for the created lookup table \ + [default: the default configured keypair]", + ), + ), ) .subcommand( SubCommand::with_name("freeze") @@ -113,7 +121,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_pubkey) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("authority") @@ -121,7 +129,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), ) .arg( Arg::with_name("bypass_warning") @@ -140,7 +151,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_pubkey) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("authority") @@ -148,7 +159,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), ) .arg( Arg::with_name("payer") @@ -156,7 +170,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("PAYER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Account that will pay rent fees for the extended lookup table [default: the default configured keypair]") + .help( + "Account that will pay rent fees for the extended lookup \ + table [default: the default configured keypair]", + ), ) .arg( Arg::with_name("addresses") @@ -166,8 +183,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .use_delimiter(true) .required(true) .validator(is_pubkey) - .help("Comma separated list of addresses to append") - ) + .help("Comma separated list of addresses to append"), + ), ) .subcommand( SubCommand::with_name("deactivate") @@ -178,7 +195,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) .required(true) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("authority") @@ -186,7 +203,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), ) .arg( 
Arg::with_name("bypass_warning") @@ -204,7 +224,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) .required(true) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("recipient") @@ -212,7 +232,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("RECIPIENT_ADDRESS") .takes_value(true) .validator(is_pubkey) - .help("Address of the recipient account to deposit the closed account's lamports [default: the default configured keypair]") + .help( + "Address of the recipient account to deposit the closed \ + account's lamports [default: the default configured keypair]", + ), ) .arg( Arg::with_name("authority") @@ -220,8 +243,11 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") - ) + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), + ), ) .subcommand( SubCommand::with_name("get") @@ -231,9 +257,9 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .index(1) .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) - .help("Address of the lookup table to show") - ) - ) + .help("Address of the lookup table to show"), + ), + ), ) } } @@ -593,9 +619,9 @@ fn process_create_lookup_table( } } -pub const FREEZE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ -Once a lookup table is frozen, it can never be modified or unfrozen again. \ -To proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; +pub const FREEZE_LOOKUP_TABLE_WARNING: &str = + "WARNING! Once a lookup table is frozen, it can never be modified or unfrozen again. To \ + proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; fn process_freeze_lookup_table( rpc_client: &RpcClient, @@ -613,9 +639,10 @@ fn process_freeze_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } if !bypass_warning { @@ -671,9 +698,10 @@ fn process_extend_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } let authority_address = authority_signer.pubkey(); @@ -709,10 +737,10 @@ fn process_extend_lookup_table( } } -pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ -Once a lookup table is deactivated, it is no longer usable by transactions. -Deactivated lookup tables may only be closed and cannot be recreated at the same address. \ -To proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; +pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = + "WARNING! Once a lookup table is deactivated, it is no longer usable by transactions. +Deactivated lookup tables may only be closed and cannot be recreated at the same address. 
To \ + proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; fn process_deactivate_lookup_table( rpc_client: &RpcClient, @@ -730,9 +758,10 @@ fn process_deactivate_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } if !bypass_warning { @@ -783,17 +812,19 @@ fn process_close_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; if lookup_table_account.meta.deactivation_slot == u64::MAX { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated lookup tables may be closed", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated \ + lookup tables may be closed", + ) + .into()); } let authority_address = authority_signer.pubkey(); @@ -836,9 +867,10 @@ fn process_show_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 74d9b998badbf8..3706b3e6c2fea9 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -38,7 +38,7 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .validator(is_url_or_moniker) .help( "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + [mainnet-beta, testnet, devnet, localhost]", ), ) .arg( @@ -67,16 +67,19 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A "processed", "confirmed", "finalized", - "recent", // Deprecated as of v1.5.5 - "single", // Deprecated as of v1.5.5 + "recent", // Deprecated as of v1.5.5 + "single", // Deprecated as of v1.5.5 "singleGossip", // Deprecated as of v1.5.5 - "root", // Deprecated as of v1.5.5 - "max", // Deprecated as of v1.5.5 + "root", // Deprecated as of v1.5.5 + "max", // Deprecated as of v1.5.5 ]) .value_name("COMMITMENT_LEVEL") .hide_possible_values(true) .global(true) - .help("Return information at the selected commitment level [possible values: processed, confirmed, finalized]"), + .help( + "Return information at the selected commitment level \ + [possible values: processed, confirmed, finalized]", + ), ) .arg( Arg::with_name("verbose") @@ -207,14 +210,14 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A ) .subcommand( SubCommand::with_name("completion") - .about("Generate completion scripts for various shells") - .arg( - Arg::with_name("shell") - .long("shell") - .short("s") - .takes_value(true) - .possible_values(&["bash", "fish", "zsh", 
"powershell", "elvish"]) - .default_value("bash") - ) + .about("Generate completion scripts for various shells") + .arg( + Arg::with_name("shell") + .long("shell") + .short("s") + .takes_value(true) + .possible_values(&["bash", "fish", "zsh", "powershell", "elvish"]) + .default_value("bash"), + ), ) } diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 5e4e8b95cb2460..e5bf78670d7dca 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -2430,7 +2430,7 @@ mod tests { write_keypair_file(&default_keypair, &default_keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &default_keypair_file); - //Test Transfer Subcommand, SOL + // Test Transfer Subcommand, SOL let from_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let from_pubkey = from_keypair.pubkey(); let from_string = from_pubkey.to_string(); diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index ee683081ed4790..a5162e9498441a 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -103,20 +103,23 @@ impl ClusterQuerySubCommands for App<'_, '_> { .subcommand( SubCommand::with_name("catchup") .about("Wait for a validator to catch up to the cluster") - .arg( - pubkey!(Arg::with_name("node_pubkey") + .arg(pubkey!( + Arg::with_name("node_pubkey") .index(1) .value_name("OUR_VALIDATOR_PUBKEY") .required(false), - "Identity pubkey of the validator"), - ) + "Identity of the validator." + )) .arg( Arg::with_name("node_json_rpc_url") .index(2) .value_name("OUR_URL") .takes_value(true) .validator(is_url) - .help("JSON RPC URL for validator, which is useful for validators with a private RPC service") + .help( + "JSON RPC URL for validator, which is useful for validators with a \ + private RPC service", + ), ) .arg( Arg::with_name("follow") @@ -131,19 +134,19 @@ impl ClusterQuerySubCommands for App<'_, '_> { .value_name("PORT") .default_value(DEFAULT_RPC_PORT_STR) .validator(is_port) - .help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"), + .help( + "Guess Identity pubkey and validator rpc node assuming local \ + (possibly private) validator", + ), ) - .arg( - Arg::with_name("log") - .long("log") - .takes_value(false) - .help("Don't update the progress inplace; instead show updates with its own new lines"), - ), - ) - .subcommand( - SubCommand::with_name("cluster-date") - .about("Get current cluster date, computed from genesis creation time and network time"), + .arg(Arg::with_name("log").long("log").takes_value(false).help( + "Don't update the progress inplace; instead show updates with its own new \ + lines", + )), ) + .subcommand(SubCommand::with_name("cluster-date").about( + "Get current cluster date, computed from genesis creation time and network time", + )) .subcommand( SubCommand::with_name("cluster-version") .about("Get the version of the cluster entrypoint"), @@ -151,94 +154,97 @@ impl ClusterQuerySubCommands for App<'_, '_> { // Deprecated in v1.8.0 .subcommand( SubCommand::with_name("fees") - .about("Display current cluster fees (Deprecated in v1.8.0)") - .arg( - Arg::with_name("blockhash") - .long("blockhash") - .takes_value(true) - .value_name("BLOCKHASH") - .validator(is_hash) - .help("Query fees for BLOCKHASH instead of the the most recent blockhash") - ), + .about("Display current cluster fees (Deprecated in v1.8.0)") + .arg( + Arg::with_name("blockhash") + .long("blockhash") + .takes_value(true) + .value_name("BLOCKHASH") + .validator(is_hash) + .help("Query fees for BLOCKHASH instead of the the most recent blockhash"), + ), ) .subcommand( 
SubCommand::with_name("first-available-block") .about("Get the first available block in the storage"), ) - .subcommand(SubCommand::with_name("block-time") - .about("Get estimated production time of a block") - .alias("get-block-time") - .arg( - Arg::with_name("slot") - .index(1) - .takes_value(true) - .value_name("SLOT") - .help("Slot number of the block to query") - ) - ) - .subcommand(SubCommand::with_name("leader-schedule") - .about("Display leader schedule") - .arg( - Arg::with_name("epoch") - .long("epoch") - .takes_value(true) - .value_name("EPOCH") - .validator(is_epoch) - .help("Epoch to show leader schedule for. [default: current]") - ) - ) .subcommand( - SubCommand::with_name("epoch-info") - .about("Get information about the current epoch") - .alias("get-epoch-info"), + SubCommand::with_name("block-time") + .about("Get estimated production time of a block") + .alias("get-block-time") + .arg( + Arg::with_name("slot") + .index(1) + .takes_value(true) + .value_name("SLOT") + .help("Slot number of the block to query"), + ), ) .subcommand( - SubCommand::with_name("genesis-hash") - .about("Get the genesis hash") - .alias("get-genesis-hash") + SubCommand::with_name("leader-schedule") + .about("Display leader schedule") + .arg( + Arg::with_name("epoch") + .long("epoch") + .takes_value(true) + .value_name("EPOCH") + .validator(is_epoch) + .help("Epoch to show leader schedule for [default: current]"), + ), ) .subcommand( - SubCommand::with_name("slot").about("Get current slot") - .alias("get-slot"), + SubCommand::with_name("epoch-info") + .about("Get information about the current epoch") + .alias("get-epoch-info"), ) .subcommand( - SubCommand::with_name("block-height").about("Get current block height"), + SubCommand::with_name("genesis-hash") + .about("Get the genesis hash") + .alias("get-genesis-hash"), ) .subcommand( - SubCommand::with_name("epoch").about("Get current epoch"), + SubCommand::with_name("slot") + .about("Get current slot") + .alias("get-slot"), ) + .subcommand(SubCommand::with_name("block-height").about("Get current block height")) + .subcommand(SubCommand::with_name("epoch").about("Get current epoch")) .subcommand( - SubCommand::with_name("largest-accounts").about("Get addresses of largest cluster accounts") - .arg( - Arg::with_name("circulating") - .long("circulating") - .takes_value(false) - .help("Filter address list to only circulating accounts") - ) - .arg( - Arg::with_name("non_circulating") - .long("non-circulating") - .takes_value(false) - .conflicts_with("circulating") - .help("Filter address list to only non-circulating accounts") - ), + SubCommand::with_name("largest-accounts") + .about("Get addresses of largest cluster accounts") + .arg( + Arg::with_name("circulating") + .long("circulating") + .takes_value(false) + .help("Filter address list to only circulating accounts"), + ) + .arg( + Arg::with_name("non_circulating") + .long("non-circulating") + .takes_value(false) + .conflicts_with("circulating") + .help("Filter address list to only non-circulating accounts"), + ), ) .subcommand( - SubCommand::with_name("supply").about("Get information about the cluster supply of SOL") - .arg( - Arg::with_name("print_accounts") - .long("print-accounts") - .takes_value(false) - .help("Print list of non-circualting account addresses") - ), + SubCommand::with_name("supply") + .about("Get information about the cluster supply of SOL") + .arg( + Arg::with_name("print_accounts") + .long("print-accounts") + .takes_value(false) + .help("Print list of non-circualting account 
addresses"), + ), ) .subcommand( - SubCommand::with_name("total-supply").about("Get total number of SOL") - .setting(AppSettings::Hidden), + SubCommand::with_name("total-supply") + .about("Get total number of SOL") + .setting(AppSettings::Hidden), ) .subcommand( - SubCommand::with_name("transaction-count").about("Get current transaction count") - .alias("get-transaction-count"), + SubCommand::with_name("transaction-count") + .about("Get current transaction count") + .alias("get-transaction-count"), ) .subcommand( SubCommand::with_name("ping") @@ -265,7 +271,10 @@ impl ClusterQuerySubCommands for App<'_, '_> { .short("D") .long("print-timestamp") .takes_value(false) - .help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"), + .help( + "Print timestamp (unix time + microseconds as in gettimeofday) before \ + each line", + ), ) .arg( Arg::with_name("timeout") @@ -286,20 +295,17 @@ impl ClusterQuerySubCommands for App<'_, '_> { .subcommand( SubCommand::with_name("logs") .about("Stream transaction logs") - .arg( - pubkey!(Arg::with_name("address") - .index(1) - .value_name("ADDRESS"), - "Account address to monitor \ - [default: monitor all transactions except for votes] \ - ") - ) + .arg(pubkey!( + Arg::with_name("address").index(1).value_name("ADDRESS"), + "Account to monitor \ + [default: monitor all transactions except for votes]." + )) .arg( Arg::with_name("include_votes") .long("include-votes") .takes_value(false) .conflicts_with("address") - .help("Include vote transactions when monitoring all transactions") + .help("Include vote transactions when monitoring all transactions"), ), ) .subcommand( @@ -316,13 +322,16 @@ impl ClusterQuerySubCommands for App<'_, '_> { Arg::with_name("slot_limit") .long("slot-limit") .takes_value(true) - .help("Limit results to this many slots from the end of the epoch [default: full epoch]"), + .help( + "Limit results to this many slots from the end of the epoch \ + [default: full epoch]", + ), ), ) .subcommand( SubCommand::with_name("gossip") .about("Show the current gossip network nodes") - .alias("show-gossip") + .alias("show-gossip"), ) .subcommand( SubCommand::with_name("stakes") @@ -333,19 +342,19 @@ impl ClusterQuerySubCommands for App<'_, '_> { .takes_value(false) .help("Display balance in lamports instead of SOL"), ) - .arg( - pubkey!(Arg::with_name("vote_account_pubkeys") + .arg(pubkey!( + Arg::with_name("vote_account_pubkeys") .index(1) .value_name("VOTE_ACCOUNT_PUBKEYS") .multiple(true), - "Only show stake accounts delegated to the provided vote accounts. "), - ) - .arg( - pubkey!(Arg::with_name("withdraw_authority") - .value_name("PUBKEY") - .long("withdraw-authority"), - "Only show stake accounts with the provided withdraw authority. "), - ), + "Only show stake accounts delegated to the provided vote account." + )) + .arg(pubkey!( + Arg::with_name("withdraw_authority") + .value_name("PUBKEY") + .long("withdraw-authority"), + "Only show stake accounts with the provided withdraw authority." 
+ )), ) .subcommand( SubCommand::with_name("validators") @@ -394,7 +403,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { Arg::with_name("keep_unstaked_delinquents") .long("keep-unstaked-delinquents") .takes_value(false) - .help("Don't discard unstaked, delinquent validators") + .help("Don't discard unstaked, delinquent validators"), ) .arg( Arg::with_name("delinquent_slot_distance") @@ -402,25 +411,27 @@ impl ClusterQuerySubCommands for App<'_, '_> { .takes_value(true) .value_name("SLOT_DISTANCE") .validator(is_slot) - .help( - concatcp!( - "Minimum slot distance from the tip to consider a validator delinquent. [default: ", - DELINQUENT_VALIDATOR_SLOT_DISTANCE, - "]", - )) + .help(concatcp!( + "Minimum slot distance from the tip to consider a validator \ + delinquent [default: ", + DELINQUENT_VALIDATOR_SLOT_DISTANCE, + "]", + )), ), ) .subcommand( SubCommand::with_name("transaction-history") - .about("Show historical transactions affecting the given address \ - from newest to oldest") - .arg( - pubkey!(Arg::with_name("address") + .about( + "Show historical transactions affecting the given address from newest to \ + oldest", + ) + .arg(pubkey!( + Arg::with_name("address") .index(1) .value_name("ADDRESS") .required(true), - "Account address"), - ) + "Account to query for transactions." + )) .arg( Arg::with_name("limit") .long("limit") @@ -442,18 +453,22 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("until") .value_name("TRANSACTION_SIGNATURE") .takes_value(true) - .help("List until this transaction signature, if found before limit reached"), + .help( + "List until this transaction signature, if found before limit reached", + ), ) .arg( Arg::with_name("show_transactions") .long("show-transactions") .takes_value(false) .help("Display the full transactions"), - ) + ), ) .subcommand( SubCommand::with_name("wait-for-max-stake") - .about("Wait for the max stake of any one node to drop below a percentage of total.") + .about( + "Wait for the max stake of any one node to drop below a percentage of total.", + ) .arg( Arg::with_name("max_percent") .long("max-percent") @@ -475,7 +490,10 @@ impl ClusterQuerySubCommands for App<'_, '_> { .map(|_| ()) .map_err(|e| e.to_string()) }) - .help("Length of data field in the account to calculate rent for, or moniker: [nonce, stake, system, vote]"), + .help( + "Length of data field in the account to calculate rent for, or \ + moniker: [nonce, stake, system, vote]", + ), ) .arg( Arg::with_name("lamports") @@ -502,8 +520,8 @@ pub fn parse_catchup( // requirement of node_pubkey is relaxed only if our_localhost_port if our_localhost_port.is_none() && node_pubkey.is_none() { return Err(CliError::BadParameter( - "OUR_VALIDATOR_PUBKEY (and possibly OUR_URL) must be specified \ - unless --our-localhost is given" + "OUR_VALIDATOR_PUBKEY (and possibly OUR_URL) must be specified unless --our-localhost \ + is given" .into(), )); } @@ -737,8 +755,7 @@ pub fn process_catchup( if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default { // go to new line to leave this message on console println!( - "Prefering explicitly given rpc ({}) as us, \ - although --our-localhost is given\n", + "Preferring explicitly given rpc ({}) as us, although --our-localhost is given\n", node_json_rpc_url.as_ref().unwrap() ); } else { @@ -754,8 +771,8 @@ pub fn process_catchup( (if node_pubkey.is_some() && node_pubkey != guessed_default { // go to new line to leave this message on console println!( - "Prefering explicitly given node pubkey ({}) as us, \ - although 
--our-localhost is given\n", + "Preferring explicitly given node pubkey ({}) as us, although --our-localhost \ + is given\n", node_pubkey.unwrap() ); node_pubkey @@ -807,13 +824,18 @@ pub fn process_catchup( if reported_node_pubkey != node_pubkey { return Err(format!( - "The identity reported by node RPC URL does not match. Expected: {node_pubkey:?}. Reported: {reported_node_pubkey:?}" + "The identity reported by node RPC URL does not match. Expected: {node_pubkey:?}. \ + Reported: {reported_node_pubkey:?}" ) .into()); } if rpc_client.get_identity()? == node_pubkey { - return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into()); + return Err( + "Both RPC URLs reference the same node, unable to monitor for catchup. Try a \ + different --url" + .into(), + ); } let mut previous_rpc_slot = std::u64::MAX; @@ -1213,44 +1235,45 @@ pub fn process_show_block_production( CliError::RpcRequestError("Failed to deserialize slot history".to_string()) })?; - let (confirmed_blocks, start_slot) = if start_slot >= slot_history.oldest() - && end_slot <= slot_history.newest() - { - // Fast, more reliable path using the SlotHistory sysvar + let (confirmed_blocks, start_slot) = + if start_slot >= slot_history.oldest() && end_slot <= slot_history.newest() { + // Fast, more reliable path using the SlotHistory sysvar - let confirmed_blocks: Vec<_> = (start_slot..=end_slot) - .filter(|slot| slot_history.check(*slot) == slot_history::Check::Found) - .collect(); - (confirmed_blocks, start_slot) - } else { - // Slow, less reliable path using `getBlocks`. - // - // "less reliable" because if the RPC node has holds in its ledger then the block production data will be - incorrect. This condition currently can't be detected over RPC - // - - let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; - if minimum_ledger_slot > end_slot { - return Err(format!( - "Ledger data not available for slots {start_slot} to {end_slot} (minimum ledger slot is {minimum_ledger_slot})" + let confirmed_blocks: Vec<_> = (start_slot..=end_slot) + .filter(|slot| slot_history.check(*slot) == slot_history::Check::Found) + .collect(); + (confirmed_blocks, start_slot) + } else { + // Slow, less reliable path using `getBlocks`. + // + // "less reliable" because if the RPC node has holes in its ledger then the block production data will be + incorrect.
This condition currently can't be detected over RPC + // + + let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; + if minimum_ledger_slot > end_slot { + return Err(format!( + "Ledger data not available for slots {start_slot} to {end_slot} (minimum \ + ledger slot is {minimum_ledger_slot})" ) .into()); - } + } - if minimum_ledger_slot > start_slot { - progress_bar.println(format!( + if minimum_ledger_slot > start_slot { + progress_bar.println(format!( "{}", style(format!( - "Note: Requested start slot was {start_slot} but minimum ledger slot is {minimum_ledger_slot}" + "Note: Requested start slot was {start_slot} but minimum ledger slot is \ + {minimum_ledger_slot}" )) .italic(), )); - start_slot = minimum_ledger_slot; - } + start_slot = minimum_ledger_slot; + } - let confirmed_blocks = rpc_client.get_blocks(start_slot, Some(end_slot))?; - (confirmed_blocks, start_slot) - }; + let confirmed_blocks = rpc_client.get_blocks(start_slot, Some(end_slot))?; + (confirmed_blocks, start_slot) + }; let start_slot_index = (start_slot - first_slot_in_epoch) as usize; let end_slot_index = (end_slot - first_slot_in_epoch) as usize; @@ -1281,7 +1304,8 @@ pub fn process_show_block_production( } progress_bar.set_message(format!( - "Processing {total_slots} slots containing {total_blocks_produced} blocks and {total_slots_skipped} empty slots..." + "Processing {total_slots} slots containing {total_blocks_produced} blocks and \ + {total_slots_skipped} empty slots..." )); let mut confirmed_blocks_index = 0; diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 708ea302b9ac27..65d117c2686c2d 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -240,7 +240,9 @@ impl fmt::Display for CliClusterSoftwareVersions { f, "{}", style(format!( - "{software_version_title:<max_software_version_len$} {stake_percent_title:>max_stake_percent_len$} {rpc_percent_title:>max_rpc_percent_len$}", + "{software_version_title:<max_software_version_len$} {stake_percent_title:>max_stake_percent_len$} \ + {rpc_percent_title:>max_rpc_percent_len$}", )) .bold(), )?; @@ -318,8 +320,12 @@ impl fmt::Display for CliClusterFeatureSets { writeln!( f, "\n{}", - style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster") - .bold())?; + style( + "To activate features the tool and cluster feature sets must match, select a \ + tool version that matches the cluster" + ) + .bold() + )?; } else { if !self.stake_allowed { write!( @@ -349,7 +355,10 @@ impl fmt::Display for CliClusterFeatureSets { f, "{}", style(format!( - "{software_versions_title:<max_software_versions_len$} {stake_percent_title:>max_stake_percent_len$} {rpc_percent_title:>max_rpc_percent_len$}", + "{software_versions_title:<max_software_versions_len$} {stake_percent_title:>max_stake_percent_len$} \ + {rpc_percent_title:>max_rpc_percent_len$}", )) .bold(), )?; @@ -402,8 +411,8 @@ fn check_rpc_genesis_hash( if rpc_genesis_hash != genesis_hash { return Err(format!( "The genesis hash for the specified cluster {cluster_type:?} does not match the \ - genesis hash reported by the specified RPC. Cluster genesis hash: {genesis_hash}, \ - RPC reported genesis hash: {rpc_genesis_hash}" + genesis hash reported by the specified RPC. 
Cluster genesis hash: \ + {genesis_hash}, RPC reported genesis hash: {rpc_genesis_hash}" ) .into()); } @@ -927,11 +936,17 @@ fn process_activate( if !feature_activation_allowed(rpc_client, false)?.0 { match force { - ForceActivation::Almost => - return Err("Add force argument once more to override the sanity check to force feature activation ".into()), - ForceActivation::Yes => println!("FEATURE ACTIVATION FORCED"), - ForceActivation::No => - return Err("Feature activation is not allowed at this time".into()), + ForceActivation::Almost => { + return Err( + "Add force argument once more to override the sanity check to force feature \ + activation" + .into(), + ) + } + ForceActivation::Yes => println!("FEATURE ACTIVATION FORCED"), + ForceActivation::No => { + return Err("Feature activation is not allowed at this time".into()) + } } } diff --git a/cli/src/inflation.rs b/cli/src/inflation.rs index d01354eae43dca..523eee350eb122 100644 --- a/cli/src/inflation.rs +++ b/cli/src/inflation.rs @@ -39,7 +39,7 @@ impl InflationSubCommands for App<'_, '_> { .index(1) .multiple(true) .required(true), - "Address of account to query for rewards. " + "Account to query for rewards." )) .arg( Arg::with_name("rewards_epoch") diff --git a/cli/src/lib.rs b/cli/src/lib.rs index e4e925b5872f1b..9b12f1a1afceba 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -1,7 +1,7 @@ #![allow(clippy::arithmetic_side_effects)] macro_rules! ACCOUNT_STRING { () => { - r#", one of: + r#" Address is one of: * a base58-encoded public key * a path to a keypair file * a hyphen; signals a JSON-encoded keypair on stdin diff --git a/cli/src/main.rs b/cli/src/main.rs index 8e14fbe26016dd..e1b4f94bc4ee86 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -27,7 +27,8 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result { println!( - "{} Either provide the `--config` arg or ensure home directory exists to use the default config location", + "{} Either provide the `--config` arg or ensure home directory exists to \ + use the default config location", style("No config file found.").bold() ); return Ok(false); diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 8ec5b6a23b7182..bc6fd981cea951 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -48,20 +48,20 @@ impl NonceSubCommands for App<'_, '_> { self.subcommand( SubCommand::with_name("authorize-nonce-account") .about("Assign account authority to a new entity") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account. "), - ) - .arg( - pubkey!(Arg::with_name("new_authority") + "Nonce account." + )) + .arg(pubkey!( + Arg::with_name("new_authority") .index(2) .value_name("AUTHORITY_PUBKEY") .required(true), - "Account to be granted authority of the nonce account. "), - ) + "Account to be granted authority of the nonce account." + )) .arg(nonce_authority_arg()) .arg(memo_arg()) .arg(compute_unit_price_arg()), @@ -85,20 +85,26 @@ impl NonceSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_amount_or_all) - .help("The amount to load the nonce account with, in SOL; accepts keyword ALL"), + .help( + "The amount to load the nonce account with, in SOL; accepts keyword \ + ALL", + ), ) - .arg( - pubkey!(Arg::with_name(NONCE_AUTHORITY_ARG.name) + .arg(pubkey!( + Arg::with_name(NONCE_AUTHORITY_ARG.name) + .long(NONCE_AUTHORITY_ARG.long) + .value_name("PUBKEY"), - "Assign noncing authority to another entity. 
"), - ) + "Assign noncing authority to this other entity." + )) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the NONCE_ACCOUNT pubkey", + ), ) .arg(memo_arg()) .arg(compute_unit_price_arg()), @@ -107,24 +113,24 @@ impl NonceSubCommands for App<'_, '_> { SubCommand::with_name("nonce") .about("Get the current nonce value") .alias("get-nonce") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account to display. "), - ), + "Nonce account to display." + )), ) .subcommand( SubCommand::with_name("new-nonce") .about("Generate a new nonce, rendering the existing nonce useless") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account. "), - ) + "Nonce account." + )) .arg(nonce_authority_arg()) .arg(memo_arg()) .arg(compute_unit_price_arg()), @@ -133,13 +139,13 @@ impl NonceSubCommands for App<'_, '_> { SubCommand::with_name("nonce-account") .about("Show the contents of a nonce account") .alias("show-nonce-account") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account to display. "), - ) + "Nonce account to display." + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -150,20 +156,20 @@ impl NonceSubCommands for App<'_, '_> { .subcommand( SubCommand::with_name("withdraw-from-nonce-account") .about("Withdraw SOL from the nonce account") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Nonce account to withdraw from. "), - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Nonce account to withdraw from." + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "The account to which the SOL should be transferred. "), - ) + "Recipient of withdrawn SOL." + )) .arg( Arg::with_name("amount") .index(3) @@ -179,15 +185,17 @@ impl NonceSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("upgrade-nonce-account") - .about("One-time idempotent upgrade of legacy nonce versions \ - in order to bump them out of chain blockhash domain.") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .about( + "One-time idempotent upgrade of legacy nonce versions in order to bump them \ + out of chain blockhash domain.", + ) + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Nonce account to upgrade. "), - ) + "Nonce account to upgrade." 
+ )) .arg(memo_arg()) .arg(compute_unit_price_arg()), ) @@ -502,7 +510,8 @@ pub fn process_create_nonce_account( let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(State::size())?; if lamports < minimum_balance { return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for nonce account to be rent exempt, provided lamports: {lamports}" + "need at least {minimum_balance} lamports for nonce account to be rent exempt, \ + provided lamports: {lamports}" )) .into()); } diff --git a/cli/src/program.rs b/cli/src/program.rs index 4222c732e07fde..72f4ae8451f4cc 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -12,12 +12,19 @@ use { solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_clap_utils::{ - self, hidden_unless_forced, input_parsers::*, input_validators::*, keypair::*, + self, + fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, + hidden_unless_forced, + input_parsers::*, + input_validators::*, + keypair::*, + offline::{OfflineArgs, DUMP_TRANSACTION_MESSAGE, SIGN_ONLY_ARG}, }, solana_cli_output::{ - CliProgram, CliProgramAccountType, CliProgramAuthority, CliProgramBuffer, CliProgramId, - CliUpgradeableBuffer, CliUpgradeableBuffers, CliUpgradeableProgram, - CliUpgradeableProgramClosed, CliUpgradeablePrograms, + return_signers_with_config, CliProgram, CliProgramAccountType, CliProgramAuthority, + CliProgramBuffer, CliProgramId, CliUpgradeableBuffer, CliUpgradeableBuffers, + CliUpgradeableProgram, CliUpgradeableProgramClosed, CliUpgradeableProgramExtended, + CliUpgradeablePrograms, ReturnSignersConfig, }, solana_client::{ connection_cache::ConnectionCache, @@ -35,8 +42,9 @@ use { config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSendTransactionConfig}, filter::{Memcmp, RpcFilterType}, }, + solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::Account, + account::{is_executable, Account}, account_utils::StateMut, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, @@ -63,15 +71,16 @@ use { }, }; -pub const CLOSE_PROGRAM_WARNING: &str = "WARNING! \ -Closed programs cannot be recreated at the same program id. \ -Once a program is closed, it can never be invoked again. \ -To proceed with closing, rerun the `close` command with the `--bypass-warning` flag"; +pub const CLOSE_PROGRAM_WARNING: &str = "WARNING! Closed programs cannot be recreated at the same \ + program id. Once a program is closed, it can never be \ + invoked again. 
To proceed with closing, rerun the \ + `close` command with the `--bypass-warning` flag"; #[derive(Debug, PartialEq, Eq)] pub enum ProgramCliCommand { Deploy { program_location: Option<String>, + fee_payer_signer_index: SignerIndex, program_signer_index: Option<SignerIndex>, program_pubkey: Option<Pubkey>, buffer_signer_index: Option<SignerIndex>, @@ -82,8 +91,18 @@ pub enum ProgramCliCommand { allow_excessive_balance: bool, skip_fee_check: bool, }, + Upgrade { + fee_payer_signer_index: SignerIndex, + program_pubkey: Pubkey, + buffer_pubkey: Pubkey, + upgrade_authority_signer_index: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, + }, WriteBuffer { program_location: String, + fee_payer_signer_index: SignerIndex, buffer_signer_index: Option<SignerIndex>, buffer_pubkey: Option<Pubkey>, buffer_authority_signer_index: SignerIndex, @@ -124,6 +143,10 @@ pub enum ProgramCliCommand { use_lamports_unit: bool, bypass_warning: bool, }, + ExtendProgram { + program_pubkey: Pubkey, + additional_bytes: u32, + }, } pub trait ProgramSubCommands { @@ -141,7 +164,7 @@ impl ProgramSubCommands for App<'_, '_> { .long("skip-fee-check") .hidden(hidden_unless_forced()) .takes_value(false) - .global(true) + .global(true), ) .subcommand( SubCommand::with_name("deploy") @@ -153,14 +176,17 @@ impl ProgramSubCommands for App<'_, '_> { .takes_value(true) .help("/path/to/program.so"), ) + .arg(fee_payer_arg()) .arg( Arg::with_name("buffer") .long("buffer") .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Intermediate buffer account to write data to, which can be used to resume a failed deploy \ - [default: random address]") + .help( + "Intermediate buffer account to write data to, which can be \ + used to resume a failed deploy [default: random address]", + ), ) .arg( Arg::with_name("upgrade_authority") @@ -168,19 +194,22 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("UPGRADE_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Upgrade authority [default: the default configured keypair]") + .help( + "Upgrade authority [default: the default configured keypair]", + ), ) - .arg( - pubkey!(Arg::with_name("program_id") + .arg(pubkey!( + Arg::with_name("program_id") .long("program-id") .value_name("PROGRAM_ID"), - "Executable program's address, must be a keypair for initial deploys, can be a pubkey for upgrades \ - [default: address of keypair at /path/to/program-keypair.json if present, otherwise a random address]"), - ) + "Executable program; must be a signer for initial deploys, \ + can be an address for upgrades [default: address of keypair at \ + /path/to/program-keypair.json if present, otherwise a random address]." 
+ )) .arg( Arg::with_name("final") .long("final") - .help("The program will not be upgradeable") + .help("The program will not be upgradeable"), ) .arg( Arg::with_name("max_len") @@ -188,16 +217,51 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("max_len") .takes_value(true) .required(false) - .help("Maximum length of the upgradeable program \ - [default: twice the length of the original deployed program]") + .help( + "Maximum length of the upgradeable program \ + [default: twice the length of the original deployed program]", + ), ) .arg( Arg::with_name("allow_excessive_balance") .long("allow-excessive-deploy-account-balance") .takes_value(false) - .help("Use the designated program id even if the account already holds a large balance of SOL") + .help( + "Use the designated program id even if the account already \ + holds a large balance of SOL", + ), ), ) + .subcommand( + SubCommand::with_name("upgrade") + .about("Upgrade an upgradeable program") + .arg(pubkey!( + Arg::with_name("buffer") + .index(1) + .required(true) + .value_name("BUFFER_PUBKEY"), + "Intermediate buffer account with new program data" + )) + .arg(pubkey!( + Arg::with_name("program_id") + .index(2) + .required(true) + .value_name("PROGRAM_ID"), + "Executable program's address (pubkey)" + )) + .arg(fee_payer_arg()) + .arg( + Arg::with_name("upgrade_authority") + .long("upgrade-authority") + .value_name("UPGRADE_AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help( + "Upgrade authority [default: the default configured keypair]", + ), + ) + .offline_args(), + ) .subcommand( SubCommand::with_name("write-buffer") .about("Writes a program into a buffer account") @@ -209,13 +273,16 @@ impl ProgramSubCommands for App<'_, '_> { .required(true) .help("/path/to/program.so"), ) + .arg(fee_payer_arg()) .arg( Arg::with_name("buffer") .long("buffer") .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Buffer account to write data into [default: random address]") + .help( + "Buffer account to write data into [default: random address]", + ), ) .arg( Arg::with_name("buffer_authority") @@ -223,7 +290,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("BUFFER_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Buffer authority [default: the default configured keypair]") + .help("Buffer authority [default: the default configured keypair]"), ) .arg( Arg::with_name("max_len") @@ -231,8 +298,10 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("max_len") .takes_value(true) .required(false) - .help("Maximum length of the upgradeable program \ - [default: twice the length of the original deployed program]") + .help( + "Maximum length of the upgradeable program \ + [default: twice the length of the original deployed program]", + ), ), ) .subcommand( @@ -244,7 +313,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("BUFFER_PUBKEY") .takes_value(true) .required(true) - .help("Public key of the buffer") + .help("Public key of the buffer"), ) .arg( Arg::with_name("buffer_authority") @@ -252,15 +321,15 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("BUFFER_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Buffer authority [default: the default configured keypair]") + .help("Buffer authority [default: the default configured keypair]"), ) - .arg( - pubkey!(Arg::with_name("new_buffer_authority") + .arg(pubkey!( + Arg::with_name("new_buffer_authority") .long("new-buffer-authority") 
.value_name("NEW_BUFFER_AUTHORITY") .required(true), - "Address of the new buffer authority"), - ) + "New buffer authority." + )), ) .subcommand( SubCommand::with_name("set-upgrade-authority") @@ -271,7 +340,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("PROGRAM_ADDRESS") .takes_value(true) .required(true) - .help("Address of the program to upgrade") + .help("Address of the program to upgrade"), ) .arg( Arg::with_name("upgrade_authority") @@ -279,7 +348,9 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("UPGRADE_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Upgrade authority [default: the default configured keypair]") + .help( + "Upgrade authority [default: the default configured keypair]", + ), ) .arg( Arg::with_name("new_upgrade_authority") @@ -287,21 +358,32 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("NEW_UPGRADE_AUTHORITY") .required_unless("final") .takes_value(true) - .help("New upgrade authority (keypair or pubkey). It is strongly recommended to pass in a keypair to prevent mistakes in setting the upgrade authority. You can opt out of this behavior by passing --skip-new-upgrade-authority-signer-check if you are really confident that you are setting the correct authority. Alternatively, If you wish to make the program immutable, you should ignore this arg and pass the --final flag." - ) + .help( + "New upgrade authority (keypair or pubkey). It is strongly \ + recommended to pass in a keypair to prevent mistakes in \ + setting the upgrade authority. You can opt out of this \ + behavior by passing \ + --skip-new-upgrade-authority-signer-check if you are really \ + confident that you are setting the correct authority. \ + Alternatively, If you wish to make the program immutable, \ + you should ignore this arg and pass the --final flag.", + ), ) .arg( Arg::with_name("final") .long("final") .conflicts_with("new_upgrade_authority") - .help("The program will not be upgradeable") + .help("The program will not be upgradeable"), ) .arg( Arg::with_name("skip_new_upgrade_authority_signer_check") .long("skip-new-upgrade-authority-signer-check") .requires("new_upgrade_authority") .takes_value(false) - .help("Set this flag if you don't want the new authority to sign the set-upgrade-authority transaction."), + .help( + "Set this flag if you don't want the new authority to sign \ + the set-upgrade-authority transaction.", + ), ), ) .subcommand( @@ -312,7 +394,7 @@ impl ProgramSubCommands for App<'_, '_> { .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) - .help("Address of the buffer or program to show") + .help("Address of the buffer or program to show"), ) .arg( Arg::with_name("programs") @@ -320,7 +402,7 @@ impl ProgramSubCommands for App<'_, '_> { .conflicts_with("account") .conflicts_with("buffers") .required_unless_one(&["account", "buffers"]) - .help("Show every upgradeable program that matches the authority") + .help("Show every upgradeable program that matches the authority"), ) .arg( Arg::with_name("buffers") @@ -328,22 +410,22 @@ impl ProgramSubCommands for App<'_, '_> { .conflicts_with("account") .conflicts_with("programs") .required_unless_one(&["account", "programs"]) - .help("Show every upgradeable buffer that matches the authority") + .help("Show every upgradeable buffer that matches the authority"), ) .arg( Arg::with_name("all") .long("all") .conflicts_with("account") .conflicts_with("buffer_authority") - .help("Show accounts for all authorities") + .help("Show accounts for all authorities"), ) - .arg( - 
pubkey!(Arg::with_name("buffer_authority") + .arg(pubkey!( + Arg::with_name("buffer_authority") .long("buffer-authority") .value_name("AUTHORITY") .conflicts_with("all"), - "Authority [default: the default configured keypair]"), - ) + "Authority [default: the default configured keypair]." + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -360,7 +442,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("ACCOUNT_ADDRESS") .takes_value(true) .required(true) - .help("Address of the buffer or program") + .help("Address of the buffer or program"), ) .arg( Arg::with_name("output_location") @@ -386,7 +468,7 @@ impl ProgramSubCommands for App<'_, '_> { .long("buffers") .conflicts_with("account") .required_unless("account") - .help("Close all buffer accounts that match the authority") + .help("Close all buffer accounts that match the authority"), ) .arg( Arg::with_name("authority") @@ -395,15 +477,18 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Upgrade or buffer authority [default: the default configured keypair]") + .help( + "Upgrade or buffer authority [default: the default configured \ + keypair]", + ), ) - - .arg( - pubkey!(Arg::with_name("recipient_account") + .arg(pubkey!( + Arg::with_name("recipient_account") .long("recipient") .value_name("RECIPIENT_ADDRESS"), - "Address of the account to deposit the closed account's lamports [default: the default configured keypair]"), - ) + "Recipient of closed account's lamports \ + [default: the default configured keypair]." + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -417,11 +502,41 @@ impl ProgramSubCommands for App<'_, '_> { .help("Bypass the permanent program closure warning"), ), ) + .subcommand( + SubCommand::with_name("extend") + .about( + "Extend the length of an upgradeable program to deploy larger programs", + ) + .arg( + Arg::with_name("program_id") + .index(1) + .value_name("PROGRAM_ID") + .takes_value(true) + .required(true) + .validator(is_valid_pubkey) + .help("Address of the program to extend"), + ) + .arg( + Arg::with_name("additional_bytes") + .index(2) + .value_name("ADDITIONAL_BYTES") + .takes_value(true) + .required(true) + .validator(is_parsable::) + .help( + "Number of bytes that will be allocated for the program's \ + data account", + ), + ), + ), ) .subcommand( SubCommand::with_name("deploy") - .about("Deploy has been removed. Use `solana program deploy` instead to deploy upgradeable programs") - .setting(AppSettings::Hidden) + .about( + "Deploy has been removed. 
Use `solana program deploy` instead to deploy \ + upgradeable programs", + ) + .setting(AppSettings::Hidden), ) } } @@ -440,9 +555,13 @@ pub fn parse_program_subcommand( let response = match (subcommand, sub_matches) { ("deploy", Some(matches)) => { - let mut bulk_signers = vec![Some( - default_signer.signer_from_path(matches, wallet_manager)?, - )]; + let (fee_payer, fee_payer_pubkey) = + signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![ + Some(default_signer.signer_from_path(matches, wallet_manager)?), + fee_payer, // if None, default signer will be supplied + ]; let program_location = matches .value_of("program_location") @@ -478,6 +597,7 @@ pub fn parse_program_subcommand( CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location, + fee_payer_signer_index: signer_info.index_of(fee_payer_pubkey).unwrap(), program_signer_index: signer_info.index_of_or_none(program_pubkey), program_pubkey, buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), @@ -493,10 +613,55 @@ pub fn parse_program_subcommand( signers: signer_info.signers, } } + ("upgrade", Some(matches)) => { + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); + + let buffer_pubkey = pubkey_of_signer(matches, "buffer", wallet_manager) + .unwrap() + .unwrap(); + let program_pubkey = pubkey_of_signer(matches, "program_id", wallet_manager) + .unwrap() + .unwrap(); + + let (fee_payer, fee_payer_pubkey) = + signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![ + fee_payer, // if None, default signer will be supplied + ]; + + let (upgrade_authority, upgrade_authority_pubkey) = + signer_of(matches, "upgrade_authority", wallet_manager)?; + bulk_signers.push(upgrade_authority); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index: signer_info.index_of(fee_payer_pubkey).unwrap(), + program_pubkey, + buffer_pubkey, + upgrade_authority_signer_index: signer_info + .index_of(upgrade_authority_pubkey) + .unwrap(), + sign_only, + dump_transaction_message, + blockhash_query, + }), + signers: signer_info.signers, + } + } ("write-buffer", Some(matches)) => { - let mut bulk_signers = vec![Some( - default_signer.signer_from_path(matches, wallet_manager)?, - )]; + let (fee_payer, fee_payer_pubkey) = + signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![ + Some(default_signer.signer_from_path(matches, wallet_manager)?), + fee_payer, // if None, default signer will be supplied + ]; let buffer_pubkey = if let Ok((buffer_signer, Some(buffer_pubkey))) = signer_of(matches, "buffer", wallet_manager) @@ -519,6 +684,7 @@ pub fn parse_program_subcommand( CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: matches.value_of("program_location").unwrap().to_string(), + fee_payer_signer_index: signer_info.index_of(fee_payer_pubkey).unwrap(), buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), buffer_pubkey, buffer_authority_signer_index: signer_info @@ -675,6 +841,26 @@ pub fn parse_program_subcommand( signers: signer_info.signers, } } + ("extend", Some(matches)) => { + let program_pubkey = pubkey_of(matches, "program_id").unwrap(); + let 
additional_bytes = value_of(matches, "additional_bytes").unwrap(); + + let signer_info = default_signer.generate_unique_signers( + vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )], + matches, + wallet_manager, + )?; + + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::ExtendProgram { + program_pubkey, + additional_bytes, + }), + signers: signer_info.signers, + } + } _ => unreachable!(), }; Ok(response) } @@ -688,6 +874,7 @@ pub fn process_program_subcommand( match program_subcommand { ProgramCliCommand::Deploy { program_location, + fee_payer_signer_index, program_signer_index, program_pubkey, buffer_signer_index, @@ -701,6 +888,7 @@ pub fn process_program_subcommand( rpc_client, config, program_location, + *fee_payer_signer_index, *program_signer_index, *program_pubkey, *buffer_signer_index, @@ -711,8 +899,28 @@ pub fn process_program_subcommand( *allow_excessive_balance, *skip_fee_check, ), + ProgramCliCommand::Upgrade { + fee_payer_signer_index, + program_pubkey, + buffer_pubkey, + upgrade_authority_signer_index, + sign_only, + dump_transaction_message, + blockhash_query, + } => process_program_upgrade( + rpc_client, + config, + *fee_payer_signer_index, + *program_pubkey, + *buffer_pubkey, + *upgrade_authority_signer_index, + *sign_only, + *dump_transaction_message, + blockhash_query, + ), ProgramCliCommand::WriteBuffer { program_location, + fee_payer_signer_index, buffer_signer_index, buffer_pubkey, buffer_authority_signer_index, @@ -722,6 +930,7 @@ pub fn process_program_subcommand( rpc_client, config, program_location, + *fee_payer_signer_index, *buffer_signer_index, *buffer_pubkey, *buffer_authority_signer_index, @@ -799,6 +1008,10 @@ pub fn process_program_subcommand( *use_lamports_unit, *bypass_warning, ), + ProgramCliCommand::ExtendProgram { + program_pubkey, + additional_bytes, + } => process_extend_program(&rpc_client, config, *program_pubkey, *additional_bytes), } } @@ -823,12 +1036,22 @@ fn get_default_program_keypair(program_location: &Option<String>) -> Keypair { program_keypair } -/// Deploy using upgradeable loader +fn is_account_executable(account: &Account) -> bool { + if account.owner == bpf_loader_deprecated::id() || account.owner == bpf_loader::id() { + account.executable + } else { + let feature_set = FeatureSet::all_enabled(); + is_executable(account, &feature_set) + } +} + +/// Deploy program using upgradeable loader. 
It can also process program upgrades #[allow(clippy::too_many_arguments)] fn process_program_deploy( rpc_client: Arc<RpcClient>, config: &CliConfig, program_location: &Option<String>, + fee_payer_signer_index: SignerIndex, program_signer_index: Option<SignerIndex>, program_pubkey: Option<Pubkey>, buffer_signer_index: Option<SignerIndex>, @@ -839,7 +1062,10 @@ fn process_program_deploy( allow_excessive_balance: bool, skip_fee_check: bool, ) -> ProcessResult { - let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; + let fee_payer_signer = config.signers[fee_payer_signer_index]; + let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; + + let (buffer_words, buffer_mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let (buffer_provided, buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index { (true, Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(pubkey) = buffer_pubkey { @@ -851,7 +1077,6 @@ fn process_program_deploy( buffer_keypair.pubkey(), ) }; - let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; let default_program_keypair = get_default_program_keypair(program_location); let (program_signer, program_pubkey) = if let Some(i) = program_signer_index { @@ -865,7 +1090,7 @@ fn process_program_deploy( ) }; - let do_deploy = if let Some(account) = rpc_client + let do_initial_deploy = if let Some(account) = rpc_client .get_account_with_commitment(&program_pubkey, config.commitment)? .value { @@ -876,7 +1101,7 @@ fn process_program_deploy( .into()); } - if !account.executable { + if !is_account_executable(&account) { // Continue an initial deploy true } else if let Ok(UpgradeableLoaderState::Program { @@ -932,61 +1157,18 @@ fn process_program_deploy( let program_len = program_data.len(); (program_data, program_len) } else if buffer_provided { - // Check supplied buffer account - if let Some(account) = rpc_client - .get_account_with_commitment(&buffer_pubkey, config.commitment)? - .value - { - if !bpf_loader_upgradeable::check_id(&account.owner) { - return Err(format!( - "Buffer account {buffer_pubkey} is not owned by the BPF Upgradeable Loader", - ) - .into()); - } - - match account.state() { - Ok(UpgradeableLoaderState::Buffer { .. }) => { - // continue if buffer is initialized - } - Ok(UpgradeableLoaderState::Program { .. }) => { - return Err( - format!("Cannot use program account {buffer_pubkey} as buffer").into(), - ); - } - Ok(UpgradeableLoaderState::ProgramData { .. 
}) => { - return Err(format!( - "Cannot use program data account {buffer_pubkey} as buffer", - ) - .into()) - } - Ok(UpgradeableLoaderState::Uninitialized) => { - return Err(format!("Buffer account {buffer_pubkey} is not initialized").into()); - } - Err(_) => { - return Err( - format!("Buffer account {buffer_pubkey} could not be deserialized").into(), - ) - } - }; - - let program_len = account - .data - .len() - .saturating_sub(UpgradeableLoaderState::size_of_buffer_metadata()); - - (vec![], program_len) - } else { - return Err(format!( - "Buffer account {buffer_pubkey} not found, was it already consumed?", - ) - .into()); - } + ( + vec![], + fetch_buffer_len(&rpc_client, config, buffer_pubkey)?, + ) } else { return Err("Program location required if buffer not supplied".into()); }; - let programdata_len = if let Some(len) = max_len { + let program_data_max_len = if let Some(len) = max_len { if program_len > len { - return Err("Max length specified not large enough".into()); + return Err( + "Max length specified not large enough to accommodate desired program".into(), ); } len } else if is_final { @@ -994,11 +1176,12 @@ fn process_program_deploy( } else { program_len * 2 }; - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_programdata(programdata_len), + + let min_rent_exempt_program_data_balance = rpc_client.get_minimum_balance_for_rent_exemption( + UpgradeableLoaderState::size_of_programdata(program_data_max_len), )?; - let result = if do_deploy { + let result = if do_initial_deploy { if program_signer.is_none() { return Err( "Initial deployments require a keypair be provided for the program id".into(), ); } do_process_program_write_and_deploy( rpc_client.clone(), config, &program_data, program_len, - programdata_len, - minimum_balance, + program_data_max_len, + min_rent_exempt_program_data_balance, &bpf_loader_upgradeable::id(), + fee_payer_signer, Some(&[program_signer.unwrap(), upgrade_authority_signer]), buffer_signer, &buffer_pubkey, @@ -1024,8 +1208,11 @@ fn process_program_deploy( rpc_client.clone(), config, &program_data, + program_len, + min_rent_exempt_program_data_balance, + fee_payer_signer, &program_pubkey, - config.signers[upgrade_authority_signer_index], + upgrade_authority_signer, &buffer_pubkey, buffer_signer, skip_fee_check, @@ -1041,22 +1228,141 @@ fn process_program_deploy( None, )?; } - if result.is_err() && buffer_signer_index.is_none() { - report_ephemeral_mnemonic(words, mnemonic); + if result.is_err() && !buffer_provided { + // We might have deployed a "temporary" buffer but failed to deploy our program from it. + // Report the mnemonic to the user so they can retry the deploy re-using the same buffer. + report_ephemeral_mnemonic(buffer_words, buffer_mnemonic); } result } +fn fetch_buffer_len( + rpc_client: &RpcClient, + config: &CliConfig, + buffer_pubkey: Pubkey, +) -> Result<usize, Box<dyn std::error::Error>> { + // Check supplied buffer account + if let Some(account) = rpc_client + .get_account_with_commitment(&buffer_pubkey, config.commitment)? + .value + { + if !bpf_loader_upgradeable::check_id(&account.owner) { + return Err(format!( + "Buffer account {buffer_pubkey} is not owned by the BPF Upgradeable Loader", + ) + .into()); + } + + match account.state() { + Ok(UpgradeableLoaderState::Buffer { .. }) => { + // continue if buffer is initialized + } + Ok(UpgradeableLoaderState::Program { .. }) => { + return Err(format!("Cannot use program account {buffer_pubkey} as buffer").into()); + } + Ok(UpgradeableLoaderState::ProgramData { .. 
}) => { + return Err( + format!("Cannot use program data account {buffer_pubkey} as buffer",).into(), + ) + } + Ok(UpgradeableLoaderState::Uninitialized) => { + return Err(format!("Buffer account {buffer_pubkey} is not initialized").into()); + } + Err(_) => { + return Err( + format!("Buffer account {buffer_pubkey} could not be deserialized").into(), + ) + } + }; + + let program_len = account + .data + .len() + .saturating_sub(UpgradeableLoaderState::size_of_buffer_metadata()); + + Ok(program_len) + } else { + Err(format!("Buffer account {buffer_pubkey} not found, was it already consumed?",).into()) + } +} + +/// Upgrade existing program using upgradeable loader +#[allow(clippy::too_many_arguments)] +fn process_program_upgrade( + rpc_client: Arc<RpcClient>, + config: &CliConfig, + fee_payer_signer_index: SignerIndex, + program_id: Pubkey, + buffer_pubkey: Pubkey, + upgrade_authority_signer_index: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, +) -> ProcessResult { + let fee_payer_signer = config.signers[fee_payer_signer_index]; + let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; + + let blockhash = blockhash_query.get_blockhash(&rpc_client, config.commitment)?; + let message = Message::new_with_blockhash( + &[bpf_loader_upgradeable::upgrade( + &program_id, + &buffer_pubkey, + &upgrade_authority_signer.pubkey(), + &fee_payer_signer.pubkey(), + )], + Some(&fee_payer_signer.pubkey()), + &blockhash, + ); + + if sign_only { + let mut tx = Transaction::new_unsigned(message); + let signers = &[fee_payer_signer, upgrade_authority_signer]; + // Using try_partial_sign here because fee_payer_signer might not be the fee payer we + // end up using for this transaction (it might be NullSigner in `--sign-only` mode). + tx.try_partial_sign(signers, blockhash)?; + return_signers_with_config( + &tx, + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, + }, + ) + } else { + let fee = rpc_client.get_fee_for_message(&message)?; + check_account_for_spend_and_fee_with_commitment( + &rpc_client, + &fee_payer_signer.pubkey(), + 0, + fee, + config.commitment, + )?; + let mut tx = Transaction::new_unsigned(message); + let signers = &[fee_payer_signer, upgrade_authority_signer]; + tx.try_sign(signers, blockhash)?; + rpc_client + .send_and_confirm_transaction_with_spinner(&tx) + .map_err(|e| format!("Upgrading program failed: {e}"))?; + let program_id = CliProgramId { + program_id: program_id.to_string(), + }; + Ok(config.output_format.formatted_string(&program_id)) + } +} + fn process_write_buffer( rpc_client: Arc<RpcClient>, config: &CliConfig, program_location: &str, + fee_payer_signer_index: SignerIndex, buffer_signer_index: Option<SignerIndex>, buffer_pubkey: Option<Pubkey>, buffer_authority_signer_index: SignerIndex, max_len: Option<usize>, skip_fee_check: bool, ) -> ProcessResult { + let fee_payer_signer = config.signers[fee_payer_signer_index]; + let buffer_authority = config.signers[buffer_authority_signer_index]; + // Create ephemeral keypair to use for Buffer account, if not provided let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let (buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index { @@ -1069,7 +1375,6 @@ fn process_write_buffer( buffer_keypair.pubkey(), ) }; - let buffer_authority = config.signers[buffer_authority_signer_index]; if let Some(account) = rpc_client .get_account_with_commitment(&buffer_pubkey, config.commitment)? 
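
The `process_program_upgrade` path added above works offline because a program upgrade is a single instruction whose only required signers are the fee payer and the upgrade authority. A minimal sketch of that message construction (not part of the patch; all pubkeys are placeholders, and it assumes the fee payer and authority are distinct keys):

    use solana_sdk::{bpf_loader_upgradeable, message::Message, pubkey::Pubkey};

    // Build the same upgrade message that `process_program_upgrade` assembles.
    // `fee_payer` also serves as the spill address that receives reclaimed
    // lamports when the buffer is consumed.
    fn sketch_upgrade_message(
        program_id: &Pubkey,
        buffer: &Pubkey,
        upgrade_authority: &Pubkey,
        fee_payer: &Pubkey,
    ) -> Message {
        let instruction =
            bpf_loader_upgradeable::upgrade(program_id, buffer, upgrade_authority, fee_payer);
        let message = Message::new(&[instruction], Some(fee_payer));
        // With distinct fee payer and upgrade authority, exactly two signatures
        // are required; each holder can sign offline via `--sign-only` and the
        // fully signed transaction can be broadcast later.
        assert_eq!(message.header.num_required_signatures, 2);
        message
    }
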
@@ -1095,13 +1400,13 @@ fn process_write_buffer( } let program_data = read_and_verify_elf(program_location)?; - let buffer_data_len = if let Some(len) = max_len { + let buffer_data_max_len = if let Some(len) = max_len { len } else { program_data.len() }; - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_programdata(buffer_data_len), + let min_rent_exempt_program_data_balance = rpc_client.get_minimum_balance_for_rent_exemption( + UpgradeableLoaderState::size_of_programdata(buffer_data_max_len), )?; let result = do_process_program_write_and_deploy( @@ -1109,9 +1414,10 @@ fn process_write_buffer( config, &program_data, program_data.len(), - program_data.len(), - minimum_balance, + buffer_data_max_len, + min_rent_exempt_program_data_balance, &bpf_loader_upgradeable::id(), + fee_payer_signer, None, buffer_signer, &buffer_pubkey, @@ -1119,7 +1425,6 @@ fn process_write_buffer( true, skip_fee_check, ); - if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() { report_ephemeral_mnemonic(words, mnemonic); } @@ -1331,7 +1636,8 @@ fn get_programs( let results = get_accounts_with_filter(rpc_client, filters, 0)?; if results.len() != 1 { return Err(format!( - "Error: More than one Program associated with ProgramData account {programdata_address}" + "Error: More than one Program associated with ProgramData account \ + {programdata_address}" ) .into()); } @@ -1716,6 +2022,100 @@ fn process_close( } } +fn process_extend_program( + rpc_client: &RpcClient, + config: &CliConfig, + program_pubkey: Pubkey, + additional_bytes: u32, +) -> ProcessResult { + let payer_pubkey = config.signers[0].pubkey(); + + if additional_bytes == 0 { + return Err("Additional bytes must be greater than zero".into()); + } + + let program_account = match rpc_client + .get_account_with_commitment(&program_pubkey, config.commitment)? + .value + { + Some(program_account) => Ok(program_account), + None => Err(format!("Unable to find program {program_pubkey}")), + }?; + + if !bpf_loader_upgradeable::check_id(&program_account.owner) { + return Err(format!("Account {program_pubkey} is not an upgradeable program").into()); + } + + let programdata_pubkey = match program_account.state() { + Ok(UpgradeableLoaderState::Program { + programdata_address: programdata_pubkey, + }) => Ok(programdata_pubkey), + _ => Err(format!( + "Account {program_pubkey} is not an upgradeable program" + )), + }?; + + let programdata_account = match rpc_client + .get_account_with_commitment(&programdata_pubkey, config.commitment)? 
+ .value + { + Some(programdata_account) => Ok(programdata_account), + None => Err(format!("Program {program_pubkey} is closed")), + }?; + + let upgrade_authority_address = match programdata_account.state() { + Ok(UpgradeableLoaderState::ProgramData { + slot: _, + upgrade_authority_address, + }) => Ok(upgrade_authority_address), + _ => Err(format!("Program {program_pubkey} is closed")), + }?; + + match upgrade_authority_address { + None => Err(format!("Program {program_pubkey} is not upgradeable")), + _ => Ok(()), + }?; + + let blockhash = rpc_client.get_latest_blockhash()?; + + let mut tx = Transaction::new_unsigned(Message::new( + &[bpf_loader_upgradeable::extend_program( + &program_pubkey, + Some(&payer_pubkey), + additional_bytes, + )], + Some(&payer_pubkey), + )); + + tx.try_sign(&[config.signers[0]], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + if let Err(err) = result { + if let ClientErrorKind::TransactionError(TransactionError::InstructionError( + _, + InstructionError::InvalidInstructionData, + )) = err.kind() + { + return Err("Extending a program is not supported by the cluster".into()); + } else { + return Err(format!("Extend program failed: {err}").into()); + } + } + + Ok(config + .output_format + .formatted_string(&CliUpgradeableProgramExtended { + program_id: program_pubkey.to_string(), + additional_bytes, + })) +} + pub fn calculate_max_chunk_size<F>(create_msg: &F) -> usize where F: Fn(u32, Vec<u8>) -> Message, @@ -1737,11 +2137,12 @@ where fn do_process_program_write_and_deploy( rpc_client: Arc<RpcClient>, config: &CliConfig, - program_data: &[u8], + program_data: &[u8], // can be empty, hence we have program_len program_len: usize, - programdata_len: usize, - minimum_balance: u64, + program_data_max_len: usize, + min_rent_exempt_program_data_balance: u64, loader_id: &Pubkey, + fee_payer_signer: &dyn Signer, program_signers: Option<&[&dyn Signer]>, buffer_signer: Option<&dyn Signer>, buffer_pubkey: &Pubkey, @@ -1758,7 +2159,7 @@ fn do_process_program_write_and_deploy( { complete_partial_program_init( loader_id, - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, &account, if loader_id == &bpf_loader_upgradeable::id() { @@ -1766,36 +2167,36 @@ fn do_process_program_write_and_deploy( } else { program_len }, - minimum_balance, + min_rent_exempt_program_data_balance, allow_excessive_balance, )? 
} else if loader_id == &bpf_loader_upgradeable::id() { ( bpf_loader_upgradeable::create_buffer( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, &buffer_authority_signer.pubkey(), - minimum_balance, + min_rent_exempt_program_data_balance, program_len, )?, - minimum_balance, + min_rent_exempt_program_data_balance, ) } else { ( vec![system_instruction::create_account( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, - minimum_balance, + min_rent_exempt_program_data_balance, program_len as u64, loader_id, )], - minimum_balance, + min_rent_exempt_program_data_balance, ) }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new_with_blockhash( &initial_instructions, - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, )) } else { @@ -1803,7 +2204,6 @@ fn do_process_program_write_and_deploy( }; // Create and add write messages - let payer_pubkey = config.signers[0].pubkey(); let create_msg = |offset: u32, bytes: Vec<u8>| { let instruction = if loader_id == &bpf_loader_upgradeable::id() { bpf_loader_upgradeable::write( @@ -1815,7 +2215,7 @@ fn do_process_program_write_and_deploy( } else { loader_instruction::write(buffer_pubkey, loader_id, offset, bytes) }; - Message::new_with_blockhash(&[instruction], Some(&payer_pubkey), &blockhash) + Message::new_with_blockhash(&[instruction], Some(&fee_payer_signer.pubkey()), &blockhash) }; let mut write_messages = vec![]; @@ -1829,22 +2229,22 @@ fn do_process_program_write_and_deploy( let message = if loader_id == &bpf_loader_upgradeable::id() { Message::new_with_blockhash( &bpf_loader_upgradeable::deploy_with_max_program_len( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), &program_signers[0].pubkey(), buffer_pubkey, &program_signers[1].pubkey(), rpc_client.get_minimum_balance_for_rent_exemption( UpgradeableLoaderState::size_of_program(), )?, - programdata_len, + program_data_max_len, )?, - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, ) } else { Message::new_with_blockhash( &[loader_instruction::finalize(buffer_pubkey, loader_id)], - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, ) }; @@ -1857,6 +2257,7 @@ fn do_process_program_write_and_deploy( check_payer( &rpc_client, config, + fee_payer_signer.pubkey(), balance_needed, &initial_message, &write_messages, @@ -1870,6 +2271,7 @@ fn do_process_program_write_and_deploy( &initial_message, &write_messages, &final_message, + fee_payer_signer, buffer_signer, Some(buffer_authority_signer), program_signers, @@ -1888,23 +2290,20 @@ fn do_process_program_write_and_deploy( } } +#[allow(clippy::too_many_arguments)] fn do_process_program_upgrade( rpc_client: Arc<RpcClient>, config: &CliConfig, - program_data: &[u8], + program_data: &[u8], // can be empty, hence we have program_len + program_len: usize, + min_rent_exempt_program_data_balance: u64, + fee_payer_signer: &dyn Signer, program_id: &Pubkey, upgrade_authority: &dyn Signer, buffer_pubkey: &Pubkey, buffer_signer: Option<&dyn Signer>, skip_fee_check: bool, ) -> ProcessResult { - let loader_id = bpf_loader_upgradeable::id(); - let data_len = program_data.len(); - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_programdata(data_len), - )?; - - // Build messages to calculate fees let blockhash = rpc_client.get_latest_blockhash()?; let (initial_message, write_messages, balance_needed) = @@ -1915,31 +2314,31 @@ fn 
do_process_program_upgrade( .value { complete_partial_program_init( - &loader_id, - &config.signers[0].pubkey(), + &bpf_loader_upgradeable::id(), + &fee_payer_signer.pubkey(), &buffer_signer.pubkey(), &account, - UpgradeableLoaderState::size_of_buffer(data_len), - minimum_balance, + UpgradeableLoaderState::size_of_buffer(program_len), + min_rent_exempt_program_data_balance, true, )? } else { ( bpf_loader_upgradeable::create_buffer( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, &upgrade_authority.pubkey(), - minimum_balance, - data_len, + min_rent_exempt_program_data_balance, + program_len, )?, - minimum_balance, + min_rent_exempt_program_data_balance, ) }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new_with_blockhash( &initial_instructions, - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, )) } else { @@ -1948,7 +2347,6 @@ fn do_process_program_upgrade( let buffer_signer_pubkey = buffer_signer.pubkey(); let upgrade_authority_pubkey = upgrade_authority.pubkey(); - let payer_pubkey = config.signers[0].pubkey(); let create_msg = |offset: u32, bytes: Vec<u8>| { let instruction = bpf_loader_upgradeable::write( &buffer_signer_pubkey, &upgrade_authority_pubkey, offset, bytes, ); - Message::new_with_blockhash(&[instruction], Some(&payer_pubkey), &blockhash) + Message::new_with_blockhash( + &[instruction], + Some(&fee_payer_signer.pubkey()), + &blockhash, + ) }; // Create and add write messages @@ -1977,9 +2379,9 @@ fn do_process_program_upgrade( program_id, buffer_pubkey, &upgrade_authority.pubkey(), - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), )], - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, ); let final_message = Some(final_message); @@ -1988,6 +2390,7 @@ fn do_process_program_upgrade( check_payer( &rpc_client, config, + fee_payer_signer.pubkey(), balance_needed, &initial_message, &write_messages, @@ -2001,6 +2404,7 @@ fn do_process_program_upgrade( &initial_message, &write_messages, &final_message, + fee_payer_signer, buffer_signer, Some(upgrade_authority), Some(&[upgrade_authority]), @@ -2049,7 +2453,7 @@ fn complete_partial_program_init( ) -> Result<(Vec<Instruction>, u64), Box<dyn std::error::Error>> { let mut instructions: Vec<Instruction> = vec![]; let mut balance_needed = 0; - if account.executable { + if is_account_executable(account) { return Err("Buffer account is already executable".into()); } if account.owner != *loader_id && !system_program::check_id(&account.owner) { @@ -2093,6 +2497,7 @@ fn complete_partial_program_init( fn check_payer( rpc_client: &RpcClient, config: &CliConfig, + fee_payer_pubkey: Pubkey, balance_needed: u64, initial_message: &Option<Message>, write_messages: &[Message], @@ -2104,7 +2509,7 @@ fn check_payer( } if !write_messages.is_empty() { // Assume all write messages cost the same - if let Some(message) = write_messages.get(0) { + if let Some(message) = write_messages.first() { fee += rpc_client.get_fee_for_message(message)? 
* (write_messages.len() as u64); } } @@ -2113,7 +2518,7 @@ fn check_payer( } check_account_for_spend_and_fee_with_commitment( rpc_client, - &config.signers[0].pubkey(), + &fee_payer_pubkey, balance_needed, fee, config.commitment, @@ -2127,12 +2532,11 @@ fn send_deploy_messages( initial_message: &Option<Message>, write_messages: &[Message], final_message: &Option<Message>, + fee_payer_signer: &dyn Signer, initial_signer: Option<&dyn Signer>, write_signer: Option<&dyn Signer>, final_signers: Option<&[&dyn Signer]>, ) -> Result<(), Box<dyn std::error::Error>> { - let payer_signer = config.signers[0]; - if let Some(message) = initial_message { if let Some(initial_signer) = initial_signer { trace!("Preparing the required accounts"); @@ -2144,9 +2548,9 @@ fn send_deploy_messages( // This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an // extraneous signature. if message.header.num_required_signatures == 2 { - initial_transaction.try_sign(&[payer_signer, initial_signer], blockhash)?; + initial_transaction.try_sign(&[fee_payer_signer, initial_signer], blockhash)?; } else { - initial_transaction.try_sign(&[payer_signer], blockhash)?; + initial_transaction.try_sign(&[fee_payer_signer], blockhash)?; } let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); log_instruction_custom_error::<SystemError>(result, config) @@ -2173,7 +2577,7 @@ fn send_deploy_messages( )? .send_and_confirm_messages_with_spinner( write_messages, - &[payer_signer, write_signer], + &[fee_payer_signer, write_signer], ), ConnectionCache::Quic(cache) => { let tpu_client_fut = solana_client::nonblocking::tpu_client::TpuClient::new_with_connection_cache( @@ -2191,7 +2595,7 @@ fn send_deploy_messages( rpc_client.clone(), Some(tpu_client), write_messages, - &[payer_signer, write_signer], + &[fee_payer_signer, write_signer], SendAndConfirmConfig { resign_txs_count: Some(5), with_spinner: true, @@ -2222,7 +2626,7 @@ fn send_deploy_messages( let mut final_tx = Transaction::new_unsigned(message.clone()); let mut signers = final_signers.to_vec(); - signers.push(payer_signer); + signers.push(fee_payer_signer); final_tx.try_sign(&signers, blockhash)?; rpc_client .send_and_confirm_transaction_with_spinner_and_config( @@ -2310,6 +2714,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2337,6 +2742,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2366,6 +2772,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: None, + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), program_signer_index: None, @@ -2397,6 +2804,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2427,6 +2835,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, 
program_signer_index: Some(1), @@ -2460,6 +2869,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2489,6 +2899,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2526,6 +2937,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -2550,6 +2962,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -2577,6 +2990,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -2607,6 +3021,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 1, @@ -2642,6 +3057,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -3115,6 +3531,38 @@ mod tests { ); } + #[test] + fn test_cli_parse_extend_program() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + // defaults + let program_pubkey = Pubkey::new_unique(); + let additional_bytes = 100; + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program", + "extend", + &program_pubkey.to_string(), + &additional_bytes.to_string(), + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::ExtendProgram { + program_pubkey, + additional_bytes + }), + signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + } + ); + } + #[test] fn test_cli_keypair_file() { solana_logger::setup(); @@ -3139,6 +3587,7 @@ mod tests { rpc_client: Some(Arc::new(RpcClient::new_mock("".to_string()))), command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(program_location.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 324f3040b83d4c..a96b227ef85312 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -114,7 +114,10 @@ impl 
ProgramV4SubCommands for App<'_, '_> { .value_name("PROGRAM_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program account signer. The program data is written to the associated account.") + .help( + "Program account signer. The program data is written to the \ + associated account.", + ), ) .arg( Arg::with_name("authority") @@ -122,7 +125,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -140,7 +145,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .long("program-id") .value_name("PROGRAM_ID") .takes_value(true) - .help("Executable program's address") + .help("Executable program's address"), ) .arg( Arg::with_name("buffer") @@ -148,7 +153,10 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Optional intermediate buffer account to write data to, which can be used to resume a failed deploy") + .help( + "Optional intermediate buffer account to write data to, which \ + can be used to resume a failed deploy", + ), ) .arg( Arg::with_name("authority") @@ -156,7 +164,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -167,7 +177,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .long("program-id") .value_name("PROGRAM_ID") .takes_value(true) - .help("Executable program's address") + .help("Executable program's address"), ) .arg( Arg::with_name("authority") @@ -175,7 +185,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -186,7 +198,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .long("program-id") .value_name("PROGRAM_ID") .takes_value(true) - .help("Executable program's address") + .help("Executable program's address"), ) .arg( Arg::with_name("authority") @@ -194,7 +206,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -205,22 +219,22 @@ impl ProgramV4SubCommands for App<'_, '_> { .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) - .help("Address of the program to show") + .help("Address of the program to show"), ) .arg( Arg::with_name("all") .long("all") .conflicts_with("account") .conflicts_with("buffer_authority") - .help("Show accounts for all authorities") + .help("Show accounts for all authorities"), ) - .arg( - pubkey!(Arg::with_name("authority") + .arg(pubkey!( + Arg::with_name("authority") .long("authority") .value_name("AUTHORITY") .conflicts_with("all"), - "Authority [default: the default configured keypair]"), - ), + "Authority [default: the default configured keypair]." 
+ )), ) .subcommand( SubCommand::with_name("dump") @@ -231,7 +245,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("ACCOUNT_ADDRESS") .takes_value(true) .required(true) - .help("Address of the buffer or program") + .help("Address of the buffer or program"), ) .arg( Arg::with_name("output_location") @@ -241,7 +255,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .required(true) .help("/path/to/program.so"), ), - ) + ), ) } } @@ -795,7 +809,7 @@ fn check_payer( } if !write_messages.is_empty() { // Assume all write messages cost the same - if let Some(message) = write_messages.get(0) { + if let Some(message) = write_messages.first() { fee += rpc_client.get_fee_for_message(message)? * (write_messages.len() as u64); } } @@ -955,7 +969,11 @@ fn build_create_buffer_message( if account.lamports < lamports_required || account.data.len() != expected_account_data_len { if program_address == buffer_address { - return Err("Buffer account passed could be for a different deploy? It has different size/lamports".into()); + return Err( + "Buffer account passed could be for a different deploy? It has different \ + size/lamports" + .into(), + ); } let (truncate_instructions, balance_needed) = build_truncate_instructions( diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index c9ca3356a9f27d..62f7a8dfd1f05f 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -161,7 +161,7 @@ where dummy_message.recent_blockhash = *blockhash; get_fee_for_messages(rpc_client, &[&dummy_message])? } - None => 0, // Offline, cannot calulate fee + None => 0, // Offline, cannot calculate fee }; match amount { diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 337b2843ff2229..f4bb8329278d94 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -144,7 +144,10 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Stake account to create (or base of derived address if --seed is used)") + .help( + "Stake account to create (or base of derived address if --seed is \ + used)", + ), ) .arg( Arg::with_name("amount") @@ -153,28 +156,35 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount_or_all) .required(true) - .help("The amount to send to the stake account, in SOL; accepts keyword ALL") + .help( + "The amount to send to the stake account, in SOL; accepts keyword ALL", + ), ) - .arg( - pubkey!(Arg::with_name("custodian") + .arg(pubkey!( + Arg::with_name("custodian") .long("custodian") .value_name("PUBKEY"), - "Authority to modify lockups. ") - ) + "Authority to modify lockups." 
+ )) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey", + ), ) .arg( Arg::with_name("lockup_epoch") .long("lockup-epoch") .value_name("NUMBER") .takes_value(true) - .help("The epoch height at which this account will be available for withdrawal") + .help( + "The epoch height at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("lockup_date") @@ -182,7 +192,10 @@ impl StakeSubCommands for App<'_, '_> { .value_name("RFC3339 DATETIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("The date and time at which this account will be available for withdrawal") + .help( + "The date and time at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name(STAKE_AUTHORITY_ARG.name) @@ -190,7 +203,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_valid_pubkey) - .help(STAKE_AUTHORITY_ARG.help) + .help(STAKE_AUTHORITY_ARG.help), ) .arg( Arg::with_name(WITHDRAW_AUTHORITY_ARG.name) @@ -198,7 +211,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_valid_pubkey) - .help(WITHDRAW_AUTHORITY_ARG.help) + .help(WITHDRAW_AUTHORITY_ARG.help), ) .arg( Arg::with_name("from") @@ -212,7 +225,7 @@ impl StakeSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("create-stake-account-checked") @@ -224,7 +237,10 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Stake account to create (or base of derived address if --seed is used)") + .help( + "Stake account to create (or base of derived address if --seed is \ + used)", + ), ) .arg( Arg::with_name("amount") @@ -233,15 +249,19 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount_or_all) .required(true) - .help("The amount to send to the stake account, in SOL; accepts keyword ALL") + .help( + "The amount to send to the stake account, in SOL; accepts keyword ALL", + ), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey", + ), ) .arg( Arg::with_name(STAKE_AUTHORITY_ARG.name) @@ -249,7 +269,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_valid_pubkey) - .help(STAKE_AUTHORITY_ARG.help) + .help(STAKE_AUTHORITY_ARG.help), ) .arg( Arg::with_name(WITHDRAW_AUTHORITY_ARG.name) @@ -257,7 +277,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("KEYPAIR") .takes_value(true) .validator(is_valid_signer) - .help(WITHDRAW_AUTHORITY_ARG.help) + .help(WITHDRAW_AUTHORITY_ARG.help), ) .arg( Arg::with_name("from") @@ -271,7 +291,7 @@ impl StakeSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) 
.subcommand( SubCommand::with_name("delegate-stake") @@ -281,28 +301,28 @@ impl StakeSubCommands for App<'_, '_> { .long("force") .takes_value(false) .hidden(hidden_unless_forced()) // Don't document this argument to discourage its use - .help("Override vote account sanity checks (use carefully!)") + .help("Override vote account sanity checks (use carefully!)"), ) - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to delegate") - ) - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + "Stake account to delegate." + )) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(2) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "The vote account to which the stake will be delegated") - ) + "Vote account to which the stake will be delegated." + )) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("redelegate-stake") @@ -312,24 +332,24 @@ impl StakeSubCommands for App<'_, '_> { .long("force") .takes_value(false) .hidden(hidden_unless_forced()) // Don't document this argument to discourage its use - .help("Override vote account sanity checks (use carefully!)") + .help("Override vote account sanity checks (use carefully!)"), ) - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Existing delegated stake account that has been fully activated. \ - On success this stake account will be scheduled for deactivation and the rent-exempt balance \ - may be withdrawn once fully deactivated") - ) - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + "Existing delegated stake account that has been fully activated. On success \ + this stake account will be scheduled for deactivation and the rent-exempt \ + balance may be withdrawn once fully deactivated." + )) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(2) .value_name("REDELEGATED_VOTE_ACCOUNT_ADDRESS") .required(true), - "The vote account to which the stake will be redelegated") - ) + "Vote account to which the stake will be redelegated." + )) .arg( Arg::with_name("redelegation_stake_account") .index(3) @@ -337,42 +357,43 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Stake account to create for the redelegation. \ - On success this stake account will be created and scheduled for activation with all \ - the stake in the existing stake account, exclusive of the rent-exempt balance retained \ - in the existing account") + .help( + "Stake account to create for the redelegation. 
On success this stake \ + account will be created and scheduled for activation with all the \ + stake in the existing stake account, exclusive of the rent-exempt \ + balance retained in the existing account", + ), ) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) - .arg(memo_arg()) + .arg(memo_arg()), ) - .subcommand( SubCommand::with_name("stake-authorize") .about("Authorize a new signing keypair for the given stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .required(true) .index(1) .value_name("STAKE_ACCOUNT_ADDRESS"), - "Stake account in which to set a new authority. ") - ) - .arg( - pubkey!(Arg::with_name("new_stake_authority") + "Stake account in which to set a new authority." + )) + .arg(pubkey!( + Arg::with_name("new_stake_authority") .long("new-stake-authority") .required_unless("new_withdraw_authority") .value_name("PUBKEY"), - "New authorized staker") - ) - .arg( - pubkey!(Arg::with_name("new_withdraw_authority") + "New authorized staker." + )) + .arg(pubkey!( + Arg::with_name("new_withdraw_authority") .long("new-withdraw-authority") .required_unless("new_stake_authority") .value_name("PUBKEY"), - "New authorized withdrawer. ") - ) + "New authorized withdrawer." + )) .arg(stake_authority_arg()) .arg(withdraw_authority_arg()) .offline_args() @@ -383,21 +404,27 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("no_wait") .long("no-wait") .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + .help( + "Return signature immediately after submitting the transaction, \ + instead of waiting for confirmations", + ), ) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-authorize-checked") - .about("Authorize a new signing keypair for the given stake account, checking the authority as a signer") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .about( + "Authorize a new signing keypair for the given stake account, checking the \ + authority as a signer", + ) + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .required(true) .index(1) .value_name("STAKE_ACCOUNT_ADDRESS"), - "Stake account in which to set a new authority. ") - ) + "Stake account in which to set a new authority." 
+ )) .arg( Arg::with_name("new_stake_authority") .long("new-stake-authority") @@ -405,7 +432,7 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .required_unless("new_withdraw_authority") - .help("New authorized staker") + .help("New authorized staker"), ) .arg( Arg::with_name("new_withdraw_authority") @@ -414,7 +441,7 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .required_unless("new_stake_authority") - .help("New authorized withdrawer") + .help("New authorized withdrawer"), ) .arg(stake_authority_arg()) .arg(withdraw_authority_arg()) @@ -426,53 +453,62 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("no_wait") .long("no-wait") .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + .help( + "Return signature immediately after submitting the transaction, \ + instead of waiting for confirmations", + ), ) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("deactivate-stake") .about("Deactivate the delegated stake from the stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to be deactivated (or base of derived address if --seed is used). ") - ) + "Stake account to be deactivated (or base of derived address if --seed is \ + used)." + )) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of STAKE_ACCOUNT_ADDRESS") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of STAKE_ACCOUNT_ADDRESS", + ), ) .arg( Arg::with_name("delinquent") .long("delinquent") .takes_value(false) .conflicts_with(SIGN_ONLY_ARG.name) - .help("Deactivate abandoned stake that is currently delegated to a delinquent vote account") + .help( + "Deactivate abandoned stake that is currently delegated to a \ + delinquent vote account", + ), ) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("split-stake") .about("Duplicate a stake account, splitting the tokens between the two") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to split (or base of derived address if --seed is used). ") - ) + "Stake account to split (or base of derived address if --seed is used)." 
+ )) .arg( Arg::with_name("split_stake_account") .index(2) @@ -480,7 +516,7 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Keypair of the new stake account") + .help("Keypair of the new stake account"), ) .arg( Arg::with_name("amount") @@ -489,18 +525,20 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount) .required(true) - .help("The amount to move into the new stake account, in SOL") + .help("The amount to move into the new stake account, in SOL"), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of SPLIT_STAKE_ACCOUNT") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of SPLIT_STAKE_ACCOUNT", + ), ) .arg(stake_authority_arg()) - .offline_args_config(&SignOnlySplitNeedsRent{}) + .offline_args_config(&SignOnlySplitNeedsRent {}) .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) @@ -512,52 +550,55 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount) .requires("sign_only") - .help("Offline signing only: the rent-exempt amount to move into the new \ - stake account, in SOL") - ) + .help( + "Offline signing only: the rent-exempt amount to move into the new \ + stake account, in SOL", + ), + ), ) .subcommand( SubCommand::with_name("merge-stake") .about("Merges one stake account into another") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to merge into") - ) - .arg( - pubkey!(Arg::with_name("source_stake_account_pubkey") + "Stake account to merge into." + )) + .arg(pubkey!( + Arg::with_name("source_stake_account_pubkey") .index(2) .value_name("SOURCE_STAKE_ACCOUNT_ADDRESS") .required(true), - "Source stake account for the merge. If successful, this stake account \ - will no longer exist after the merge") - ) + "Source stake account for the merge. If successful, this stake account will \ + no longer exist after the merge." + )) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("withdraw-stake") .about("Withdraw the unstaked SOL from the stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account from which to withdraw (or base of derived address if --seed is used). ") - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Stake account from which to withdraw (or base of derived address if --seed \ + is used)." + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "Recipient of withdrawn SOL") - ) + "Recipient of withdrawn stake." 
+ )) .arg( Arg::with_name("amount") .index(3) @@ -565,15 +606,20 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount_or_all) .required(true) - .help("The amount to withdraw from the stake account, in SOL; accepts keyword ALL") + .help( + "The amount to withdraw from the stake account, in SOL; accepts \ + keyword ALL", + ), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of STAKE_ACCOUNT_ADDRESS") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of STAKE_ACCOUNT_ADDRESS", + ), ) .arg(withdraw_authority_arg()) .offline_args() @@ -581,24 +627,27 @@ impl StakeSubCommands for App<'_, '_> { .arg(fee_payer_arg()) .arg(custodian_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-set-lockup") .about("Set Lockup for the stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account for which to set lockup parameters. ") - ) + "Stake account for which to set lockup parameters." + )) .arg( Arg::with_name("lockup_epoch") .long("lockup-epoch") .value_name("NUMBER") .takes_value(true) - .help("The epoch height at which this account will be available for withdrawal") + .help( + "The epoch height at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("lockup_date") @@ -606,48 +655,56 @@ impl StakeSubCommands for App<'_, '_> { .value_name("RFC3339 DATETIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("The date and time at which this account will be available for withdrawal") + .help( + "The date and time at which this account will be available for \ + withdrawal", + ), ) - .arg( - pubkey!(Arg::with_name("new_custodian") + .arg(pubkey!( + Arg::with_name("new_custodian") .long("new-custodian") .value_name("PUBKEY"), - "Identity of a new lockup custodian. ") + "New lockup custodian." + )) + .group( + ArgGroup::with_name("lockup_details") + .args(&["lockup_epoch", "lockup_date", "new_custodian"]) + .multiple(true) + .required(true), ) - .group(ArgGroup::with_name("lockup_details") - .args(&["lockup_epoch", "lockup_date", "new_custodian"]) - .multiple(true) - .required(true)) .arg( Arg::with_name("custodian") .long("custodian") .takes_value(true) .value_name("KEYPAIR") .validator(is_valid_signer) - .help("Keypair of the existing custodian [default: cli config pubkey]") + .help("Keypair of the existing custodian [default: cli config pubkey]"), ) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-set-lockup-checked") .about("Set Lockup for the stake account, checking the new authority as a signer") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account for which to set lockup parameters. ") - ) + "Stake account for which to set lockup parameters." 
+ )) .arg( Arg::with_name("lockup_epoch") .long("lockup-epoch") .value_name("NUMBER") .takes_value(true) - .help("The epoch height at which this account will be available for withdrawal") + .help( + "The epoch height at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("lockup_date") @@ -655,7 +712,10 @@ impl StakeSubCommands for App<'_, '_> { .value_name("RFC3339 DATETIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("The date and time at which this account will be available for withdrawal") + .help( + "The date and time at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("new_custodian") @@ -663,42 +723,44 @@ impl StakeSubCommands for App<'_, '_> { .value_name("KEYPAIR") .takes_value(true) .validator(is_valid_signer) - .help("Keypair of a new lockup custodian") + .help("Keypair of a new lockup custodian"), + ) + .group( + ArgGroup::with_name("lockup_details") + .args(&["lockup_epoch", "lockup_date", "new_custodian"]) + .multiple(true) + .required(true), ) - .group(ArgGroup::with_name("lockup_details") - .args(&["lockup_epoch", "lockup_date", "new_custodian"]) - .multiple(true) - .required(true)) .arg( Arg::with_name("custodian") .long("custodian") .takes_value(true) .value_name("KEYPAIR") .validator(is_valid_signer) - .help("Keypair of the existing custodian [default: cli config pubkey]") + .help("Keypair of the existing custodian [default: cli config pubkey]"), ) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-account") .about("Show the contents of a stake account") .alias("show-stake-account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "The stake account to display. ") - ) + "Stake account to display." + )) .arg( Arg::with_name("lamports") .long("lamports") .takes_value(false) - .help("Display balance in lamports instead of SOL") + .help("Display balance in lamports instead of SOL"), ) .arg( Arg::with_name("with_rewards") @@ -710,7 +772,7 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("csv") .long("csv") .takes_value(false) - .help("Format stake rewards data in csv") + .help("Format stake rewards data in csv"), ) .arg( Arg::with_name("num_rewards_epochs") @@ -720,7 +782,10 @@ impl StakeSubCommands for App<'_, '_> { .validator(|s| is_within_range(s, 1..=50)) .default_value_if("with_rewards", None, "1") .requires("with_rewards") - .help("Display rewards for NUM recent epochs, max 10 [default: latest epoch only]"), + .help( + "Display rewards for NUM recent epochs, max 10 \ + [default: latest epoch only]", + ), ), ) .subcommand( @@ -731,7 +796,7 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("lamports") .long("lamports") .takes_value(false) - .help("Display balance in lamports instead of SOL") + .help("Display balance in lamports instead of SOL"), ) .arg( Arg::with_name("limit") .long("limit") .takes_value(true) .value_name("NUM") .default_value("10") - .validator(|s| { - s.parse::<usize>() - .map(|_| ()) - .map_err(|e| e.to_string()) - }) - .help("Display NUM recent epochs worth of stake history in text mode. 0 for all") - ) + .validator(|s| s.parse::<usize>().map(|_| ()).map_err(|e| e.to_string())) + .help( + "Display NUM recent epochs worth of stake history in text mode. 
0 for \ + all", + ), + ), ) .subcommand( SubCommand::with_name("stake-minimum-delegation") @@ -754,8 +818,8 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("lamports") .long("lamports") .takes_value(false) - .help("Display minimum delegation in lamports instead of SOL") - ) + .help("Display minimum delegation in lamports instead of SOL"), + ), ) } } @@ -1456,7 +1520,8 @@ pub fn process_create_stake_account( if lamports < minimum_balance { return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for stake account to be rent exempt, provided lamports: {lamports}" + "need at least {minimum_balance} lamports for stake account to be rent exempt, \ + provided lamports: {lamports}" )) .into()); } @@ -1919,7 +1984,8 @@ pub fn process_split_stake( format!("Stake account {split_stake_account_address} already exists") } else { format!( - "Account {split_stake_account_address} already exists and is not a stake account" + "Account {split_stake_account_address} already exists and is not a stake \ + account" ) }; return Err(CliError::BadParameter(err_msg).into()); @@ -1930,7 +1996,8 @@ pub fn process_split_stake( if lamports < minimum_balance { return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for stake account to be rent exempt, provided lamports: {lamports}" + "need at least {minimum_balance} lamports for stake account to be rent exempt, \ + provided lamports: {lamports}" )) .into()); } @@ -2253,7 +2320,7 @@ pub fn build_stake_state( deactivating, } = stake.delegation.stake_activating_and_deactivating( current_epoch, - Some(stake_history), + stake_history, new_rate_activation_epoch, ); let lockup = if lockup.is_in_force(clock, None) { @@ -2359,7 +2426,8 @@ pub(crate) fn check_current_authority( ) -> Result<(), CliError> { if !permitted_authorities.contains(provided_current_authority) { Err(CliError::RpcRequestError(format!( - "Invalid authority provided: {provided_current_authority:?}, expected {permitted_authorities:?}" + "Invalid authority provided: {provided_current_authority:?}, expected \ + {permitted_authorities:?}" ))) } else { Ok(()) @@ -2603,8 +2671,8 @@ pub fn process_delegate_stake( // filter should return at most one result let rpc_vote_account = current - .get(0) - .or_else(|| delinquent.get(0)) + .first() + .or_else(|| delinquent.first()) .ok_or(CliError::RpcRequestError(format!( "Vote account not found: {vote_account_pubkey}" )))?; @@ -2622,8 +2690,8 @@ pub fn process_delegate_stake( )) } else { Err(CliError::DynamicProgramError(format!( - "Unable to delegate. Vote account appears delinquent \ - because its current root slot, {root_slot}, is less than {min_root_slot}" + "Unable to delegate. Vote account appears delinquent because its current root \ + slot, {root_slot}, is less than {min_root_slot}" ))) }; diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index f6251c649972e2..ad3df38d553499 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -88,7 +88,11 @@ fn verify_keybase( if client.head(&url).send()?.status().is_success() { Ok(()) } else { - Err(format!("keybase_username could not be confirmed at: {url}. Please add this pubkey file to your keybase profile to connect").into()) + Err(format!( + "keybase_username could not be confirmed at: {url}. 
Please add this pubkey file \ + to your keybase profile to connect" + ) + .into()) } } else { Err(format!("keybase_username could not be parsed as String: {keybase_username}").into()) @@ -204,7 +208,7 @@ impl ValidatorInfoSubCommands for App<'_, '_> { .value_name("DETAILS") .takes_value(true) .validator(check_details_length) - .help("Validator description") + .help("Validator description"), ) .arg( Arg::with_name("force") @@ -223,9 +227,12 @@ impl ValidatorInfoSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_pubkey) - .help("The pubkey of the Validator info account; without this argument, returns all"), + .help( + "The pubkey of the Validator info account; without this \ + argument, returns all Validator info accounts", + ), ), - ) + ), ) } } @@ -607,7 +614,12 @@ mod tests { let max_short_string = "Max Length String KWpP299aFCBWvWg1MHpSuaoTsud7cv8zMJsh99aAtP8X1s26yrR1".to_string(); // 300-character string - let max_long_string = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut libero quam, volutpat et aliquet eu, varius in mi. Aenean vestibulum ex in tristique faucibus. Maecenas in imperdiet turpis. Nullam feugiat aliquet erat. Morbi malesuada turpis sed dui pulvinar lobortis. Pellentesque a lectus eu leo nullam.".to_string(); + let max_long_string = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut libero \ + quam, volutpat et aliquet eu, varius in mi. Aenean vestibulum ex \ + in tristique faucibus. Maecenas in imperdiet turpis. Nullam \ + feugiat aliquet erat. Morbi malesuada turpis sed dui pulvinar \ + lobortis. Pellentesque a lectus eu leo nullam." + .to_string(); let mut info = Map::new(); info.insert("name".to_string(), Value::String(max_short_string.clone())); info.insert( diff --git a/cli/src/vote.rs b/cli/src/vote.rs index e4456fe1d2355c..9107d170058d2e 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -70,15 +70,15 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Keypair of validator that will vote with this account"), ) - .arg( - pubkey!(Arg::with_name("authorized_withdrawer") + .arg(pubkey!( + Arg::with_name("authorized_withdrawer") .index(3) .value_name("WITHDRAWER_PUBKEY") .takes_value(true) .required(true) .long("authorized-withdrawer"), - "Public key of the authorized withdrawer") - ) + "Authorized withdrawer." + )) .arg( Arg::with_name("commission") .long("commission") @@ -87,43 +87,48 @@ impl VoteSubCommands for App<'_, '_> { .default_value("100") .help("The commission taken on reward redemption (0-100)"), ) - .arg( - pubkey!(Arg::with_name("authorized_voter") + .arg(pubkey!( + Arg::with_name("authorized_voter") .long("authorized-voter") .value_name("VOTER_PUBKEY"), - "Public key of the authorized voter [default: validator identity pubkey]. "), - ) + "Authorized voter [default: validator identity pubkey]." 
+ )) .arg( Arg::with_name("allow_unsafe_authorized_withdrawer") .long("allow-unsafe-authorized-withdrawer") .takes_value(false) - .help("Allow an authorized withdrawer pubkey to be identical to the validator identity \ - account pubkey or vote account pubkey, which is normally an unsafe \ - configuration and should be avoided."), + .help( + "Allow an authorized withdrawer pubkey to be identical to the \ + validator identity account pubkey or vote account pubkey, which is \ + normally an unsafe configuration and should be avoided.", + ), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the VOTE ACCOUNT pubkey", + ), ) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-voter") .about("Authorize a new vote signing keypair for the given vote account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized voter. "), - ) + "Vote account in which to set the authorized voter." + )) .arg( Arg::with_name("authorized") .index(2) @@ -132,29 +137,29 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Current authorized vote signer."), ) - .arg( - pubkey!(Arg::with_name("new_authorized_pubkey") + .arg(pubkey!( + Arg::with_name("new_authorized_pubkey") .index(3) .value_name("NEW_AUTHORIZED_PUBKEY") .required(true), - "New authorized vote signer. "), - ) + "New authorized vote signer." + )) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-withdrawer") .about("Authorize a new withdraw signing keypair for the given vote account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized withdrawer. "), - ) + "Vote account in which to set the authorized withdrawer." + )) .arg( Arg::with_name("authorized") .index(2) @@ -163,30 +168,32 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Current authorized withdrawer."), ) - .arg( - pubkey!(Arg::with_name("new_authorized_pubkey") + .arg(pubkey!( + Arg::with_name("new_authorized_pubkey") .index(3) .value_name("AUTHORIZED_PUBKEY") .required(true), - "New authorized withdrawer. "), - ) + "New authorized withdrawer." 
+ )) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-voter-checked") - .about("Authorize a new vote signing keypair for the given vote account, \ - checking the new authority as a signer") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .about( + "Authorize a new vote signing keypair for the given vote account, checking \ + the new authority as a signer", + ) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized voter. "), - ) + "Vote account in which to set the authorized voter." + )) .arg( Arg::with_name("authorized") .index(2) @@ -207,19 +214,21 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-withdrawer-checked") - .about("Authorize a new withdraw signing keypair for the given vote account, \ - checking the new authority as a signer") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .about( + "Authorize a new withdraw signing keypair for the given vote account, \ + checking the new authority as a signer", + ) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized withdrawer. "), - ) + "Vote account in which to set the authorized withdrawer." + )) .arg( Arg::with_name("authorized") .index(2) @@ -240,18 +249,18 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-update-validator") .about("Update the vote account's validator identity") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account to update. "), - ) + "Vote account to update." + )) .arg( Arg::with_name("new_identity_account") .index(2) @@ -274,18 +283,18 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-update-commission") .about("Update the vote account's commission") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account to update. "), - ) + "Vote account to update." 
+ )) .arg( Arg::with_name("commission") .index(2) @@ -293,7 +302,7 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_percentage) - .help("The new commission") + .help("The new commission"), ) .arg( Arg::with_name("authorized_withdrawer") @@ -308,19 +317,19 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-account") .about("Show the contents of a vote account") .alias("show-vote-account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account pubkey. "), - ) + "Vote account." + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -347,26 +356,29 @@ impl VoteSubCommands for App<'_, '_> { .validator(|s| is_within_range(s, 1..=50)) .default_value_if("with_rewards", None, "1") .requires("with_rewards") - .help("Display rewards for NUM recent epochs, max 10 [default: latest epoch only]"), + .help( + "Display rewards for NUM recent epochs, max 10 \ + [default: latest epoch only]", + ), ), ) .subcommand( SubCommand::with_name("withdraw-from-vote-account") .about("Withdraw lamports from a vote account into a specified account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account from which to withdraw. "), - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Vote account from which to withdraw." + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "The recipient of withdrawn SOL. "), - ) + "The recipient of withdrawn SOL." + )) .arg( Arg::with_name("amount") .index(3) @@ -374,7 +386,10 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_amount_or_all) - .help("The amount to withdraw, in SOL; accepts keyword ALL, which for this command means account balance minus rent-exempt minimum"), + .help( + "The amount to withdraw, in SOL; accepts keyword ALL, which for this \ + command means account balance minus rent-exempt minimum", + ), ) .arg( Arg::with_name("authorized_withdrawer") @@ -388,26 +403,25 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg() - ) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("close-vote-account") .about("Close a vote account and withdraw all funds remaining") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account to be closed. "), - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Vote account to be closed." + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "The recipient of all withdrawn SOL. "), - ) + "The recipient of all withdrawn SOL." 
+ )) .arg( Arg::with_name("authorized_withdrawer") .long("authorized-withdrawer") @@ -418,8 +432,7 @@ impl VoteSubCommands for App<'_, '_> { ) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg() - ) + .arg(compute_unit_price_arg()), ) } } @@ -451,15 +464,15 @@ pub fn parse_create_vote_account( if !allow_unsafe { if authorized_withdrawer == vote_account_pubkey.unwrap() { return Err(CliError::BadParameter( - "Authorized withdrawer pubkey is identical to vote \ - account pubkey, an unsafe configuration" + "Authorized withdrawer pubkey is identical to vote account pubkey, an unsafe \ + configuration" .to_owned(), )); } if authorized_withdrawer == identity_pubkey.unwrap() { return Err(CliError::BadParameter( - "Authorized withdrawer pubkey is identical to identity \ - account pubkey, an unsafe configuration" + "Authorized withdrawer pubkey is identical to identity account pubkey, an unsafe \ + configuration" .to_owned(), )); } @@ -956,8 +969,10 @@ pub fn process_vote_authorize( if let Some(signer) = new_authorized_signer { if signer.is_interactive() { return Err(CliError::BadParameter(format!( - "invalid new authorized vote signer {new_authorized_pubkey:?}. Interactive vote signers not supported" - )).into()); + "invalid new authorized vote signer {new_authorized_pubkey:?}. \ + Interactive vote signers not supported" + )) + .into()); } } } @@ -1337,7 +1352,9 @@ pub fn process_withdraw_from_vote_account( let balance_remaining = current_balance.saturating_sub(withdraw_amount); if balance_remaining < minimum_balance && balance_remaining != 0 { return Err(CliError::BadParameter(format!( - "Withdraw amount too large. The vote account balance must be at least {} SOL to remain rent exempt", lamports_to_sol(minimum_balance) + "Withdraw amount too large. The vote account balance must be at least {} SOL \ + to remain rent exempt", + lamports_to_sol(minimum_balance) )) .into()); } diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs index bc3e5d4e0081d3..04b891562f395e 100644 --- a/cli/src/wallet.rs +++ b/cli/src/wallet.rs @@ -60,13 +60,13 @@ impl WalletSubCommands for App<'_, '_> { SubCommand::with_name("account") .about("Show the contents of an account") .alias("account") - .arg( - pubkey!(Arg::with_name("account_pubkey") + .arg(pubkey!( + Arg::with_name("account_pubkey") .index(1) .value_name("ACCOUNT_ADDRESS") .required(true), - "Account key URI. ") - ) + "Account contents to show." + )) .arg( Arg::with_name("output_file") .long("output-file") @@ -104,22 +104,22 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The airdrop amount to request, in SOL"), ) - .arg( - pubkey!(Arg::with_name("to") + .arg(pubkey!( + Arg::with_name("to") .index(2) .value_name("RECIPIENT_ADDRESS"), - "The account address of airdrop recipient. "), - ), + "Account of airdrop recipient." + )), ) .subcommand( SubCommand::with_name("balance") .about("Get your balance") - .arg( - pubkey!(Arg::with_name("pubkey") + .arg(pubkey!( + Arg::with_name("pubkey") .index(1) .value_name("ACCOUNT_ADDRESS"), - "The account address of the balance to check. ") - ) + "Account balance to check." 
+ )) .arg( Arg::with_name("lamports") .long("lamports") @@ -138,23 +138,25 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The transaction signature to confirm"), ) - .after_help(// Formatted specifically for the manually-indented heredoc string - "Note: This will show more detailed information for finalized transactions with verbose mode (-v/--verbose).\ - \n\ - \nAccount modes:\ - \n |srwx|\ - \n s: signed\ - \n r: readable (always true)\ - \n w: writable\ - \n x: program account (inner instructions excluded)\ - " + .after_help( + // Formatted specifically for the manually-indented heredoc string + "Note: This will show more detailed information for finalized \ + transactions with verbose mode (-v/--verbose).\ + \n\ + \nAccount modes:\ + \n |srwx|\ + \n s: signed\ + \n r: readable (always true)\ + \n w: writable\ + \n x: program account (inner instructions excluded)\ + ", ), ) .subcommand( SubCommand::with_name("create-address-with-seed") .about( - "Generate a derived account address with a seed. \ - For program derived addresses (PDAs), use the find-program-derived-address command instead" + "Generate a derived account address with a seed. For program derived \ + addresses (PDAs), use the find-program-derived-address command instead", ) .arg( Arg::with_name("seed") @@ -176,45 +178,46 @@ impl WalletSubCommands for App<'_, '_> { or one of NONCE, STAKE, and VOTE keywords", ), ) - .arg( - pubkey!(Arg::with_name("from") + .arg(pubkey!( + Arg::with_name("from") .long("from") .value_name("FROM_PUBKEY") .required(false), - "From (base) key, [default: cli config keypair]. "), + "From (base) key, [default: cli config keypair]." + )), + ) + .subcommand( + SubCommand::with_name("find-program-derived-address") + .about("Generate a program derived account address with a seed") + .arg( + Arg::with_name("program_id") + .index(1) + .value_name("PROGRAM_ID") + .takes_value(true) + .required(true) + .help( + "The program_id that the address will ultimately be used for, \n\ + or one of NONCE, STAKE, and VOTE keywords", + ), + ) + .arg( + Arg::with_name("seeds") + .min_values(0) + .value_name("SEED") + .takes_value(true) + .validator(is_structured_seed) + .help( + "The seeds. \n\ + Each one must match the pattern PREFIX:VALUE. \n\ + PREFIX can be one of [string, pubkey, hex, u8] \n\ + or matches the pattern [u,i][16,32,64,128][le,be] \ + (for example u64le) for number values \n\ + [u,i] - represents whether the number is unsigned or signed, \n\ + [16,32,64,128] - represents the bit length, and \n\ + [le,be] - represents the byte order - little endian or big endian", + ), ), ) - .subcommand( - SubCommand::with_name("find-program-derived-address") - .about("Generate a program derived account address with a seed") - .arg( - Arg::with_name("program_id") - .index(1) - .value_name("PROGRAM_ID") - .takes_value(true) - .required(true) - .help( - "The program_id that the address will ultimately be used for, \n\ - or one of NONCE, STAKE, and VOTE keywords", - ), - ) - .arg( - Arg::with_name("seeds") - .min_values(0) - .value_name("SEED") - .takes_value(true) - .validator(is_structured_seed) - .help( - "The seeds. \n\ - Each one must match the pattern PREFIX:VALUE. 
\n\ - PREFIX can be one of [string, pubkey, hex, u8] \n\ - or matches the pattern [u,i][16,32,64,128][le,be] (for example u64le) for number values \n\ - [u,i] - represents whether the number is unsigned or signed, \n\ - [16,32,64,128] - represents the bit length, and \n\ - [le,be] - represents the byte order - little endian or big endian" - ), - ), - ) .subcommand( SubCommand::with_name("decode-transaction") .about("Decode a serialized transaction") @@ -239,7 +242,10 @@ impl WalletSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("resolve-signer") - .about("Checks that a signer is valid, and returns its specific path; useful for signers that may be specified generally, eg. usb://ledger") + .about( + "Checks that a signer is valid, and returns its specific path; useful for \ + signers that may be specified generally, eg. usb://ledger", + ) .arg( Arg::with_name("signer") .index(1) @@ -247,20 +253,20 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("The signer path to resolve") - ) + .help("The signer path to resolve"), + ), ) .subcommand( SubCommand::with_name("transfer") .about("Transfer funds between system accounts") .alias("pay") - .arg( - pubkey!(Arg::with_name("to") + .arg(pubkey!( + Arg::with_name("to") .index(1) .value_name("RECIPIENT_ADDRESS") .required(true), - "The account address of recipient. "), - ) + "Account of recipient." + )) .arg( Arg::with_name("amount") .index(2) @@ -270,17 +276,20 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The amount to send, in SOL; accepts keyword ALL"), ) - .arg( - pubkey!(Arg::with_name("from") + .arg(pubkey!( + Arg::with_name("from") .long("from") .value_name("FROM_ADDRESS"), - "Source account of funds (if different from client local account). "), - ) + "Source account of funds [default: cli config keypair]." 
+ )) .arg( Arg::with_name("no_wait") .long("no-wait") .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + .help( + "Return signature immediately after submitting the transaction, \ + instead of waiting for confirmations", + ), ) .arg( Arg::with_name("derived_address_seed") @@ -289,7 +298,7 @@ impl WalletSubCommands for App<'_, '_> { .value_name("SEED_STRING") .requires("derived_address_program_id") .validator(is_derived_address_seed) - .hidden(hidden_unless_forced()) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("derived_address_program_id") @@ -297,13 +306,13 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .value_name("PROGRAM_ID") .requires("derived_address_seed") - .hidden(hidden_unless_forced()) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("allow_unfunded_recipient") .long("allow-unfunded-recipient") .takes_value(false) - .help("Complete the transfer even if the recipient address is not funded") + .help("Complete the transfer even if the recipient address is not funded"), ) .offline_args() .nonce_args(false) @@ -320,7 +329,7 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .value_name("STRING") .required(true) - .help("The message text to be signed") + .help("The message text to be signed"), ) .arg( Arg::with_name("version") @@ -331,10 +340,10 @@ impl WalletSubCommands for App<'_, '_> { .default_value("0") .validator(|p| match p.parse::<u8>() { Err(_) => Err(String::from("Must be unsigned integer")), - Ok(_) => { Ok(()) } + Ok(_) => Ok(()), }) - .help("The off-chain message version") - ) + .help("The off-chain message version"), + ), ) .subcommand( SubCommand::with_name("verify-offchain-signature") @@ -345,7 +354,7 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .value_name("STRING") .required(true) - .help("The text of the original message") + .help("The text of the original message"), ) .arg( Arg::with_name("signature") @@ -353,7 +362,7 @@ impl WalletSubCommands for App<'_, '_> { .value_name("SIGNATURE") .takes_value(true) .required(true) - .help("The message signature to verify") + .help("The message signature to verify"), ) .arg( Arg::with_name("version") @@ -364,17 +373,17 @@ impl WalletSubCommands for App<'_, '_> { .default_value("0") .validator(|p| match p.parse::<u8>() { Err(_) => Err(String::from("Must be unsigned integer")), - Ok(_) => { Ok(()) } + Ok(_) => Ok(()), }) - .help("The off-chain message version") + .help("The off-chain message version"), ) - .arg( - pubkey!(Arg::with_name("signer") + .arg(pubkey!( + Arg::with_name("signer") .long("signer") .value_name("PUBKEY") .required(false), - "The pubkey of the message signer (if different from config default)") - ) + "Message signer [default: cli config keypair]." + )), ) } } @@ -889,9 +898,8 @@ pub fn process_transfer( .value; if recipient_balance == 0 { return Err(format!( - "The recipient address ({to}) is not funded. \ - Add `--allow-unfunded-recipient` to complete the transfer \ - " + "The recipient address ({to}) is not funded. 
Add `--allow-unfunded-recipient` to \ + complete the transfer " ) .into()); } diff --git a/cli/tests/program.rs b/cli/tests/program.rs index de5ef8cd01319f..ac937ef0d2d6cd 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -1,4 +1,6 @@ #![allow(clippy::arithmetic_side_effects)] +// REMOVE once https://github.com/rust-lang/rust-clippy/issues/11153 is fixed +#![allow(clippy::items_after_test_module)] use { serde_json::Value, @@ -7,19 +9,29 @@ use { program::{ProgramCliCommand, CLOSE_PROGRAM_WARNING}, test_utils::wait_n_slots, }, - solana_cli_output::OutputFormat, + solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_faucet::faucet::run_local_faucet, solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ + account::is_executable, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, + feature_set::FeatureSet, pubkey::Pubkey, - signature::{Keypair, Signer}, + signature::{Keypair, NullSigner, Signer}, }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, - std::{env, fs::File, io::Read, path::PathBuf, str::FromStr}, + std::{ + env, + fs::File, + io::Read, + path::{Path, PathBuf}, + str::FromStr, + }, + test_case::test_case, }; #[test] @@ -65,6 +77,7 @@ fn test_cli_program_deploy_non_upgradeable() { config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -89,7 +102,8 @@ fn test_cli_program_deploy_non_upgradeable() { let account0 = rpc_client.get_account(&program_id).unwrap(); assert_eq!(account0.lamports, minimum_balance_for_program); assert_eq!(account0.owner, bpf_loader_upgradeable::id()); - assert!(account0.executable); + assert!(is_executable(&account0, &FeatureSet::all_enabled())); + let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -98,7 +112,10 @@ fn test_cli_program_deploy_non_upgradeable() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!programdata_account.executable); + assert!(!is_executable( + &programdata_account, + &FeatureSet::all_enabled() + )); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] 
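Reviewer sketch: the test hunks above and below swap direct reads of the raw `account.executable` flag for `solana_sdk::account::is_executable`, which also weighs the active feature set. A minimal sketch of the new assertion pattern; `program_account` and `programdata_account` stand in for the accounts the surrounding tests fetch over RPC:

    use solana_sdk::{account::is_executable, feature_set::FeatureSet};

    // With all features enabled, the deployed program account should report
    // executable, while its companion programdata account should not.
    let feature_set = FeatureSet::all_enabled();
    assert!(is_executable(&program_account, &feature_set));
    assert!(!is_executable(&programdata_account, &feature_set));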
@@ -109,6 +126,7 @@ fn test_cli_program_deploy_non_upgradeable() { config.signers = vec![&keypair, &custom_address_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -125,7 +143,7 @@ fn test_cli_program_deploy_non_upgradeable() { .unwrap(); assert_eq!(account1.lamports, minimum_balance_for_program); assert_eq!(account1.owner, bpf_loader_upgradeable::id()); - assert!(account1.executable); + assert!(is_executable(&account1, &FeatureSet::all_enabled())); let (programdata_pubkey, _) = Pubkey::find_program_address( &[custom_address_keypair.pubkey().as_ref()], &bpf_loader_upgradeable::id(), @@ -136,7 +154,10 @@ fn test_cli_program_deploy_non_upgradeable() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!programdata_account.executable); + assert!(!is_executable( + &programdata_account, + &FeatureSet::all_enabled() + )); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -163,6 +184,7 @@ fn test_cli_program_deploy_non_upgradeable() { config.signers = vec![&keypair, &custom_address_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -185,6 +207,7 @@ fn test_cli_program_deploy_non_upgradeable() { // Use forcing parameter to deploy to account with excess balance config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -245,6 +268,7 @@ fn test_cli_program_deploy_no_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -271,6 +295,7 @@ fn test_cli_program_deploy_no_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_id), buffer_signer_index: None, @@ -332,6 +357,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -359,7 +385,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(program_account.executable); + assert!(is_executable(&program_account, &FeatureSet::all_enabled())); let (programdata_pubkey, _) = Pubkey::find_program_address( &[program_keypair.pubkey().as_ref()], 
&bpf_loader_upgradeable::id(), @@ -370,7 +396,10 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!programdata_account.executable); + assert!(!is_executable( + &programdata_account, + &FeatureSet::all_enabled() + )); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -380,6 +409,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -403,7 +433,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(program_account.executable); + assert!(is_executable(&program_account, &FeatureSet::all_enabled())); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -412,7 +442,10 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!programdata_account.executable); + assert!(!is_executable( + &programdata_account, + &FeatureSet::all_enabled() + )); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -422,6 +455,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_pubkey), buffer_signer_index: None, @@ -436,7 +470,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(program_account.executable); + assert!(is_executable(&program_account, &FeatureSet::all_enabled())); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -445,7 +479,10 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!programdata_account.executable); + assert!(!is_executable( + &programdata_account, + &FeatureSet::all_enabled() + )); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] 
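Reviewer sketch: every `Deploy` and `WriteBuffer` command in these tests gains a `fee_payer_signer_index` field. Judging from how the tests populate it, the value indexes into `config.signers`, so the fee payer no longer has to be the first signer. A hedged sketch of that resolution, with the `signers` vector standing in for `CliConfig::signers`:

    use solana_sdk::signature::{Keypair, Signer};

    let payer = Keypair::new();
    let upgrade_authority = Keypair::new();
    let signers: Vec<&dyn Signer> = vec![&payer, &upgrade_authority];

    // fee_payer_signer_index picks which configured signer pays the fee;
    // index 0 matches the choice made throughout these tests.
    let fee_payer_signer_index = 0;
    println!("fee payer: {}", signers[fee_payer_signer_index].pubkey());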
@@ -477,6 +514,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &new_upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_pubkey), buffer_signer_index: None, @@ -491,7 +529,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(program_account.executable); + assert!(is_executable(&program_account, &FeatureSet::all_enabled())); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -500,7 +538,10 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!programdata_account.executable); + assert!(!is_executable( + &programdata_account, + &FeatureSet::all_enabled() + )); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -552,6 +593,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &new_upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_pubkey), buffer_signer_index: None, @@ -568,6 +610,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &new_upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -671,6 +714,7 @@ fn test_cli_program_close_program() { config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -726,6 +770,91 @@ fn test_cli_program_close_program() { assert_eq!(programdata_lamports, recipient_account.lamports); } +#[test] +fn test_cli_program_extend_program() { + solana_logger::setup(); + + let mut noop_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + noop_path.push("tests"); + noop_path.push("fixtures"); + noop_path.push("noop"); + noop_path.set_extension("so"); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let mut file = File::open(noop_path.to_str().unwrap()).unwrap(); + let mut program_data = Vec::new(); + file.read_to_end(&mut program_data).unwrap(); + let max_len = program_data.len(); + let minimum_balance_for_programdata = rpc_client + 
.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_programdata( + max_len, + )) + .unwrap(); + let minimum_balance_for_program = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) + .unwrap(); + let upgrade_authority = Keypair::new(); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_programdata + minimum_balance_for_program, + }; + process_command(&config).unwrap(); + + // Deploy the upgradeable program + let program_keypair = Keypair::new(); + config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: Some(2), + program_pubkey: Some(program_keypair.pubkey()), + buffer_signer_index: None, + buffer_pubkey: None, + allow_excessive_balance: false, + upgrade_authority_signer_index: 1, + is_final: false, + max_len: Some(max_len), + skip_fee_check: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap(); + + let (programdata_pubkey, _) = Pubkey::find_program_address( + &[program_keypair.pubkey().as_ref()], + &bpf_loader_upgradeable::id(), + ); + + // Wait one slot to avoid "Program was deployed in this block already" error + wait_n_slots(&rpc_client, 1); + + // Extend program + let additional_bytes = 100; + config.signers = vec![&keypair]; + config.command = CliCommand::Program(ProgramCliCommand::ExtendProgram { + program_pubkey: program_keypair.pubkey(), + additional_bytes, + }); + process_command(&config).unwrap(); + + let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); + let expected_len = + UpgradeableLoaderState::size_of_programdata(max_len + additional_bytes as usize); + assert_eq!(expected_len, programdata_account.data.len()); +} + #[test] fn test_cli_program_write_buffer() { solana_logger::setup(); @@ -780,6 +909,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -815,6 +945,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -877,6 +1008,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -915,6 +1047,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + 
fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 2, @@ -989,6 +1122,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -1030,6 +1164,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -1040,6 +1175,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_large_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: Some(1), @@ -1102,6 +1238,7 @@ fn test_cli_program_set_buffer_authority() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -1116,7 +1253,7 @@ fn test_cli_program_set_buffer_authority() { panic!("not a buffer account"); } - // Set new authority + // Set new buffer authority let new_buffer_authority = Keypair::new(); config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::SetBufferAuthority { @@ -1145,7 +1282,25 @@ fn test_cli_program_set_buffer_authority() { panic!("not a buffer account"); } - // Set authority to buffer + // Attempt to deploy program from buffer using previous authority (should fail) + config.signers = vec![&keypair, &buffer_keypair]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: None, + program_pubkey: None, + buffer_signer_index: None, + buffer_pubkey: Some(buffer_keypair.pubkey()), + allow_excessive_balance: false, + upgrade_authority_signer_index: 0, + is_final: false, + max_len: None, + skip_fee_check: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap_err(); + + // Set buffer authority to the buffer identity (it's a common way for program devs to do so) config.signers = vec![&keypair, &new_buffer_authority]; config.command = CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey: buffer_keypair.pubkey(), @@ -1171,6 +1326,24 @@ fn test_cli_program_set_buffer_authority() { } else { panic!("not a buffer account"); } + + // Deploy from buffer using proper(new) buffer authority + config.signers = vec![&keypair, &buffer_keypair]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: None, + program_pubkey: None, + buffer_signer_index: None, + buffer_pubkey: Some(buffer_keypair.pubkey()), + allow_excessive_balance: false, + upgrade_authority_signer_index: 1, + is_final: false, + 
max_len: None, + skip_fee_check: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap(); } #[test] @@ -1218,6 +1391,7 @@ fn test_cli_program_mismatch_buffer_authority() { config.signers = vec![&keypair, &buffer_keypair, &buffer_authority]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -1237,6 +1411,7 @@ fn test_cli_program_mismatch_buffer_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -1253,6 +1428,7 @@ fn test_cli_program_mismatch_buffer_authority() { config.signers = vec![&keypair, &buffer_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -1266,6 +1442,198 @@ fn test_cli_program_mismatch_buffer_authority() { process_command(&config).unwrap(); } +// Assume fee payer will be either online signer or offline signer (could be completely +// separate signer too, but that option is unlikely to be chosen often, so don't bother +// testing for it), we want to test for most common choices. +#[test_case(true; "offline signer will be fee payer")] +#[test_case(false; "online signer will be fee payer")] +fn test_cli_program_deploy_with_offline_signing(use_offline_signer_as_fee_payer: bool) { + solana_logger::setup(); + + let mut noop_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + noop_path.push("tests"); + noop_path.push("fixtures"); + noop_path.push("noop"); + noop_path.set_extension("so"); + + let mut noop_large_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + noop_large_path.push("tests"); + noop_large_path.push("fixtures"); + noop_large_path.push("noop_large"); + noop_large_path.set_extension("so"); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + + let mut file = File::open(noop_large_path.to_str().unwrap()).unwrap(); + let mut large_program_data = Vec::new(); + file.read_to_end(&mut large_program_data).unwrap(); + let max_program_data_len = large_program_data.len(); + let minimum_balance_for_large_buffer = rpc_client + .get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_programdata( + max_program_data_len, + )) + .unwrap(); + + let mut config = CliConfig::recent_for_tests(); + config.json_rpc_url = test_validator.rpc_url(); + + let online_signer = Keypair::new(); + let online_signer_identity = NullSigner::new(&online_signer.pubkey()); + let offline_signer = Keypair::new(); + let buffer_signer = Keypair::new(); + // Typically, keypair for program signer should be different from online signer or + // offline signer keypairs. 
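+ // Condensed sketch of the offline flow exercised below (a reviewer summary,
+ // not part of the patch): the upgrade authority signs the Upgrade message
+ // offline against a fixed blockhash, and the captured signature is replayed
+ // online as a presigner, e.g.
+ //     let sign_only = parse_sign_only_reply_string(&sig_response);
+ //     let presigner = sign_only.presigner_of(&offline_signer.pubkey()).unwrap();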
+ let program_signer = Keypair::new(); + + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_large_buffer, // gotta be enough for this test + }; + config.signers = vec![&online_signer]; + process_command(&config).unwrap(); + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 100 * minimum_balance_for_large_buffer, // gotta be enough for this test + }; + config.signers = vec![&offline_signer]; + process_command(&config).unwrap(); + + // Deploy upgradeable program with authority set to offline signer + config.signers = vec![&online_signer, &offline_signer, &program_signer]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: Some(2), + program_pubkey: Some(program_signer.pubkey()), + buffer_signer_index: None, + buffer_pubkey: None, + allow_excessive_balance: false, + upgrade_authority_signer_index: 1, // must be offline signer for security reasons + is_final: false, + max_len: Some(max_program_data_len), // allows for larger program size with future upgrades + skip_fee_check: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap(); + + // Prepare buffer to upgrade deployed program to a larger program + create_buffer_with_offline_authority( + &rpc_client, + &noop_large_path, + &mut config, + &online_signer, + &offline_signer, + &buffer_signer, + ); + + // Offline sign-only with signature over "wrong" message (with different buffer) + config.signers = vec![&offline_signer]; + let fee_payer_signer_index = if use_offline_signer_as_fee_payer { + 0 // offline signer + } else { + config.signers.push(&online_signer_identity); // can't (and won't) provide signature in --sign-only mode + 1 // online signer + }; + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: program_signer.pubkey(), // will ensure offline signature applies to wrong(different) message + upgrade_authority_signer_index: 0, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + }); + config.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let offline_pre_signer = sign_only.presigner_of(&offline_signer.pubkey()).unwrap(); + // Attempt to deploy from buffer using signature over wrong(different) message (should fail) + config.signers = vec![&offline_pre_signer, &program_signer]; + let fee_payer_signer_index = if use_offline_signer_as_fee_payer { + 0 // offline signer + } else { + config.signers.push(&online_signer); // can provide signature when not in --sign-only mode + 2 // online signer + }; + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: buffer_signer.pubkey(), + upgrade_authority_signer_index: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + }); + config.output_format = OutputFormat::JsonCompact; + let error = process_command(&config).unwrap_err(); + assert_eq!(error.to_string(), "presigner error"); + + // Offline sign-only with online signer as fee payer (correct signature for program upgrade) + config.signers = 
vec![&offline_signer]; + let fee_payer_signer_index = if use_offline_signer_as_fee_payer { + 0 // offline signer + } else { + config.signers.push(&online_signer_identity); // can't (and won't) provide signature in --sign-only mode + 1 // online signer + }; + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: buffer_signer.pubkey(), + upgrade_authority_signer_index: 0, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + }); + config.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let offline_pre_signer = sign_only.presigner_of(&offline_signer.pubkey()).unwrap(); + // Attempt to deploy from buffer using signature over correct message (should succeed) + config.signers = vec![&offline_pre_signer, &program_signer]; + let fee_payer_signer_index = if use_offline_signer_as_fee_payer { + 0 // offline signer + } else { + config.signers.push(&online_signer); // can provide signature when not in --sign-only mode + 2 // online signer + }; + config.command = CliCommand::Program(ProgramCliCommand::Upgrade { + fee_payer_signer_index, + program_pubkey: program_signer.pubkey(), + buffer_pubkey: buffer_signer.pubkey(), + upgrade_authority_signer_index: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::new(Some(blockhash), true, None), + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap(); + let (programdata_pubkey, _) = Pubkey::find_program_address( + &[program_signer.pubkey().as_ref()], + &bpf_loader_upgradeable::id(), + ); + let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); + assert_eq!( + programdata_account.lamports, + minimum_balance_for_large_buffer + ); + assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); + assert!(!programdata_account.executable); + assert_eq!( + programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], + large_program_data[..] 
+ ); +} + #[test] fn test_cli_program_show() { solana_logger::setup(); @@ -1314,6 +1682,7 @@ fn test_cli_program_show() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -1370,6 +1739,7 @@ fn test_cli_program_show() { config.signers = vec![&keypair, &authority_keypair, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -1501,6 +1871,7 @@ fn test_cli_program_dump() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -1530,3 +1901,47 @@ fn test_cli_program_dump() { assert_eq!(program_data[i], out_data[i]); } } + +fn create_buffer_with_offline_authority<'a>( + rpc_client: &RpcClient, + program_path: &Path, + config: &mut CliConfig<'a>, + online_signer: &'a Keypair, + offline_signer: &'a Keypair, + buffer_signer: &'a Keypair, +) { + // Write a buffer + config.signers = vec![online_signer, buffer_signer]; + config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { + program_location: program_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, + buffer_signer_index: Some(1), + buffer_pubkey: Some(buffer_signer.pubkey()), + buffer_authority_signer_index: 0, + max_len: None, + skip_fee_check: false, + }); + process_command(config).unwrap(); + let buffer_account = rpc_client.get_account(&buffer_signer.pubkey()).unwrap(); + if let UpgradeableLoaderState::Buffer { authority_address } = buffer_account.state().unwrap() { + assert_eq!(authority_address, Some(online_signer.pubkey())); + } else { + panic!("not a buffer account"); + } + + // Set buffer authority to offline signer + config.signers = vec![online_signer]; + config.command = CliCommand::Program(ProgramCliCommand::SetBufferAuthority { + buffer_pubkey: buffer_signer.pubkey(), + buffer_authority_index: Some(0), + new_buffer_authority: offline_signer.pubkey(), + }); + config.output_format = OutputFormat::JsonCompact; + process_command(config).unwrap(); + let buffer_account = rpc_client.get_account(&buffer_signer.pubkey()).unwrap(); + if let UpgradeableLoaderState::Buffer { authority_address } = buffer_account.state().unwrap() { + assert_eq!(authority_address, Some(offline_signer.pubkey())); + } else { + panic!("not a buffer account"); + } +} diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 5a88e4d114fb84..514d99ada1ca10 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -37,6 +37,7 @@ tungstenite = { workspace = true, features = ["rustls-tls-webpki-roots"] } [dev-dependencies] solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 44673c06f4d087..216687aecf916c 100644 --- 
a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -9,7 +9,10 @@ use { }, }, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, - solana_sdk::{pubkey::Pubkey, signature::Keypair, transport::Result as TransportResult}, + solana_sdk::{ + pubkey::Pubkey, quic::NotifyKeyUpdate, signature::Keypair, + transport::Result as TransportResult, + }, solana_streamer::streamer::StakedNodes, solana_udp_client::{UdpConfig, UdpConnectionManager, UdpPool}, std::{ @@ -43,6 +46,15 @@ pub enum NonblockingClientConnection { Udp(Arc<::NonblockingClientConnection>), } +impl NotifyKeyUpdate for ConnectionCache { + fn update_key(&self, key: &Keypair) -> Result<(), Box<dyn std::error::Error>> { + match self { + Self::Udp(_) => Ok(()), + Self::Quic(backend) => backend.update_key(key), + } + } +} + impl ConnectionCache { pub fn new(name: &'static str) -> Self { if DEFAULT_CONNECTION_CACHE_USE_QUIC { @@ -64,7 +76,7 @@ impl ConnectionCache { Self::new_with_client_options(name, connection_pool_size, None, None, None) } - /// Create a quic conneciton_cache with more client options + /// Create a quic connection_cache with more client options pub fn new_with_client_options( name: &'static str, connection_pool_size: usize, @@ -217,7 +229,8 @@ mod tests { crossbeam_channel::unbounded, solana_sdk::{net::DEFAULT_TPU_COALESCE, signature::Keypair}, solana_streamer::{ - nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, streamer::StakedNodes, + nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, quic::SpawnServerResult, + streamer::StakedNodes, }, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -245,7 +258,11 @@ mod tests { let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (response_recv_endpoint, response_recv_thread) = solana_streamer::quic::spawn_server( + let SpawnServerResult { + endpoint: response_recv_endpoint, + thread: response_recv_thread, + key_updater: _, + } = solana_streamer::quic::spawn_server( "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/client/src/send_and_confirm_transactions_in_parallel.rs b/client/src/send_and_confirm_transactions_in_parallel.rs index 70f79eba192389..9185ed790c39b1 100644 --- a/client/src/send_and_confirm_transactions_in_parallel.rs +++ b/client/src/send_and_confirm_transactions_in_parallel.rs @@ -102,7 +102,7 @@ fn create_blockhash_data_updating_task( fn create_transaction_confirmation_task( rpc_client: Arc<RpcClient>, current_block_height: Arc<AtomicU64>, - unconfirmed_transasction_map: Arc<DashMap<Signature, TransactionData>>, + unconfirmed_transaction_map: Arc<DashMap<Signature, TransactionData>>, errors_map: Arc<DashMap<usize, TransactionError>>, num_confirmed_transactions: Arc<AtomicUsize>, ) -> JoinHandle<()> { @@ -111,9 +111,9 @@ fn create_transaction_confirmation_task( let mut last_block_height = current_block_height.load(Ordering::Relaxed); loop { - if !unconfirmed_transasction_map.is_empty() { + if !unconfirmed_transaction_map.is_empty() { let current_block_height = current_block_height.load(Ordering::Relaxed); - let transactions_to_verify: Vec<Signature> = unconfirmed_transasction_map + let transactions_to_verify: Vec<Signature> = unconfirmed_transaction_map .iter() .filter(|x| { let is_not_expired = current_block_height <= x.last_valid_block_height; @@ -135,7 +135,7 @@ fn create_transaction_confirmation_task( status.satisfies_commitment(rpc_client.commitment()) }) .and_then(|status| { - unconfirmed_transasction_map + unconfirmed_transaction_map .remove(signature) .map(|(_, data)| (status, data)) }) @@ -158,7 +158,7 @@ #[derive(Clone, Debug)] struct SendingContext { - unconfirmed_transasction_map: Arc<DashMap<Signature, TransactionData>>, + unconfirmed_transaction_map: Arc<DashMap<Signature, TransactionData>>, error_map: Arc<DashMap<usize, TransactionError>>, blockhash_data_rw: Arc<RwLock<BlockHashData>>, num_confirmed_transactions: Arc<AtomicUsize>, @@ -249,7 +249,7 @@ async fn sign_all_messages_and_send( transaction .try_sign(signers, blockhashdata.blockhash) .expect("Transaction should be signable"); - let serialized_transaction = serialize(&transaction).expect("Transaction should serailize"); + let serialized_transaction = serialize(&transaction).expect("Transaction should serialize"); let signature = transaction.signatures[0]; futures.push( send_transaction_with_rpc_fallback( @@ -263,7 +263,7 @@ ) .and_then(move |_| async move { // send to confirm the transaction - context.unconfirmed_transasction_map.insert( + context.unconfirmed_transaction_map.insert( signature, TransactionData { index: *index, @@ -300,11 +300,11 @@ async fn confirm_transactions_till_block_height_and_resend_unexpired_transaction tpu_client: &Option, context: &SendingContext, ) { - let unconfirmed_transasction_map = context.unconfirmed_transasction_map.clone(); + let unconfirmed_transaction_map = context.unconfirmed_transaction_map.clone(); let current_block_height = context.current_block_height.clone(); - let transactions_to_confirm = unconfirmed_transasction_map.len(); - let max_valid_block_height = unconfirmed_transasction_map + let transactions_to_confirm = unconfirmed_transaction_map.len(); + let max_valid_block_height = unconfirmed_transaction_map .iter() .map(|x| x.last_valid_block_height) .max(); @@ -321,7 +321,7 @@ } // wait till all transactions are confirmed or we have surpassed max processing age for the last sent transaction - while !unconfirmed_transasction_map.is_empty() + while !unconfirmed_transaction_map.is_empty() && current_block_height.load(Ordering::Relaxed) <= max_valid_block_height { let block_height = current_block_height.load(Ordering::Relaxed); @@ -339,7 +339,7 @@ let instant = Instant::now(); // retry sending transaction only over TPU port // any transactions sent over RPC will be automatically rebroadcast by the RPC server - let txs_to_resend_over_tpu = unconfirmed_transasction_map + let txs_to_resend_over_tpu = unconfirmed_transaction_map .iter() .filter(|x| block_height < x.last_valid_block_height) .map(|x| x.serialized_transaction.clone()) @@ -356,7 +356,7 @@ tokio::time::sleep(Duration::from_millis(100)).await; } if let Some(max_valid_block_height_in_remaining_transaction) = - unconfirmed_transasction_map + unconfirmed_transaction_map .iter() .map(|x| x.last_valid_block_height) .max() @@ -439,7 +439,7 @@ pub async fn send_and_confirm_transactions_in_parallel( let mut initial = true; let signing_count = config.resign_txs_count.unwrap_or(1); let context = SendingContext { - unconfirmed_transasction_map: unconfirmed_transasction_map.clone(), + unconfirmed_transaction_map: unconfirmed_transasction_map.clone(), blockhash_data_rw: blockhash_data_rw.clone(), num_confirmed_transactions: num_confirmed_transactions.clone(), current_block_height: current_block_height.clone(), diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index a674dccd7020fb..eed6991abf1b5a 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -9,7 +9,7 @@ use { log::*, rand::{thread_rng,
Rng}, solana_measure::measure::Measure, - solana_sdk::timing::AtomicInterval, + solana_sdk::{signature::Keypair, timing::AtomicInterval}, std::{ net::SocketAddr, sync::{atomic::Ordering, Arc, RwLock}, @@ -38,6 +38,7 @@ pub trait ConnectionManager: Send + Sync + 'static { fn new_connection_pool(&self) -> Self::ConnectionPool; fn new_connection_config(&self) -> Self::NewConnectionConfig; + fn update_key(&self, _key: &Keypair) -> Result<(), Box<dyn std::error::Error>>; } pub struct ConnectionCache< @@ -137,6 +138,11 @@ where .unwrap() } + pub fn update_key(&self, key: &Keypair) -> Result<(), Box<dyn std::error::Error>> { + let mut map = self.map.write().unwrap(); + map.clear(); + self.connection_manager.update_key(key) + } /// Create a lazy connection object under the exclusive lock of the cache map if there is not /// enough used connections in the connection pool for the specified address. /// Returns CreateConnectionResult. @@ -636,6 +642,10 @@ mod tests { fn new_connection_config(&self) -> Self::NewConnectionConfig { MockUdpConfig::new().unwrap() } + + fn update_key(&self, _key: &Keypair) -> Result<(), Box<dyn std::error::Error>> { + Ok(()) + } } impl BlockingClientConnection for MockUdpConnection { diff --git a/core/Cargo.toml b/core/Cargo.toml index 0bc1a3fe3770aa..bc1bd4549f6751 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -69,6 +69,7 @@ solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-turbine = { workspace = true } +solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index bc22ae4774d798..242d3b0ed6b530 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -37,8 +37,7 @@ use { }, solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry}, solana_runtime::{ - bank::Bank, bank_forks::BankForks, installed_scheduler_pool::BankWithScheduler, - prioritization_fee_cache::PrioritizationFeeCache, + bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, }, solana_sdk::{ genesis_config::GenesisConfig, @@ -82,7 +81,9 @@ fn check_txs(receiver: &Arc<Receiver<WorkingBankEntry>>, ref_tx_count: usize) { #[bench] fn bench_consume_buffered(bencher: &mut Bencher) { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000); - let bank = Arc::new(Bank::new_for_benches(&genesis_config)); + let bank = Bank::new_for_benches(&genesis_config) + .wrap_with_bank_forks_for_tests() + .0; let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Arc::new( Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), @@ -392,8 +393,15 @@ fn simulate_process_entries( initial_lamports: u64, num_accounts: usize, ) { - let bank = Arc::new(Bank::new_for_benches(genesis_config)); - let bank = BankWithScheduler::new_without_scheduler(bank); + let bank = Bank::new_for_benches(genesis_config); + let slot = bank.slot(); + let bank_fork = BankForks::new_rw_arc(bank); + let bank = bank_fork.read().unwrap().get_with_scheduler(slot).unwrap(); + bank.clone_without_scheduler() + .loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(bank_fork.clone()); for i in 0..(num_accounts / 2) { bank.transfer(initial_lamports, mint_keypair, &keypairs[i * 2].pubkey()) diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs index 928758deb7f55a..735255d59cc160 100644 --- a/core/benches/consumer.rs +++ b/core/benches/consumer.rs @@ -115,7 +115,7 @@ fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame { bank.write_cost_tracker() .unwrap() .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX); - let bank = Arc::new(bank); + let bank = bank.wrap_with_bank_forks_for_tests().0; let ledger_path = TempDir::new().unwrap(); let blockstore = Arc::new( diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs index 110e1f5aa42b66..3acd0f84336113 100644 --- a/core/src/admin_rpc_post_init.rs +++ b/core/src/admin_rpc_post_init.rs @@ -1,7 +1,7 @@ use { solana_gossip::cluster_info::ClusterInfo, solana_runtime::bank_forks::BankForks, - solana_sdk::pubkey::Pubkey, + solana_sdk::{pubkey::Pubkey, quic::NotifyKeyUpdate}, std::{ collections::HashSet, sync::{Arc, RwLock}, @@ -14,4 +14,5 @@ pub struct AdminRpcRequestMetadataPostInit { pub bank_forks: Arc<RwLock<BankForks>>, pub vote_account: Pubkey, pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>, + pub notifies: Vec<Arc<dyn NotifyKeyUpdate + Sync + Send>>, } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 19c3eb55eb675a..158614b32d7963 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -16,17 +16,26 @@ use { unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, }, crate::{ - banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats, + banking_stage::{ + consume_worker::ConsumeWorker, + packet_deserializer::PacketDeserializer, + transaction_scheduler::{ + prio_graph_scheduler::PrioGraphScheduler, + scheduler_controller::SchedulerController, scheduler_error::SchedulerError, + }, + }, + banking_trace::BankingPacketReceiver, + tracer_packet_stats::TracerPacketStats, validator::BlockProductionMethod, }, - crossbeam_channel::RecvTimeoutError, + crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, histogram::Histogram, solana_client::connection_cache::ConnectionCache, solana_gossip::cluster_info::ClusterInfo, solana_ledger::blockstore_processor::TransactionStatusSender, solana_measure::{measure, measure_us}, solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH}, - solana_poh::poh_recorder::PohRecorder, + solana_poh::poh_recorder::{PohRecorder, TransactionRecorder}, solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, solana_sdk::timing::AtomicInterval, solana_vote::vote_sender_types::ReplayVoteSender, @@ -378,6 +387,20 @@ impl BankingStage { prioritization_fee_cache, ) } + BlockProductionMethod::CentralScheduler => Self::new_central_scheduler( + cluster_info, + poh_recorder, + non_vote_receiver, + tpu_vote_receiver, + gossip_vote_receiver, + num_threads, + transaction_status_sender, + replay_vote_sender, + log_messages_bytes_limit, + connection_cache, + bank_forks, + prioritization_fee_cache, + ), } } @@ -405,6 +428,15 @@ impl BankingStage { TOTAL_BUFFERED_PACKETS / ((num_threads - NUM_VOTE_PROCESSING_THREADS) as usize); // Keeps track of extraneous vote transactions for the vote threads let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + + let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); + let committer = Committer::new( + transaction_status_sender.clone(), + replay_vote_sender.clone(), + prioritization_fee_cache.clone(), + ); + let transaction_recorder = poh_recorder.read().unwrap().new_recorder(); + // Many banks that process transactions in parallel. let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads) .map(|id| { @@ -432,16 +464,6 @@ impl BankingStage { ), }; - let mut packet_receiver = - PacketReceiver::new(id, packet_receiver, bank_forks.clone()); - let poh_recorder = poh_recorder.clone(); - - let committer = Committer::new( - transaction_status_sender.clone(), - replay_vote_sender.clone(), - prioritization_fee_cache.clone(), - ); - let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); let forwarder = Forwarder::new( poh_recorder.clone(), bank_forks.clone(), @@ -449,31 +471,179 @@ impl BankingStage { connection_cache.clone(), data_budget.clone(), ); - let consumer = Consumer::new( - committer, + + Self::spawn_thread_local_multi_iterator_thread( + id, + packet_receiver, + bank_forks.clone(), + decision_maker.clone(), + committer.clone(), + transaction_recorder.clone(), + log_messages_bytes_limit, + forwarder, + unprocessed_transaction_storage, + ) + }) + .collect(); + Self { bank_thread_hdls } + } + + #[allow(clippy::too_many_arguments)] + pub fn new_central_scheduler( + cluster_info: &Arc<ClusterInfo>, + poh_recorder: &Arc<RwLock<PohRecorder>>, + non_vote_receiver: BankingPacketReceiver, + tpu_vote_receiver: BankingPacketReceiver, + gossip_vote_receiver: BankingPacketReceiver, + num_threads: u32, + transaction_status_sender: Option<TransactionStatusSender>, + replay_vote_sender: ReplayVoteSender, + log_messages_bytes_limit: Option<usize>, + connection_cache: Arc<ConnectionCache>, + bank_forks: Arc<RwLock<BankForks>>, + prioritization_fee_cache: &Arc<PrioritizationFeeCache>, + ) -> Self { + assert!(num_threads >= MIN_TOTAL_THREADS); + // Single thread to generate entries from many banks. + // This thread talks to poh_service and broadcasts the entries once they have been recorded. + // Once an entry has been recorded, its blockhash is registered with the bank. + let data_budget = Arc::new(DataBudget::default()); + // Keeps track of extraneous vote transactions for the vote threads + let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + + let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); + let committer = Committer::new( + transaction_status_sender.clone(), + replay_vote_sender.clone(), + prioritization_fee_cache.clone(), + ); + let transaction_recorder = poh_recorder.read().unwrap().new_recorder(); + + // + 1 for the central scheduler thread + let mut bank_thread_hdls = Vec::with_capacity(num_threads as usize + 1); + + // Spawn legacy voting threads first: 1 gossip, 1 tpu + for (id, packet_receiver, vote_source) in [ + (0, gossip_vote_receiver, VoteSource::Gossip), + (1, tpu_vote_receiver, VoteSource::Tpu), + ] { + bank_thread_hdls.push(Self::spawn_thread_local_multi_iterator_thread( + id, + packet_receiver, + bank_forks.clone(), + decision_maker.clone(), + committer.clone(), + transaction_recorder.clone(), + log_messages_bytes_limit, + Forwarder::new( + poh_recorder.clone(), + bank_forks.clone(), + cluster_info.clone(), + connection_cache.clone(), + data_budget.clone(), + ), + UnprocessedTransactionStorage::new_vote_storage( + latest_unprocessed_votes.clone(), + vote_source, + ), + )); + } + + // Create channels for communication between scheduler and workers + let num_workers = (num_threads).saturating_sub(NUM_VOTE_PROCESSING_THREADS); + let (work_senders, work_receivers): (Vec<Sender<ConsumeWork>>, Vec<Receiver<ConsumeWork>>) = + (0..num_workers).map(|_| unbounded()).unzip(); + let (finished_work_sender, finished_work_receiver) = unbounded(); + + // Spawn the worker threads + let mut worker_metrics = Vec::with_capacity(num_workers as usize); + for (index, work_receiver) in work_receivers.into_iter().enumerate() { + let id = (index as u32).saturating_add(NUM_VOTE_PROCESSING_THREADS); + let consume_worker = ConsumeWorker::new( + id, + work_receiver, + Consumer::new( + committer.clone(), poh_recorder.read().unwrap().new_recorder(), QosService::new(id), log_messages_bytes_limit, - ); + ), + finished_work_sender.clone(), + poh_recorder.read().unwrap().new_leader_bank_notifier(), + ); + worker_metrics.push(consume_worker.metrics_handle()); + bank_thread_hdls.push( Builder::new() - .name(format!("solBanknStgTx{id:02}")) + .name(format!("solCoWorker{id:02}")) .spawn(move || { - Self::process_loop( - &mut packet_receiver, - &decision_maker, - &forwarder, - &consumer, - id, - unprocessed_transaction_storage, - ); + let _ = consume_worker.run(); }) - .unwrap() - }) - .collect(); + .unwrap(), + ) + } + + // Spawn the central scheduler thread + bank_thread_hdls.push({ + let packet_deserializer = + PacketDeserializer::new(non_vote_receiver, bank_forks.clone()); + let scheduler = PrioGraphScheduler::new(work_senders, finished_work_receiver); + let scheduler_controller = SchedulerController::new( + decision_maker.clone(), + packet_deserializer, + bank_forks, + scheduler, + worker_metrics, + ); + Builder::new() + .name("solBnkTxSched".to_string()) + .spawn(move || match scheduler_controller.run() { + Ok(_) => {} + Err(SchedulerError::DisconnectedRecvChannel(_)) => {} + Err(SchedulerError::DisconnectedSendChannel(_)) => { + warn!("Unexpected worker disconnect from scheduler") + } + }) + .unwrap() + }); + Self { bank_thread_hdls } } + fn spawn_thread_local_multi_iterator_thread( + id: u32, + packet_receiver: BankingPacketReceiver, + bank_forks: Arc<RwLock<BankForks>>, + decision_maker: DecisionMaker, + committer: Committer, + transaction_recorder: TransactionRecorder, + log_messages_bytes_limit: Option<usize>, + forwarder: Forwarder, + unprocessed_transaction_storage: UnprocessedTransactionStorage, + ) -> JoinHandle<()> { + let mut packet_receiver = PacketReceiver::new(id, packet_receiver, bank_forks); + let consumer = Consumer::new( + committer, + transaction_recorder, + QosService::new(id), + log_messages_bytes_limit, + ); + + Builder::new() + .name(format!("solBanknStgTx{id:02}")) + .spawn(move || { + Self::process_loop( + &mut packet_receiver, + &decision_maker, + &forwarder, + &consumer, + id, + unprocessed_transaction_storage, + ) + }) + .unwrap() + } + #[allow(clippy::too_many_arguments)] fn process_buffered_packets( decision_maker: &DecisionMaker, @@ -631,9 +801,7 @@ mod tests { }, poh_service::PohService, }, - solana_runtime::{ - bank::Bank, bank_forks::BankForks, genesis_utils::bootstrap_validator_stake_lamports, - }, + solana_runtime::{bank::Bank, genesis_utils::bootstrap_validator_stake_lamports}, solana_sdk::{ hash::Hash, poh_config::PohConfig, @@ -669,9 +837,7 @@ mod tests { #[test] fn test_banking_stage_shutdown1() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); @@ -721,9 +887,7 @@ mod tests { } = create_genesis_config(2); genesis_config.ticks_per_slot = 4; let num_extra_ticks = 2; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); @@ -793,17 +957,14 @@ mod tests { with_vers.into_iter().map(|(b, _)| b).collect() } - #[test] - fn test_banking_stage_entries_only() { + fn test_banking_stage_entries_only(block_production_method: BlockProductionMethod) { solana_logger::setup(); let GenesisConfigInfo { genesis_config, mint_keypair, ..
} = create_slow_genesis_config(10); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); @@ -829,7 +990,7 @@ mod tests { let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let banking_stage = BankingStage::new( - BlockProductionMethod::ThreadLocalMultiIterator, + block_production_method, &cluster_info, &poh_recorder, non_vote_receiver, @@ -888,7 +1049,7 @@ mod tests { drop(poh_recorder); let mut blockhash = start_hash; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; bank.process_transaction(&fund_tx).unwrap(); //receive entries + ticks loop { @@ -922,6 +1083,16 @@ mod tests { Blockstore::destroy(ledger_path.path()).unwrap(); } + #[test] + fn test_banking_stage_entries_only_thread_local_multi_iterator() { + test_banking_stage_entries_only(BlockProductionMethod::ThreadLocalMultiIterator); + } + + #[test] + fn test_banking_stage_entries_only_central_scheduler() { + test_banking_stage_entries_only(BlockProductionMethod::CentralScheduler); + } + #[test] fn test_banking_stage_entryfication() { solana_logger::setup(); @@ -973,9 +1144,7 @@ mod tests { let entry_receiver = { // start a banking_stage to eat verified receiver - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let blockstore = Arc::new( Blockstore::open(ledger_path.path()) .expect("Expected to be able to open database ledger"), @@ -1024,7 +1193,7 @@ mod tests { .map(|(_bank, (entry, _tick_height))| entry) .collect(); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; for entry in entries { bank.process_entry_transactions(entry.transactions) .iter() @@ -1048,7 +1217,7 @@ mod tests { mint_keypair, .. } = create_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Blockstore::open(ledger_path.path()) @@ -1157,9 +1326,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index a5e42cbc75f8ec..ab8f3a9ed57b5b 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -15,7 +15,7 @@ use { prioritization_fee_cache::PrioritizationFeeCache, transaction_batch::TransactionBatch, }, - solana_sdk::{pubkey::Pubkey, saturating_add_assign}, + solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign}, solana_transaction_status::{ token_balances::TransactionTokenBalancesSet, TransactionTokenBalance, }, @@ -36,6 +36,7 @@ pub(super) struct PreBalanceInfo { pub mint_decimals: HashMap<Pubkey, u8>, } +#[derive(Clone)] pub struct Committer { transaction_status_sender: Option<TransactionStatusSender>, replay_vote_sender: ReplayVoteSender, @@ -65,6 +66,8 @@ impl Committer { batch: &TransactionBatch, loaded_transactions: &mut [TransactionLoadResult], execution_results: Vec<TransactionExecutionResult>, + last_blockhash: Hash, + lamports_per_signature: u64, starting_transaction_index: Option<usize>, bank: &Arc<Bank>, pre_balance_info: &mut PreBalanceInfo, @@ -74,9 +77,6 @@ impl Committer { executed_non_vote_transactions_count: usize, executed_with_successful_result_count: usize, ) -> (u64, Vec<CommitTransactionDetails>) { - let (last_blockhash, lamports_per_signature) = - bank.last_blockhash_and_lamports_per_signature(); - let executed_transactions = execution_results .iter() .zip(batch.sanitized_transactions()) diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index f18f3da5d16acd..d3a53aa42e91b8 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -1,12 +1,21 @@ use { super::{ consumer::{Consumer, ExecuteAndCommitTransactionsOutput, ProcessTransactionBatchOutput}, + leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, scheduler_messages::{ConsumeWork, FinishedConsumeWork}, }, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, solana_poh::leader_bank_notifier::LeaderBankNotifier, solana_runtime::bank::Bank, - std::{sync::Arc, time::Duration}, + solana_sdk::timing::AtomicInterval, + std::{ + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }, thiserror::Error, }; @@ -24,11 +33,13 @@ pub(crate) struct ConsumeWorker { consumed_sender: Sender<FinishedConsumeWork>, leader_bank_notifier: Arc<LeaderBankNotifier>, + metrics: Arc<ConsumeWorkerMetrics>, } #[allow(dead_code)] impl ConsumeWorker { pub fn new( + id: u32, consume_receiver: Receiver<ConsumeWork>, consumer: Consumer, consumed_sender: Sender<FinishedConsumeWork>, @@ -39,9 +50,14 @@ impl ConsumeWorker { consumer, consumed_sender, leader_bank_notifier, + metrics: Arc::new(ConsumeWorkerMetrics::new(id)), } } + pub fn metrics_handle(&self) -> Arc<ConsumeWorkerMetrics> { + self.metrics.clone() + } + pub fn run(self) -> Result<(), ConsumeWorkerError> { loop { let work = self.consume_receiver.recv()?; @@ -70,22 +86,20 @@ impl ConsumeWorker { /// Consume a single batch. fn consume(&self, bank: &Arc<Bank>, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { - let ProcessTransactionBatchOutput { - execute_and_commit_transactions_output: - ExecuteAndCommitTransactionsOutput { - retryable_transaction_indexes, - .. - }, - .. - } = self.consumer.process_and_record_aged_transactions( + let output = self.consumer.process_and_record_aged_transactions( bank, &work.transactions, &work.max_age_slots, ); + self.metrics.update_for_consume(&output); + self.metrics.has_data.store(true, Ordering::Relaxed); + self.consumed_sender.send(FinishedConsumeWork { work, - retryable_indexes: retryable_transaction_indexes, + retryable_indexes: output + .execute_and_commit_transactions_output + .retryable_transaction_indexes, })?; Ok(()) } @@ -107,7 +121,17 @@ impl ConsumeWorker { /// Send transactions back to scheduler as retryable. fn retry(&self, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { - let retryable_indexes = (0..work.transactions.len()).collect(); + let retryable_indexes: Vec<_> = (0..work.transactions.len()).collect(); + let num_retryable = retryable_indexes.len(); + self.metrics + .count_metrics + .retryable_transaction_count + .fetch_add(num_retryable, Ordering::Relaxed); + self.metrics + .count_metrics + .retryable_expired_bank_count + .fetch_add(num_retryable, Ordering::Relaxed); + self.metrics.has_data.store(true, Ordering::Relaxed); self.consumed_sender.send(FinishedConsumeWork { work, retryable_indexes, @@ -122,6 +146,470 @@ fn try_drain_iter<T>(work: T, receiver: &Receiver<T>) -> impl Iterator<Item = T> + '_ { std::iter::once(work).chain(receiver.try_iter()) } +/// Metrics tracking number of packets processed by the consume worker. +/// These are atomic, and intended to be reported by the scheduling thread +/// since the consume worker thread is sleeping unless there is work to be +/// done. +pub(crate) struct ConsumeWorkerMetrics { + id: u32, + interval: AtomicInterval, + has_data: AtomicBool, + + count_metrics: ConsumeWorkerCountMetrics, + error_metrics: ConsumeWorkerTransactionErrorMetrics, + timing_metrics: ConsumeWorkerTimingMetrics, +} + +impl ConsumeWorkerMetrics { + /// Report and reset metrics iff the interval has elapsed and the worker did some work.
+ pub fn maybe_report_and_reset(&self) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) + && self.has_data.swap(false, Ordering::Relaxed) + { + self.count_metrics.report_and_reset(self.id); + self.timing_metrics.report_and_reset(self.id); + self.error_metrics.report_and_reset(self.id); + } + } + + fn new(id: u32) -> Self { + Self { + id, + interval: AtomicInterval::default(), + has_data: AtomicBool::new(false), + count_metrics: ConsumeWorkerCountMetrics::default(), + error_metrics: ConsumeWorkerTransactionErrorMetrics::default(), + timing_metrics: ConsumeWorkerTimingMetrics::default(), + } + } + + fn update_for_consume( + &self, + ProcessTransactionBatchOutput { + cost_model_throttled_transactions_count, + cost_model_us, + execute_and_commit_transactions_output, + }: &ProcessTransactionBatchOutput, + ) { + self.count_metrics + .cost_model_throttled_transactions_count + .fetch_add(*cost_model_throttled_transactions_count, Ordering::Relaxed); + self.timing_metrics + .cost_model_us + .fetch_add(*cost_model_us, Ordering::Relaxed); + self.update_on_execute_and_commit_transactions_output( + execute_and_commit_transactions_output, + ); + } + + fn update_on_execute_and_commit_transactions_output( + &self, + ExecuteAndCommitTransactionsOutput { + transactions_attempted_execution_count, + executed_transactions_count, + executed_with_successful_result_count, + retryable_transaction_indexes, + execute_and_commit_timings, + error_counters, + .. + }: &ExecuteAndCommitTransactionsOutput, + ) { + self.count_metrics + .transactions_attempted_execution_count + .fetch_add(*transactions_attempted_execution_count, Ordering::Relaxed); + self.count_metrics + .executed_transactions_count + .fetch_add(*executed_transactions_count, Ordering::Relaxed); + self.count_metrics + .executed_with_successful_result_count + .fetch_add(*executed_with_successful_result_count, Ordering::Relaxed); + self.count_metrics + .retryable_transaction_count + .fetch_add(retryable_transaction_indexes.len(), Ordering::Relaxed); + + self.update_on_execute_and_commit_timings(execute_and_commit_timings); + self.update_on_error_counters(error_counters); + } + + fn update_on_execute_and_commit_timings( + &self, + LeaderExecuteAndCommitTimings { + collect_balances_us, + load_execute_us, + freeze_lock_us, + last_blockhash_us, + record_us, + commit_us, + find_and_send_votes_us, + .. 
+ }: &LeaderExecuteAndCommitTimings, + ) { + self.timing_metrics + .collect_balances_us + .fetch_add(*collect_balances_us, Ordering::Relaxed); + self.timing_metrics + .load_execute_us + .fetch_add(*load_execute_us, Ordering::Relaxed); + self.timing_metrics + .freeze_lock_us + .fetch_add(*freeze_lock_us, Ordering::Relaxed); + self.timing_metrics + .last_blockhash_us + .fetch_add(*last_blockhash_us, Ordering::Relaxed); + self.timing_metrics + .record_us + .fetch_add(*record_us, Ordering::Relaxed); + self.timing_metrics + .commit_us + .fetch_add(*commit_us, Ordering::Relaxed); + self.timing_metrics + .find_and_send_votes_us + .fetch_add(*find_and_send_votes_us, Ordering::Relaxed); + } + + fn update_on_error_counters( + &self, + TransactionErrorMetrics { + total, + account_in_use, + too_many_account_locks, + account_loaded_twice, + account_not_found, + blockhash_not_found, + blockhash_too_old, + call_chain_too_deep, + already_processed, + instruction_error, + insufficient_funds, + invalid_account_for_fee, + invalid_account_index, + invalid_program_for_execution, + not_allowed_during_cluster_maintenance, + invalid_writable_account, + invalid_rent_paying_account, + would_exceed_max_block_cost_limit, + would_exceed_max_account_cost_limit, + would_exceed_max_vote_cost_limit, + would_exceed_account_data_block_limit, + max_loaded_accounts_data_size_exceeded, + program_execution_temporarily_restricted, + }: &TransactionErrorMetrics, + ) { + self.error_metrics + .total + .fetch_add(*total, Ordering::Relaxed); + self.error_metrics + .account_in_use + .fetch_add(*account_in_use, Ordering::Relaxed); + self.error_metrics + .too_many_account_locks + .fetch_add(*too_many_account_locks, Ordering::Relaxed); + self.error_metrics + .account_loaded_twice + .fetch_add(*account_loaded_twice, Ordering::Relaxed); + self.error_metrics + .account_not_found + .fetch_add(*account_not_found, Ordering::Relaxed); + self.error_metrics + .blockhash_not_found + .fetch_add(*blockhash_not_found, Ordering::Relaxed); + self.error_metrics + .blockhash_too_old + .fetch_add(*blockhash_too_old, Ordering::Relaxed); + self.error_metrics + .call_chain_too_deep + .fetch_add(*call_chain_too_deep, Ordering::Relaxed); + self.error_metrics + .already_processed + .fetch_add(*already_processed, Ordering::Relaxed); + self.error_metrics + .instruction_error + .fetch_add(*instruction_error, Ordering::Relaxed); + self.error_metrics + .insufficient_funds + .fetch_add(*insufficient_funds, Ordering::Relaxed); + self.error_metrics + .invalid_account_for_fee + .fetch_add(*invalid_account_for_fee, Ordering::Relaxed); + self.error_metrics + .invalid_account_index + .fetch_add(*invalid_account_index, Ordering::Relaxed); + self.error_metrics + .invalid_program_for_execution + .fetch_add(*invalid_program_for_execution, Ordering::Relaxed); + self.error_metrics + .not_allowed_during_cluster_maintenance + .fetch_add(*not_allowed_during_cluster_maintenance, Ordering::Relaxed); + self.error_metrics + .invalid_writable_account + .fetch_add(*invalid_writable_account, Ordering::Relaxed); + self.error_metrics + .invalid_rent_paying_account + .fetch_add(*invalid_rent_paying_account, Ordering::Relaxed); + self.error_metrics + .would_exceed_max_block_cost_limit + .fetch_add(*would_exceed_max_block_cost_limit, Ordering::Relaxed); + self.error_metrics + .would_exceed_max_account_cost_limit + .fetch_add(*would_exceed_max_account_cost_limit, Ordering::Relaxed); + self.error_metrics + .would_exceed_max_vote_cost_limit + .fetch_add(*would_exceed_max_vote_cost_limit, 
Ordering::Relaxed); + self.error_metrics + .would_exceed_account_data_block_limit + .fetch_add(*would_exceed_account_data_block_limit, Ordering::Relaxed); + self.error_metrics + .max_loaded_accounts_data_size_exceeded + .fetch_add(*max_loaded_accounts_data_size_exceeded, Ordering::Relaxed); + self.error_metrics + .program_execution_temporarily_restricted + .fetch_add(*program_execution_temporarily_restricted, Ordering::Relaxed); + } +} + +#[derive(Default)] +struct ConsumeWorkerCountMetrics { + transactions_attempted_execution_count: AtomicUsize, + executed_transactions_count: AtomicUsize, + executed_with_successful_result_count: AtomicUsize, + retryable_transaction_count: AtomicUsize, + retryable_expired_bank_count: AtomicUsize, + cost_model_throttled_transactions_count: AtomicUsize, +} + +impl ConsumeWorkerCountMetrics { + fn report_and_reset(&self, id: u32) { + datapoint_info!( + "banking_stage_worker_counts", + ("id", id, i64), + ( + "transactions_attempted_execution_count", + self.transactions_attempted_execution_count + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "executed_transactions_count", + self.executed_transactions_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "executed_with_successful_result_count", + self.executed_with_successful_result_count + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "retryable_transaction_count", + self.retryable_transaction_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "retryable_expired_bank_count", + self.retryable_expired_bank_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "cost_model_throttled_transactions_count", + self.cost_model_throttled_transactions_count + .swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} + +#[derive(Default)] +struct ConsumeWorkerTimingMetrics { + cost_model_us: AtomicU64, + collect_balances_us: AtomicU64, + load_execute_us: AtomicU64, + freeze_lock_us: AtomicU64, + last_blockhash_us: AtomicU64, + record_us: AtomicU64, + commit_us: AtomicU64, + find_and_send_votes_us: AtomicU64, +} + +impl ConsumeWorkerTimingMetrics { + fn report_and_reset(&self, id: u32) { + datapoint_info!( + "banking_stage_worker_timing", + ("id", id, i64), + ( + "cost_model_us", + self.cost_model_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "collect_balances_us", + self.collect_balances_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "load_execute_us", + self.load_execute_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "freeze_lock_us", + self.freeze_lock_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "last_blockhash_us", + self.last_blockhash_us.swap(0, Ordering::Relaxed), + i64 + ), + ("record_us", self.record_us.swap(0, Ordering::Relaxed), i64), + ("commit_us", self.commit_us.swap(0, Ordering::Relaxed), i64), + ( + "find_and_send_votes_us", + self.find_and_send_votes_us.swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} + +#[derive(Default)] +struct ConsumeWorkerTransactionErrorMetrics { + total: AtomicUsize, + account_in_use: AtomicUsize, + too_many_account_locks: AtomicUsize, + account_loaded_twice: AtomicUsize, + account_not_found: AtomicUsize, + blockhash_not_found: AtomicUsize, + blockhash_too_old: AtomicUsize, + call_chain_too_deep: AtomicUsize, + already_processed: AtomicUsize, + instruction_error: AtomicUsize, + insufficient_funds: AtomicUsize, + invalid_account_for_fee: AtomicUsize, + invalid_account_index: AtomicUsize, + invalid_program_for_execution: AtomicUsize, + not_allowed_during_cluster_maintenance: AtomicUsize, + invalid_writable_account: AtomicUsize, + invalid_rent_paying_account: AtomicUsize, + 
would_exceed_max_block_cost_limit: AtomicUsize, + would_exceed_max_account_cost_limit: AtomicUsize, + would_exceed_max_vote_cost_limit: AtomicUsize, + would_exceed_account_data_block_limit: AtomicUsize, + max_loaded_accounts_data_size_exceeded: AtomicUsize, + program_execution_temporarily_restricted: AtomicUsize, +} + +impl ConsumeWorkerTransactionErrorMetrics { + fn report_and_reset(&self, id: u32) { + datapoint_info!( + "banking_stage_worker_error_metrics", + ("id", id, i64), + ("total", self.total.swap(0, Ordering::Relaxed), i64), + ( + "account_in_use", + self.account_in_use.swap(0, Ordering::Relaxed), + i64 + ), + ( + "too_many_account_locks", + self.too_many_account_locks.swap(0, Ordering::Relaxed), + i64 + ), + ( + "account_loaded_twice", + self.account_loaded_twice.swap(0, Ordering::Relaxed), + i64 + ), + ( + "account_not_found", + self.account_not_found.swap(0, Ordering::Relaxed), + i64 + ), + ( + "blockhash_not_found", + self.blockhash_not_found.swap(0, Ordering::Relaxed), + i64 + ), + ( + "blockhash_too_old", + self.blockhash_too_old.swap(0, Ordering::Relaxed), + i64 + ), + ( + "call_chain_too_deep", + self.call_chain_too_deep.swap(0, Ordering::Relaxed), + i64 + ), + ( + "already_processed", + self.already_processed.swap(0, Ordering::Relaxed), + i64 + ), + ( + "instruction_error", + self.instruction_error.swap(0, Ordering::Relaxed), + i64 + ), + ( + "insufficient_funds", + self.insufficient_funds.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_account_for_fee", + self.invalid_account_for_fee.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_account_index", + self.invalid_account_index.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_program_for_execution", + self.invalid_program_for_execution + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "not_allowed_during_cluster_maintenance", + self.not_allowed_during_cluster_maintenance + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_writable_account", + self.invalid_writable_account.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_rent_paying_account", + self.invalid_rent_paying_account.swap(0, Ordering::Relaxed), + i64 + ), + ( + "would_exceed_max_block_cost_limit", + self.would_exceed_max_block_cost_limit + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "would_exceed_max_account_cost_limit", + self.would_exceed_max_account_cost_limit + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "would_exceed_max_vote_cost_limit", + self.would_exceed_max_vote_cost_limit + .swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} + #[cfg(test)] mod tests { use { @@ -138,7 +626,7 @@ mod tests { get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, + solana_runtime::prioritization_fee_cache::PrioritizationFeeCache, solana_sdk::{ genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, system_transaction, @@ -173,9 +661,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) @@ -207,6 +693,7 @@ mod tests { let (consume_sender, consume_receiver) = unbounded(); let (consumed_sender, consumed_receiver) = unbounded(); let worker = ConsumeWorker::new( + 0, consume_receiver, consumer, consumed_sender, diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 9f9edcf89fd6bb..1d22d64397074b 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -41,29 +41,29 @@ pub const TARGET_NUM_TRANSACTIONS_PER_BATCH: usize = 64; pub struct ProcessTransactionBatchOutput { // The number of transactions filtered out by the cost model - cost_model_throttled_transactions_count: usize, + pub(crate) cost_model_throttled_transactions_count: usize, // Amount of time spent running the cost model - cost_model_us: u64, + pub(crate) cost_model_us: u64, pub execute_and_commit_transactions_output: ExecuteAndCommitTransactionsOutput, } pub struct ExecuteAndCommitTransactionsOutput { // Total number of transactions that were passed as candidates for execution - transactions_attempted_execution_count: usize, + pub(crate) transactions_attempted_execution_count: usize, // The number of transactions of that were executed. See description of in `ProcessTransactionsSummary` // for possible outcomes of execution. - executed_transactions_count: usize, + pub(crate) executed_transactions_count: usize, // Total number of the executed transactions that returned success/not // an error. - executed_with_successful_result_count: usize, + pub(crate) executed_with_successful_result_count: usize, // Transactions that either were not executed, or were executed and failed to be committed due // to the block ending. pub(crate) retryable_transaction_indexes: Vec<usize>, // A result that indicates whether transactions were successfully // committed into the Poh stream. pub commit_transactions_result: Result<Vec<CommitTransactionDetails>, PohRecorderError>, - execute_and_commit_timings: LeaderExecuteAndCommitTimings, - error_counters: TransactionErrorMetrics, + pub(crate) execute_and_commit_timings: LeaderExecuteAndCommitTimings, + pub(crate) error_counters: TransactionErrorMetrics, } pub struct Consumer { @@ -587,6 +587,10 @@ impl Consumer { let (freeze_lock, freeze_lock_us) = measure_us!(bank.freeze_lock()); execute_and_commit_timings.freeze_lock_us = freeze_lock_us; + let ((last_blockhash, lamports_per_signature), last_blockhash_us) = + measure_us!(bank.last_blockhash_and_lamports_per_signature()); + execute_and_commit_timings.last_blockhash_us = last_blockhash_us; + let (record_transactions_summary, record_us) = measure_us!(self .transaction_recorder .record_transactions(bank.slot(), executed_transactions)); @@ -623,6 +627,8 @@ impl Consumer { batch, &mut loaded_transactions, execution_results, + last_blockhash, + lamports_per_signature, starting_transaction_index, bank, &mut pre_balance_info, @@ -881,7 +887,7 @@ mod tests { } = &genesis_config_info; let blockstore = Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config).0; let exit = Arc::new(AtomicBool::default()); let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), @@ -938,7 +944,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let transactions = sanitize_transactions(vec![system_transaction::transfer( @@ -1067,7 +1073,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let transactions = { @@ -1160,11 +1166,12 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let mut bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.ns_per_slot = std::u128::MAX; if !apply_cost_tracker_during_replay_enabled { bank.deactivate_feature(&feature_set::apply_cost_tracker_during_replay::id()); } - let bank = Arc::new(bank); + let bank = bank.wrap_with_bank_forks_for_tests().0; let pubkey = solana_sdk::pubkey::new_rand(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -1264,7 +1271,7 @@ mod tests { let commit_transactions_result = commit_transactions_result.unwrap(); assert_eq!(commit_transactions_result.len(), 2); assert_matches!( - commit_transactions_result.get(0), + commit_transactions_result.first(), Some(CommitTransactionDetails::Committed { .. }) ); assert_matches!( @@ -1274,7 +1281,7 @@ mod tests { assert_eq!(retryable_transaction_indexes, vec![1]); let expected_block_cost = if !apply_cost_tracker_during_replay_enabled { - let actual_bpf_execution_cost = match commit_transactions_result.get(0).unwrap() { + let actual_bpf_execution_cost = match commit_transactions_result.first().unwrap() { CommitTransactionDetails::Committed { compute_units } => *compute_units, CommitTransactionDetails::NotCommitted => { unreachable!() @@ -1312,7 +1319,7 @@ mod tests { mint_keypair, ..
} = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -1390,7 +1397,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(lamports); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; // set cost tracker limits to MAX so it will not filter out TXs bank.write_cost_tracker() .unwrap() @@ -1451,7 +1458,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; // set cost tracker limits to MAX so it will not filter out TXs bank.write_cost_tracker() .unwrap() @@ -1510,7 +1517,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); @@ -1592,7 +1599,7 @@ mod tests { } = create_slow_genesis_config(solana_sdk::native_token::sol_to_lamports(1000.0)); genesis_config.rent.lamports_per_byte_year = 50; genesis_config.rent.exemption_threshold = 2.0; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); let keypair1 = Keypair::new(); @@ -1723,14 +1730,19 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let keypair = Keypair::new(); let address_table_key = Pubkey::new_unique(); let address_table_state = generate_new_address_lookup_table(None, 2); store_address_lookup_table(&bank, address_table_key, address_table_state); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), 1)); + let new_bank = Bank::new_from_parent(bank, &Pubkey::new_unique(), 2); + let bank = bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); let message = VersionedMessage::V0(v0::Message { header: MessageHeader { num_required_signatures: 1, diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index a2d19937ad614c..6ad2c3042b254f 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -28,6 +28,7 @@ impl BufferedPacketsDecision { } } +#[derive(Clone)] pub struct DecisionMaker { my_pubkey: Pubkey, poh_recorder: Arc<RwLock<PohRecorder>>, } @@ -147,7 +148,7 @@ mod tests { #[test] fn test_make_consume_or_forward_decision() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let ledger_path = temp_dir(); let blockstore = Arc::new(Blockstore::open(ledger_path.as_path()).unwrap()); let (exit, poh_recorder, poh_service, _entry_receiver) = diff --git a/core/src/banking_stage/forward_worker.rs b/core/src/banking_stage/forward_worker.rs index c13b8c426378be..255f1b8e01be99 100644 --- a/core/src/banking_stage/forward_worker.rs +++ b/core/src/banking_stage/forward_worker.rs @@ -97,7 +97,7 @@ mod tests { }, solana_perf::packet::to_packet_batches, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::bank::Bank, solana_sdk::{ genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, system_transaction, @@ -128,9 +128,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 1cb656f0ddc701..1092e5c57b07cb 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -306,9 +306,7 @@ mod tests { create_slow_genesis_config_with_leader(10_000, &validator_keypair.pubkey()); let GenesisConfigInfo { genesis_config, ..
} = &genesis_config_info; - let bank: Bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(genesis_config); let ledger_path = TempDir::new().unwrap(); let blockstore = Arc::new( diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 10772b74dee3cc..a62e5bf9b3e455 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -26,7 +26,7 @@ pub enum VoteSource { Tpu, } -/// Holds deserialized vote messages as well as their source, foward status and slot +/// Holds deserialized vote messages as well as their source, forward status and slot #[derive(Debug, Clone)] pub struct LatestValidatorVotePacket { vote_source: VoteSource, @@ -64,7 +64,7 @@ impl LatestValidatorVotePacket { let &pubkey = message .message .static_account_keys() - .get(0) + .first() .ok_or(DeserializedPacketError::VoteTransactionError)?; let slot = vote_state_update_instruction.last_voted_slot().unwrap_or(0); let timestamp = vote_state_update_instruction.timestamp(); diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index b36200d86e12d6..449ff7801991fa 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -47,7 +47,7 @@ pub(crate) struct ProcessTransactionsSummary { // Total amount of time spent running the cost model pub cost_model_us: u64, - // Breakdown of time spent executing and comitting transactions + // Breakdown of time spent executing and committing transactions pub execute_and_commit_timings: LeaderExecuteAndCommitTimings, // Breakdown of all the transaction errors from transactions passed for execution @@ -104,7 +104,7 @@ struct LeaderSlotPacketCountMetrics { // total number of transactions that were executed, but failed to be committed into the Poh stream because // the block ended. Some of these may be already counted in `nonretryable_errored_transactions_count` if they - // then hit the age limit after failing to be comitted. + // then hit the age limit after failing to be committed. executed_transactions_failed_commit_count: u64, // total number of transactions that were excluded from the block because there were concurrent write locks active. 
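
A pattern worth noting across the test updates above: Bank::new_no_wallclock_throttle_for_tests now hands back a (bank, bank_forks) pair, so tests either destructure both handles or take just `.0` when the forks are unused. The sketch below illustrates the shape of that helper only; the struct bodies are simplified stand-ins, not the actual solana-runtime implementation.

use std::sync::{Arc, RwLock};

// Simplified stand-ins for solana-runtime types; illustration only.
struct GenesisConfig;
struct Bank;

struct BankForks {
    root: Arc<Bank>,
}

impl BankForks {
    // Mirrors the BankForks::new_rw_arc constructor used in the diff.
    fn new_rw_arc(bank: Bank) -> Arc<RwLock<BankForks>> {
        Arc::new(RwLock::new(BankForks { root: Arc::new(bank) }))
    }

    fn working_bank(&self) -> Arc<Bank> {
        self.root.clone()
    }
}

impl Bank {
    // Sketch of the tuple-returning test helper: the bank is registered in
    // a BankForks and both handles come back to the caller. The body here
    // is an assumption about the general shape, not the real code.
    fn new_no_wallclock_throttle_for_tests(
        _genesis_config: &GenesisConfig,
    ) -> (Arc<Bank>, Arc<RwLock<BankForks>>) {
        let bank_forks = BankForks::new_rw_arc(Bank);
        let bank = bank_forks.read().unwrap().working_bank();
        (bank, bank_forks)
    }
}

fn main() {
    let (_bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&GenesisConfig);
}

Registering the bank in a BankForks up front lets tests that later need fork-graph or scheduler plumbing reuse the same setup path as production code, instead of wrapping a bare Bank in Arc::new.
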
diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 543b80b4a48897..7727b6cf6c6563 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -10,6 +10,7 @@ pub struct LeaderExecuteAndCommitTimings { pub collect_balances_us: u64, pub load_execute_us: u64, pub freeze_lock_us: u64, + pub last_blockhash_us: u64, pub record_us: u64, pub commit_us: u64, pub find_and_send_votes_us: u64, @@ -22,6 +23,7 @@ impl LeaderExecuteAndCommitTimings { saturating_add_assign!(self.collect_balances_us, other.collect_balances_us); saturating_add_assign!(self.load_execute_us, other.load_execute_us); saturating_add_assign!(self.freeze_lock_us, other.freeze_lock_us); + saturating_add_assign!(self.last_blockhash_us, other.last_blockhash_us); saturating_add_assign!(self.record_us, other.record_us); saturating_add_assign!(self.commit_us, other.commit_us); saturating_add_assign!(self.find_and_send_votes_us, other.find_and_send_votes_us); @@ -38,6 +40,7 @@ impl LeaderExecuteAndCommitTimings { ("collect_balances_us", self.collect_balances_us as i64, i64), ("load_execute_us", self.load_execute_us as i64, i64), ("freeze_lock_us", self.freeze_lock_us as i64, i64), + ("last_blockhash_us", self.last_blockhash_us as i64, i64), ("record_us", self.record_us as i64, i64), ("commit_us", self.commit_us as i64, i64), ( diff --git a/core/src/banking_stage/multi_iterator_scanner.rs b/core/src/banking_stage/multi_iterator_scanner.rs index 866470e8e4c68c..fe1b5906ba1b6d 100644 --- a/core/src/banking_stage/multi_iterator_scanner.rs +++ b/core/src/banking_stage/multi_iterator_scanner.rs @@ -1,6 +1,6 @@ //! Provides an iterator interface that create non-conflicting batches of elements to process. //! -//! The problem that this structure is targetting is as following: +//! The problem that this structure is targeting is as following: //! We have a slice of transactions we want to process in batches where transactions //! in the same batch do not conflict with each other. This allows us process them in //! parallel. The original slice is ordered by priority, and it is often the case diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs index 7a2117675b31d0..b9d65ff4756857 100644 --- a/core/src/banking_stage/read_write_account_set.rs +++ b/core/src/banking_stage/read_write_account_set.rs @@ -173,7 +173,7 @@ mod tests { fn create_test_bank() -> Arc<Bank> { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); - Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)) + Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0 } // Helper function (could potentially use test_case in future).
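
The new last_blockhash_us counter follows the same measure-then-accumulate pattern as the existing timing fields: time a section with measure_us!, then fold the result into a running total with saturating_add_assign!. The following is a plain-Rust approximation of that pattern; measure_us and Timings here are stand-ins, not the solana-measure and solana-sdk macros themselves.

use std::time::Instant;

// Approximation of solana-measure's measure_us!: run a closure and return
// its result together with the elapsed time in microseconds.
fn measure_us<T>(f: impl FnOnce() -> T) -> (T, u64) {
    let start = Instant::now();
    (f(), start.elapsed().as_micros() as u64)
}

#[derive(Default, Debug)]
struct Timings {
    last_blockhash_us: u64,
}

impl Timings {
    // Saturating addition mirrors saturating_add_assign!, so aggregated
    // per-slot counters can never wrap on overflow.
    fn accumulate(&mut self, other: &Timings) {
        self.last_blockhash_us = self
            .last_blockhash_us
            .saturating_add(other.last_blockhash_us);
    }
}

fn main() {
    let mut totals = Timings::default();
    // Stand-in for bank.last_blockhash_and_lamports_per_signature().
    let ((_blockhash, _lamports_per_signature), us) = measure_us(|| ("blockhash", 5_000u64));
    totals.accumulate(&Timings { last_blockhash_us: us });
    println!("{totals:?}");
}
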
diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index 0b65dce06a48fc..65ece5fee6a8a1 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -1,19 +1,13 @@ +mod batch_id_generator; #[allow(dead_code)] +mod in_flight_tracker; +pub(crate) mod prio_graph_scheduler; +pub(crate) mod scheduler_controller; +pub(crate) mod scheduler_error; mod thread_aware_account_locks; - +mod transaction_id_generator; mod transaction_priority_id; #[allow(dead_code)] mod transaction_state; #[allow(dead_code)] mod transaction_state_container; - -mod batch_id_generator; -#[allow(dead_code)] -mod in_flight_tracker; -#[allow(dead_code)] -mod prio_graph_scheduler; -#[allow(dead_code)] -mod scheduler_controller; -mod scheduler_error; -#[allow(dead_code)] -mod transaction_id_generator; diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index 23e15562e1ae54..52f7dda718e722 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -15,7 +15,11 @@ use { crossbeam_channel::{Receiver, Sender, TryRecvError}, itertools::izip, prio_graph::{AccessKind, PrioGraph}, - solana_sdk::{pubkey::Pubkey, slot_history::Slot, transaction::SanitizedTransaction}, + solana_measure::measure_us, + solana_sdk::{ + pubkey::Pubkey, saturating_add_assign, slot_history::Slot, + transaction::SanitizedTransaction, + }, std::collections::HashMap, }; @@ -42,8 +46,17 @@ impl PrioGraphScheduler { } } - /// Schedule transactions from the given `TransactionStateContainer` to be consumed by the - /// worker threads. Returns the number of transactions scheduled, or an error. + /// Schedule transactions from the given `TransactionStateContainer` to be + /// consumed by the worker threads. Returns summary of scheduling, or an + /// error. + /// `pre_graph_filter` is used to filter out transactions that should be + /// skipped and dropped before insertion to the prio-graph. This fn should + /// set `false` for transactions that should be dropped, and `true` + /// otherwise. + /// `pre_lock_filter` is used to filter out transactions after they have + /// made it to the top of the prio-graph, and immediately before locks are + /// checked and taken. This fn should return `true` for transactions that + /// should be scheduled, and `false` otherwise. /// /// Uses a `PrioGraph` to perform look-ahead during the scheduling of transactions. /// This, combined with internal tracking of threads' in-flight transactions, allows @@ -52,7 +65,9 @@ impl PrioGraphScheduler { pub(crate) fn schedule( &mut self, container: &mut TransactionStateContainer, - ) -> Result { + pre_graph_filter: impl Fn(&[&SanitizedTransaction], &mut [bool]), + pre_lock_filter: impl Fn(&SanitizedTransaction) -> bool, + ) -> Result { let num_threads = self.consume_work_senders.len(); let mut batches = Batches::new(num_threads); let mut chain_id_to_thread_index = HashMap::new(); @@ -64,21 +79,68 @@ impl PrioGraphScheduler { let mut blocking_locks = ReadWriteAccountSet::default(); let mut prio_graph = PrioGraph::new(|id: &TransactionPriorityId, _graph_node| *id); - // Create the initial look-ahead window. - for _ in 0..self.look_ahead_window_size { - let Some(id) = container.pop() else { - break; - }; + // Track metrics on filter. 
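// A minimal sketch of the two-stage filtering contract documented in the new
// doc comment above, over a toy item type: `pre_graph_filter` marks a whole
// batch before queue insertion, `pre_lock_filter` vets single items just
// before locks would be taken.
fn schedule(
    items: Vec<u64>,
    pre_graph_filter: impl Fn(&[&u64], &mut [bool]),
    pre_lock_filter: impl Fn(&u64) -> bool,
) -> (Vec<u64>, usize) {
    let refs: Vec<&u64> = items.iter().collect();
    let mut keep = vec![true; items.len()];
    pre_graph_filter(&refs, &mut keep);

    let mut dropped = 0;
    let scheduled = items
        .iter()
        .zip(&keep)
        .filter_map(|(item, ok)| {
            if *ok && pre_lock_filter(item) {
                Some(*item)
            } else {
                dropped += 1; // the real scheduler removes these from its container
                None
            }
        })
        .collect();
    (scheduled, dropped)
}

fn main() {
    let (scheduled, dropped) = schedule(
        vec![1, 2, 3, 4],
        |txs, results| {
            for (tx, r) in txs.iter().zip(results.iter_mut()) {
                *r = **tx % 2 == 0; // keep even "transactions" only
            }
        },
        |tx| *tx != 4, // reject one item at the pre-lock stage
    );
    assert_eq!(scheduled, vec![2]);
    assert_eq!(dropped, 3);
}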
+ let mut num_filtered_out: usize = 0; + let mut total_filter_time_us: u64 = 0; + + let mut window_budget = self.look_ahead_window_size; + let mut chunked_pops = |container: &mut TransactionStateContainer, + prio_graph: &mut PrioGraph<_, _, _, _>, + window_budget: &mut usize| { + while *window_budget > 0 { + const MAX_FILTER_CHUNK_SIZE: usize = 128; + let mut filter_array = [true; MAX_FILTER_CHUNK_SIZE]; + let mut ids = Vec::with_capacity(MAX_FILTER_CHUNK_SIZE); + let mut txs = Vec::with_capacity(MAX_FILTER_CHUNK_SIZE); + + let chunk_size = (*window_budget).min(MAX_FILTER_CHUNK_SIZE); + for _ in 0..chunk_size { + if let Some(id) = container.pop() { + ids.push(id); + } else { + break; + } + } + *window_budget = window_budget.saturating_sub(chunk_size); + + ids.iter().for_each(|id| { + let transaction = container.get_transaction_ttl(&id.id).unwrap(); + txs.push(&transaction.transaction); + }); + + let (_, filter_us) = + measure_us!(pre_graph_filter(&txs, &mut filter_array[..chunk_size])); + saturating_add_assign!(total_filter_time_us, filter_us); + + for (id, filter_result) in ids.iter().zip(&filter_array[..chunk_size]) { + if *filter_result { + let transaction = container.get_transaction_ttl(&id.id).unwrap(); + prio_graph.insert_transaction( + *id, + Self::get_transaction_account_access(transaction), + ); + } else { + saturating_add_assign!(num_filtered_out, 1); + container.remove_by_id(&id.id); + } + } - let transaction = container.get_transaction_ttl(&id.id).unwrap(); - prio_graph.insert_transaction(id, Self::get_transaction_account_access(transaction)); - } + if ids.len() != chunk_size { + break; + } + } + }; + + // Create the initial look-ahead window. + // Check transactions against filter, remove from container if it fails. + chunked_pops(container, &mut prio_graph, &mut window_budget); let mut unblock_this_batch = Vec::with_capacity(self.consume_work_senders.len() * TARGET_NUM_TRANSACTIONS_PER_BATCH); const MAX_TRANSACTIONS_PER_SCHEDULING_PASS: usize = 100_000; - let mut num_scheduled = 0; - let mut num_sent = 0; + let mut num_scheduled: usize = 0; + let mut num_sent: usize = 0; + let mut num_unschedulable: usize = 0; while num_scheduled < MAX_TRANSACTIONS_PER_SCHEDULING_PASS { // If nothing is in the main-queue of the `PrioGraph` then there's nothing left to schedule. if prio_graph.is_empty() { @@ -88,15 +150,6 @@ impl PrioGraphScheduler { while let Some(id) = prio_graph.pop() { unblock_this_batch.push(id); - // Push next transaction from container into the `PrioGraph` look-ahead window. - if let Some(next_id) = container.pop() { - let transaction = container.get_transaction_ttl(&next_id.id).unwrap(); - prio_graph.insert_transaction( - next_id, - Self::get_transaction_account_access(transaction), - ); - } - // Should always be in the container, during initial testing phase panic. // Later, we can replace with a continue in case this does happen. 
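// A minimal sketch of the chunked look-ahead refill above: ids come off the
// priority queue in chunks of at most MAX_FILTER_CHUNK_SIZE, the batch filter
// runs once per chunk, and a short chunk signals the queue ran dry.
use std::collections::BinaryHeap;

const MAX_FILTER_CHUNK_SIZE: usize = 8; // 128 in the patch; smaller for the demo

fn chunked_pops(
    queue: &mut BinaryHeap<u64>,
    window_budget: &mut usize,
    filter: impl Fn(&[u64], &mut [bool]),
    graph: &mut Vec<u64>,
) {
    while *window_budget > 0 {
        let chunk_size = (*window_budget).min(MAX_FILTER_CHUNK_SIZE);
        let mut ids = Vec::with_capacity(chunk_size);
        for _ in 0..chunk_size {
            match queue.pop() {
                Some(id) => ids.push(id),
                None => break,
            }
        }
        *window_budget = window_budget.saturating_sub(chunk_size);

        let mut results = vec![true; ids.len()];
        filter(&ids, &mut results);
        for (id, ok) in ids.iter().zip(&results) {
            if *ok {
                graph.push(*id); // passing ids enter the look-ahead graph
            } // failing ids would be dropped from the container entirely
        }
        if ids.len() != chunk_size {
            break; // queue exhausted before the chunk filled
        }
    }
}

fn main() {
    let mut queue: BinaryHeap<u64> = (0..20).collect();
    let mut budget = 12;
    let mut graph = Vec::new();
    chunked_pops(&mut queue, &mut budget, |ids, results| {
        for (id, r) in ids.iter().zip(results.iter_mut()) {
            *r = id % 3 != 0; // drop every third id
        }
    }, &mut graph);
    assert_eq!(budget, 0); // the whole window budget was spent
    assert_eq!(graph.len(), 8); // 12 popped, 4 filtered out
}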
let Some(transaction_state) = container.get_mut_transaction_state(&id.id) else { @@ -104,11 +157,16 @@ impl PrioGraphScheduler { }; let transaction = &transaction_state.transaction_ttl().transaction; + if !pre_lock_filter(transaction) { + container.remove_by_id(&id.id); + continue; + } // Check if this transaction conflicts with any blocked transactions if !blocking_locks.check_locks(transaction.message()) { blocking_locks.take_locks(transaction.message()); unschedulable_ids.push(id); + saturating_add_assign!(num_unschedulable, 1); continue; } @@ -133,10 +191,11 @@ impl PrioGraphScheduler { ) else { blocking_locks.take_locks(transaction.message()); unschedulable_ids.push(id); + saturating_add_assign!(num_unschedulable, 1); continue; }; - num_scheduled += 1; + saturating_add_assign!(num_scheduled, 1); // Track the chain-id to thread-index mapping. chain_id_to_thread_index.insert(prio_graph.chain_id(&id), thread_id); @@ -154,11 +213,11 @@ impl PrioGraphScheduler { batches.transactions[thread_id].push(transaction); batches.ids[thread_id].push(id.id); batches.max_age_slots[thread_id].push(max_age_slot); - batches.total_cus[thread_id] += cu_limit; + saturating_add_assign!(batches.total_cus[thread_id], cu_limit); // If target batch size is reached, send only this batch. if batches.ids[thread_id].len() >= TARGET_NUM_TRANSACTIONS_PER_BATCH { - num_sent += self.send_batch(&mut batches, thread_id)?; + saturating_add_assign!(num_sent, self.send_batch(&mut batches, thread_id)?); } if num_scheduled >= MAX_TRANSACTIONS_PER_SCHEDULING_PASS { @@ -167,7 +226,11 @@ impl PrioGraphScheduler { } // Send all non-empty batches - num_sent += self.send_batches(&mut batches)?; + saturating_add_assign!(num_sent, self.send_batches(&mut batches)?); + + // Refresh window budget and do chunked pops + saturating_add_assign!(window_budget, unblock_this_batch.len()); + chunked_pops(container, &mut prio_graph, &mut window_budget); // Unblock all transactions that were blocked by the transactions that were just sent. for id in unblock_this_batch.drain(..) { @@ -176,7 +239,7 @@ impl PrioGraphScheduler { } // Send batches for any remaining transactions - num_sent += self.send_batches(&mut batches)?; + saturating_add_assign!(num_sent, self.send_batches(&mut batches)?); // Push unschedulable ids back into the container for id in unschedulable_ids { @@ -184,7 +247,7 @@ impl PrioGraphScheduler { } // Push remaining transactions back into the container - while let Some(id) = prio_graph.pop_and_unblock() { + while let Some((id, _)) = prio_graph.pop_and_unblock() { container.push_id_into_queue(id); } @@ -193,24 +256,39 @@ impl PrioGraphScheduler { "number of scheduled and sent transactions must match" ); - Ok(num_scheduled) + Ok(SchedulingSummary { + num_scheduled, + num_unschedulable, + num_filtered_out, + filter_time_us: total_filter_time_us, + }) } /// Receive completed batches of transactions without blocking. + /// Returns (num_transactions, num_retryable_transactions) on success. pub fn receive_completed( &mut self, container: &mut TransactionStateContainer, - ) -> Result<(), SchedulerError> { - while self.try_receive_completed(container)? 
{} - Ok(()) + ) -> Result<(usize, usize), SchedulerError> { + let mut total_num_transactions: usize = 0; + let mut total_num_retryable: usize = 0; + loop { + let (num_transactions, num_retryable) = self.try_receive_completed(container)?; + if num_transactions == 0 { + break; + } + saturating_add_assign!(total_num_transactions, num_transactions); + saturating_add_assign!(total_num_retryable, num_retryable); + } + Ok((total_num_transactions, total_num_retryable)) } /// Receive completed batches of transactions. - /// Returns `Ok(true)` if a batch was received, `Ok(false)` if no batch was received. + /// Returns `Ok((num_transactions, num_retryable))` if a batch was received, `Ok((0, 0))` if no batch was received. fn try_receive_completed( &mut self, container: &mut TransactionStateContainer, - ) -> Result { + ) -> Result<(usize, usize), SchedulerError> { match self.finished_consume_work_receiver.try_recv() { Ok(FinishedConsumeWork { work: @@ -222,6 +300,9 @@ impl PrioGraphScheduler { }, retryable_indexes, }) => { + let num_transactions = ids.len(); + let num_retryable = retryable_indexes.len(); + // Free the locks self.complete_batch(batch_id, &transactions); @@ -246,9 +327,9 @@ impl PrioGraphScheduler { container.remove_by_id(&id); } - Ok(true) + Ok((num_transactions, num_retryable)) } - Err(TryRecvError::Empty) => Ok(false), + Err(TryRecvError::Empty) => Ok((0, 0)), Err(TryRecvError::Disconnected) => Err(SchedulerError::DisconnectedRecvChannel( "finished consume work", )), @@ -364,6 +445,19 @@ impl PrioGraphScheduler { } } +/// Metrics from scheduling transactions. +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct SchedulingSummary { + /// Number of transactions scheduled. + pub num_scheduled: usize, + /// Number of transactions that were not scheduled due to conflicts. + pub num_unschedulable: usize, + /// Number of transactions that were dropped due to filter. 
+ pub num_filtered_out: usize, + /// Time spent filtering transactions + pub filter_time_us: u64, +} + struct Batches { ids: Vec>, transactions: Vec>, @@ -520,6 +614,14 @@ mod tests { .unzip() } + fn test_pre_graph_filter(_txs: &[&SanitizedTransaction], results: &mut [bool]) { + results.fill(true); + } + + fn test_pre_lock_filter(_tx: &SanitizedTransaction) -> bool { + true + } + #[test] fn test_schedule_disconnected_channel() { let (mut scheduler, work_receivers, _finished_work_sender) = create_test_frame(1); @@ -527,7 +629,7 @@ mod tests { drop(work_receivers); // explicitly drop receivers assert_matches!( - scheduler.schedule(&mut container), + scheduler.schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter), Err(SchedulerError::DisconnectedSendChannel(_)) ); } @@ -540,8 +642,11 @@ mod tests { (&Keypair::new(), &[Pubkey::new_unique()], 2, 2), ]); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 2); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!(collect_work(&work_receivers[0]).1, vec![txids!([1, 0])]); } @@ -554,8 +659,11 @@ mod tests { (&Keypair::new(), &[pubkey], 1, 2), ]); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 2); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( collect_work(&work_receivers[0]).1, vec![txids!([1]), txids!([0])] @@ -571,8 +679,14 @@ mod tests { ); // expect 4 full batches to be scheduled - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 4 * TARGET_NUM_TRANSACTIONS_PER_BATCH); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!( + scheduling_summary.num_scheduled, + 4 * TARGET_NUM_TRANSACTIONS_PER_BATCH + ); + assert_eq!(scheduling_summary.num_unschedulable, 0); let thread0_work_counts: Vec<_> = work_receivers[0] .try_iter() @@ -587,8 +701,11 @@ mod tests { let mut container = create_container((0..4).map(|i| (Keypair::new(), [Pubkey::new_unique()], 1, i))); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 4); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 4); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!(collect_work(&work_receivers[0]).1, [txids!([3, 1])]); assert_eq!(collect_work(&work_receivers[1]).1, [txids!([2, 0])]); } @@ -618,8 +735,11 @@ mod tests { // fact they eventually join means that the scheduler will schedule them // onto the same thread to avoid causing [4], which conflicts with both // chains, to be un-schedulable. 
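// A minimal sketch of the drain-and-tally shape receive_completed takes above:
// poll the non-blocking receiver until an empty read, summing per-batch
// transaction and retryable counts (toy channel payloads stand in for batches).
use std::sync::mpsc::{channel, Receiver, TryRecvError};

fn try_receive_completed(rx: &Receiver<(usize, usize)>) -> Result<(usize, usize), String> {
    match rx.try_recv() {
        Ok(counts) => Ok(counts),
        Err(TryRecvError::Empty) => Ok((0, 0)), // nothing waiting; not an error
        Err(TryRecvError::Disconnected) => Err("finished consume work".to_string()),
    }
}

fn receive_completed(rx: &Receiver<(usize, usize)>) -> Result<(usize, usize), String> {
    let (mut total_transactions, mut total_retryable) = (0usize, 0usize);
    loop {
        let (num_transactions, num_retryable) = try_receive_completed(rx)?;
        if num_transactions == 0 {
            break; // an empty poll ends the drain
        }
        total_transactions += num_transactions;
        total_retryable += num_retryable;
    }
    Ok((total_transactions, total_retryable))
}

fn main() {
    let (tx, rx) = channel();
    tx.send((64, 2)).unwrap();
    tx.send((32, 0)).unwrap();
    assert_eq!(receive_completed(&rx), Ok((96, 2)));
}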
- let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 5); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 5); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( collect_work(&work_receivers[0]).1, [txids!([0, 2]), txids!([1, 3]), txids!([4])] @@ -658,15 +778,24 @@ mod tests { // Because the look-ahead window is shortened to a size of 4, the scheduler does // not have knowledge of the joining at transaction [4] until after [0] and [1] // have been scheduled. - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 4); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 4); + assert_eq!(scheduling_summary.num_unschedulable, 2); let (thread_0_work, thread_0_ids) = collect_work(&work_receivers[0]); - assert_eq!(thread_0_ids, [txids!([0, 2])]); - assert_eq!(collect_work(&work_receivers[1]).1, [txids!([1, 3])]); + assert_eq!(thread_0_ids, [txids!([0]), txids!([2])]); + assert_eq!( + collect_work(&work_receivers[1]).1, + [txids!([1]), txids!([3])] + ); // Cannot schedule even on next pass because of lock conflicts - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 0); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 0); + assert_eq!(scheduling_summary.num_unschedulable, 2); // Complete batch on thread 0. Remaining txs can be scheduled onto thread 1 finished_work_sender @@ -676,12 +805,40 @@ mod tests { }) .unwrap(); scheduler.receive_completed(&mut container).unwrap(); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 2); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( collect_work(&work_receivers[1]).1, [txids!([4]), txids!([5])] ); } + + #[test] + fn test_schedule_pre_lock_filter() { + let (mut scheduler, work_receivers, _finished_work_sender) = create_test_frame(1); + let pubkey = Pubkey::new_unique(); + let keypair = Keypair::new(); + let mut container = create_container([ + (&Keypair::new(), &[pubkey], 1, 1), + (&keypair, &[pubkey], 1, 2), + (&Keypair::new(), &[pubkey], 1, 3), + ]); + + // 2nd transaction should be filtered out and dropped before locking. 
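// A standalone model of the fee-payer check the test below installs as its
// pre-lock filter: a closure captures one key and rejects transactions it pays
// for (toy types; the real filter takes a &SanitizedTransaction).
#[derive(PartialEq)]
struct Pubkey(u8);

struct Tx {
    fee_payer: Pubkey,
    id: u32,
}

fn main() {
    let blocked = Pubkey(2);
    let pre_lock_filter = |tx: &Tx| tx.fee_payer != blocked;

    let txs = [
        Tx { fee_payer: Pubkey(1), id: 0 },
        Tx { fee_payer: Pubkey(2), id: 1 },
        Tx { fee_payer: Pubkey(3), id: 2 },
    ];
    let mut scheduled = Vec::new();
    for tx in &txs {
        if pre_lock_filter(tx) {
            scheduled.push(tx.id);
        }
    }
    assert_eq!(scheduled, vec![0, 2]); // id 1 dropped before locks are taken
}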
+ let pre_lock_filter = + |tx: &SanitizedTransaction| tx.message().fee_payer() != &keypair.pubkey(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); + assert_eq!( + collect_work(&work_receivers[0]).1, + vec![txids!([2]), txids!([0])] + ); + } } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 8c1dc4f9172f73..930d2fe1d067c3 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -9,13 +9,20 @@ use { transaction_state_container::TransactionStateContainer, }, crate::banking_stage::{ + consume_worker::ConsumeWorkerMetrics, decision_maker::{BufferedPacketsDecision, DecisionMaker}, immutable_deserialized_packet::ImmutableDeserializedPacket, packet_deserializer::PacketDeserializer, TOTAL_BUFFERED_PACKETS, }, crossbeam_channel::RecvTimeoutError, - solana_runtime::bank_forks::BankForks, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_measure::measure_us, + solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_sdk::{ + clock::MAX_PROCESSING_AGE, saturating_add_assign, timing::AtomicInterval, + transaction::SanitizedTransaction, + }, std::{ sync::{Arc, RwLock}, time::Duration, @@ -36,6 +43,12 @@ pub(crate) struct SchedulerController { container: TransactionStateContainer, /// State for scheduling and communicating with worker threads. scheduler: PrioGraphScheduler, + /// Metrics tracking counts on transactions in different states. + count_metrics: SchedulerCountMetrics, + /// Metrics tracking time spent in different code sections. + timing_metrics: SchedulerTimingMetrics, + /// Metric report handles for the worker threads. + worker_metrics: Vec>, } impl SchedulerController { @@ -44,6 +57,7 @@ impl SchedulerController { packet_deserializer: PacketDeserializer, bank_forks: Arc>, scheduler: PrioGraphScheduler, + worker_metrics: Vec>, ) -> Self { Self { decision_maker, @@ -52,6 +66,9 @@ impl SchedulerController { transaction_id_generator: TransactionIdGenerator::default(), container: TransactionStateContainer::with_capacity(TOTAL_BUFFERED_PACKETS), scheduler, + count_metrics: SchedulerCountMetrics::default(), + timing_metrics: SchedulerTimingMetrics::default(), + worker_metrics, } } @@ -67,13 +84,24 @@ impl SchedulerController { // `Forward` will drop packets from the buffer instead of forwarding. // During receiving, since packets would be dropped from buffer anyway, we can // bypass sanitization and buffering and immediately drop the packets. - let decision = self.decision_maker.make_consume_or_forward_decision(); + let (decision, decision_time_us) = + measure_us!(self.decision_maker.make_consume_or_forward_decision()); + saturating_add_assign!(self.timing_metrics.decision_time_us, decision_time_us); self.process_transactions(&decision)?; - self.scheduler.receive_completed(&mut self.container)?; - if !self.receive_packets(&decision) { + self.receive_completed()?; + if !self.receive_and_buffer_packets(&decision) { break; } + + // Report metrics only if there is data. + // Reset intervals when appropriate, regardless of report. 
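// A minimal sketch of the report cadence described above: an interval gate
// decides when to flush, and counters reset at every interval boundary whether
// or not anything was reported (AtomicInterval in the patch; an Instant here).
use std::time::{Duration, Instant};

struct IntervalGate {
    last: Instant,
    period: Duration,
}

impl IntervalGate {
    fn should_update(&mut self) -> bool {
        if self.last.elapsed() >= self.period {
            self.last = Instant::now();
            true
        } else {
            false
        }
    }
}

#[derive(Default)]
struct CountMetrics {
    num_scheduled: usize,
}

impl CountMetrics {
    fn has_data(&self) -> bool {
        self.num_scheduled != 0
    }

    fn maybe_report_and_reset(&mut self, gate: &mut IntervalGate, should_report: bool) {
        if gate.should_update() {
            if should_report {
                println!("banking_stage_scheduler_counts num_scheduled={}", self.num_scheduled);
            }
            self.num_scheduled = 0; // reset regardless of whether we reported
        }
    }
}

fn main() {
    let mut gate = IntervalGate { last: Instant::now(), period: Duration::ZERO };
    let mut counts = CountMetrics { num_scheduled: 5 };
    let should_report = counts.has_data();
    counts.maybe_report_and_reset(&mut gate, should_report);
    assert_eq!(counts.num_scheduled, 0);
}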
+ let should_report = self.count_metrics.has_data(); + self.count_metrics.maybe_report_and_reset(should_report); + self.timing_metrics.maybe_report_and_reset(should_report); + self.worker_metrics + .iter() + .for_each(|metrics| metrics.maybe_report_and_reset()); } Ok(()) @@ -85,28 +113,132 @@ impl SchedulerController { decision: &BufferedPacketsDecision, ) -> Result<(), SchedulerError> { match decision { - BufferedPacketsDecision::Consume(_bank_start) => { - let _num_scheduled = self.scheduler.schedule(&mut self.container)?; + BufferedPacketsDecision::Consume(bank_start) => { + let (scheduling_summary, schedule_time_us) = measure_us!(self.scheduler.schedule( + &mut self.container, + |txs, results| { + Self::pre_graph_filter(txs, results, &bank_start.working_bank) + }, + |_| true // no pre-lock filter for now + )?); + saturating_add_assign!( + self.count_metrics.num_scheduled, + scheduling_summary.num_scheduled + ); + saturating_add_assign!( + self.count_metrics.num_unschedulable, + scheduling_summary.num_unschedulable + ); + saturating_add_assign!( + self.count_metrics.num_schedule_filtered_out, + scheduling_summary.num_filtered_out + ); + saturating_add_assign!( + self.timing_metrics.schedule_filter_time_us, + scheduling_summary.filter_time_us + ); + saturating_add_assign!(self.timing_metrics.schedule_time_us, schedule_time_us); } BufferedPacketsDecision::Forward => { - self.clear_container(); + let (_, clear_time_us) = measure_us!(self.clear_container()); + saturating_add_assign!(self.timing_metrics.clear_time_us, clear_time_us); + } + BufferedPacketsDecision::ForwardAndHold => { + let (_, clean_time_us) = measure_us!(self.clean_queue()); + saturating_add_assign!(self.timing_metrics.clean_time_us, clean_time_us); } - BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {} + BufferedPacketsDecision::Hold => {} } Ok(()) } + fn pre_graph_filter(transactions: &[&SanitizedTransaction], results: &mut [bool], bank: &Bank) { + let lock_results = vec![Ok(()); transactions.len()]; + let mut error_counters = TransactionErrorMetrics::default(); + let check_results = bank.check_transactions( + transactions, + &lock_results, + MAX_PROCESSING_AGE, + &mut error_counters, + ); + + for ((check_result, _), result) in check_results.into_iter().zip(results.iter_mut()) { + *result = check_result.is_ok(); + } + } + /// Clears the transaction state container. /// This only clears pending transactions, and does **not** clear in-flight transactions. fn clear_container(&mut self) { while let Some(id) = self.container.pop() { self.container.remove_by_id(&id.id); + saturating_add_assign!(self.count_metrics.num_dropped_on_clear, 1); } } + /// Clean unprocessable transactions from the queue. These will be transactions that are + /// expired, already processed, or are no longer sanitizable. + /// This only clears pending transactions, and does **not** clear in-flight transactions. + fn clean_queue(&mut self) { + // Clean up any transactions that have already been processed, are too old, or do not have + // valid nonce accounts. 
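// The controller hunks above wrap every phase in measure_us!, which evaluates
// an expression and yields (result, elapsed_microseconds). A local re-creation
// of that shape, kept self-contained (the real macro lives in solana-measure):
use std::time::Instant;

macro_rules! measure_us {
    ($expr:expr) => {{
        let start = Instant::now();
        let result = $expr;
        (result, start.elapsed().as_micros() as u64)
    }};
}

fn main() {
    let (sum, elapsed_us) = measure_us!((0..1_000_000u64).sum::<u64>());
    println!("sum={sum} computed in {elapsed_us}us");
    // The tuple's second element is what feeds
    // saturating_add_assign!(timing_metrics.field, elapsed_us) in the scheduler.
    assert_eq!(sum, 499_999_500_000);
}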
+ const MAX_TRANSACTION_CHECKS: usize = 10_000; + let mut transaction_ids = Vec::with_capacity(MAX_TRANSACTION_CHECKS); + + while let Some(id) = self.container.pop() { + transaction_ids.push(id); + } + + let bank = self.bank_forks.read().unwrap().working_bank(); + + const CHUNK_SIZE: usize = 128; + let mut error_counters = TransactionErrorMetrics::default(); + + for chunk in transaction_ids.chunks(CHUNK_SIZE) { + let lock_results = vec![Ok(()); chunk.len()]; + let sanitized_txs: Vec<_> = chunk + .iter() + .map(|id| { + &self + .container + .get_transaction_ttl(&id.id) + .expect("transaction must exist") + .transaction + }) + .collect(); + + let check_results = bank.check_transactions( + &sanitized_txs, + &lock_results, + MAX_PROCESSING_AGE, + &mut error_counters, + ); + + for ((result, _nonce), id) in check_results.into_iter().zip(chunk.iter()) { + if result.is_err() { + saturating_add_assign!(self.count_metrics.num_dropped_on_age_and_status, 1); + self.container.remove_by_id(&id.id); + } + } + } + } + + /// Receives completed transactions from the workers and updates metrics. + fn receive_completed(&mut self) -> Result<(), SchedulerError> { + let ((num_transactions, num_retryable), receive_completed_time_us) = + measure_us!(self.scheduler.receive_completed(&mut self.container)?); + saturating_add_assign!(self.count_metrics.num_finished, num_transactions); + saturating_add_assign!(self.count_metrics.num_retryable, num_retryable); + saturating_add_assign!( + self.timing_metrics.receive_completed_time_us, + receive_completed_time_us + ); + Ok(()) + } + /// Returns whether the packet receiver is still connected. - fn receive_packets(&mut self, decision: &BufferedPacketsDecision) -> bool { + fn receive_and_buffer_packets(&mut self, decision: &BufferedPacketsDecision) -> bool { let remaining_queue_capacity = self.container.remaining_queue_capacity(); const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(100); @@ -125,17 +257,29 @@ impl SchedulerController { } }; - let received_packet_results = self + let (received_packet_results, receive_time_us) = measure_us!(self .packet_receiver - .receive_packets(recv_timeout, remaining_queue_capacity); - - match (received_packet_results, should_buffer) { - (Ok(receive_packet_results), true) => { - self.buffer_packets(receive_packet_results.deserialized_packets) + .receive_packets(recv_timeout, remaining_queue_capacity)); + saturating_add_assign!(self.timing_metrics.receive_time_us, receive_time_us); + + match received_packet_results { + Ok(receive_packet_results) => { + let num_received_packets = receive_packet_results.deserialized_packets.len(); + saturating_add_assign!(self.count_metrics.num_received, num_received_packets); + if should_buffer { + let (_, buffer_time_us) = measure_us!( + self.buffer_packets(receive_packet_results.deserialized_packets) + ); + saturating_add_assign!(self.timing_metrics.buffer_time_us, buffer_time_us); + } else { + saturating_add_assign!( + self.count_metrics.num_dropped_on_receive, + num_received_packets + ); + } } - (Ok(receive_packet_results), false) => drop(receive_packet_results), - (Err(RecvTimeoutError::Timeout), _) => {} - (Err(RecvTimeoutError::Disconnected), _) => return false, + Err(RecvTimeoutError::Timeout) => {} + Err(RecvTimeoutError::Disconnected) => return false, } true @@ -145,28 +289,270 @@ impl SchedulerController { // Sanitize packets, generate IDs, and insert into the container. 
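// A minimal sketch of clean_queue's overall shape just above: drain the pending
// queue, batch-check ids in fixed-size chunks, and keep only the ones that
// still pass (the validity closure is a stand-in for bank.check_transactions).
const CHUNK_SIZE: usize = 128;

fn clean_queue(pending: &mut Vec<u64>, still_valid: impl Fn(&[u64]) -> Vec<bool>) {
    // Pop everything out first, mirroring the `while let Some(id) = container.pop()` drain.
    let ids: Vec<u64> = pending.drain(..).collect();
    for chunk in ids.chunks(CHUNK_SIZE) {
        let results = still_valid(chunk);
        for (id, ok) in chunk.iter().zip(results) {
            if ok {
                pending.push(*id); // survivors stay queued
            } // failures are dropped, as remove_by_id does in the patch
        }
    }
}

fn main() {
    let mut pending: Vec<u64> = (0..300).collect();
    // Pretend odd ids have expired or were already processed.
    clean_queue(&mut pending, |chunk| chunk.iter().map(|id| id % 2 == 0).collect());
    assert_eq!(pending.len(), 150);
}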
let bank = self.bank_forks.read().unwrap().working_bank(); let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(bank.epoch()); + let transaction_account_lock_limit = bank.get_transaction_account_lock_limit(); let feature_set = &bank.feature_set; let vote_only = bank.vote_only_bank(); - for packet in packets { - let Some(transaction) = - packet.build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) - else { - continue; - }; - - let transaction_id = self.transaction_id_generator.next(); - let transaction_ttl = SanitizedTransactionTTL { - transaction, - max_age_slot: last_slot_in_epoch, - }; - let transaction_priority_details = packet.priority_details(); - self.container.insert_new_transaction( - transaction_id, - transaction_ttl, - transaction_priority_details, + + const CHUNK_SIZE: usize = 128; + let lock_results: [_; CHUNK_SIZE] = core::array::from_fn(|_| Ok(())); + let mut error_counts = TransactionErrorMetrics::default(); + for chunk in packets.chunks(CHUNK_SIZE) { + let mut post_sanitization_count: usize = 0; + let (transactions, priority_details): (Vec<_>, Vec<_>) = chunk + .iter() + .filter_map(|packet| { + packet + .build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) + .map(|tx| (tx, packet.priority_details())) + }) + .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) + .filter(|(tx, _)| { + SanitizedTransaction::validate_account_locks( + tx.message(), + transaction_account_lock_limit, + ) + .is_ok() + }) + .unzip(); + + let check_results = bank.check_transactions( + &transactions, + &lock_results[..transactions.len()], + MAX_PROCESSING_AGE, + &mut error_counts, + ); + let post_lock_validation_count = transactions.len(); + + let mut post_transaction_check_count: usize = 0; + for ((transaction, priority_details), _) in transactions + .into_iter() + .zip(priority_details) + .zip(check_results) + .filter(|(_, check_result)| check_result.0.is_ok()) + { + saturating_add_assign!(post_transaction_check_count, 1); + let transaction_id = self.transaction_id_generator.next(); + let transaction_ttl = SanitizedTransactionTTL { + transaction, + max_age_slot: last_slot_in_epoch, + }; + + if self.container.insert_new_transaction( + transaction_id, + transaction_ttl, + priority_details, + ) { + saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); + } + saturating_add_assign!(self.count_metrics.num_buffered, 1); + } + + // Update metrics for transactions that were dropped. + let num_dropped_on_sanitization = chunk.len().saturating_sub(post_sanitization_count); + let num_dropped_on_lock_validation = + post_sanitization_count.saturating_sub(post_lock_validation_count); + let num_dropped_on_transaction_checks = + post_lock_validation_count.saturating_sub(post_transaction_check_count); + + saturating_add_assign!( + self.count_metrics.num_dropped_on_sanitization, + num_dropped_on_sanitization + ); + saturating_add_assign!( + self.count_metrics.num_dropped_on_validate_locks, + num_dropped_on_lock_validation ); + saturating_add_assign!( + self.count_metrics.num_dropped_on_receive_transaction_checks, + num_dropped_on_transaction_checks + ); + } + } +} + +#[derive(Default)] +struct SchedulerCountMetrics { + interval: AtomicInterval, + + /// Number of packets received. + num_received: usize, + /// Number of packets buffered. + num_buffered: usize, + + /// Number of transactions scheduled. + num_scheduled: usize, + /// Number of transactions that were unschedulable. 
+ num_unschedulable: usize, + /// Number of transactions that were filtered out during scheduling. + num_schedule_filtered_out: usize, + /// Number of completed transactions received from workers. + num_finished: usize, + /// Number of transactions that were retryable. + num_retryable: usize, + + /// Number of transactions that were immediately dropped on receive. + num_dropped_on_receive: usize, + /// Number of transactions that were dropped due to sanitization failure. + num_dropped_on_sanitization: usize, + /// Number of transactions that were dropped due to failed lock validation. + num_dropped_on_validate_locks: usize, + /// Number of transactions that were dropped due to failed transaction + /// checks during receive. + num_dropped_on_receive_transaction_checks: usize, + /// Number of transactions that were dropped due to clearing. + num_dropped_on_clear: usize, + /// Number of transactions that were dropped due to age and status checks. + num_dropped_on_age_and_status: usize, + /// Number of transactions that were dropped due to exceeded capacity. + num_dropped_on_capacity: usize, +} + +impl SchedulerCountMetrics { + fn maybe_report_and_reset(&mut self, should_report: bool) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) { + if should_report { + self.report(); + } + self.reset(); + } + } + + fn report(&self) { + datapoint_info!( + "banking_stage_scheduler_counts", + ("num_received", self.num_received, i64), + ("num_buffered", self.num_buffered, i64), + ("num_scheduled", self.num_scheduled, i64), + ("num_unschedulable", self.num_unschedulable, i64), + ( + "num_schedule_filtered_out", + self.num_schedule_filtered_out, + i64 + ), + ("num_finished", self.num_finished, i64), + ("num_retryable", self.num_retryable, i64), + ("num_dropped_on_receive", self.num_dropped_on_receive, i64), + ( + "num_dropped_on_sanitization", + self.num_dropped_on_sanitization, + i64 + ), + ( + "num_dropped_on_validate_locks", + self.num_dropped_on_validate_locks, + i64 + ), + ( + "num_dropped_on_receive_transaction_checks", + self.num_dropped_on_receive_transaction_checks, + i64 + ), + ("num_dropped_on_clear", self.num_dropped_on_clear, i64), + ( + "num_dropped_on_age_and_status", + self.num_dropped_on_age_and_status, + i64 + ), + ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64) + ); + } + + fn has_data(&self) -> bool { + self.num_received != 0 + || self.num_buffered != 0 + || self.num_scheduled != 0 + || self.num_unschedulable != 0 + || self.num_schedule_filtered_out != 0 + || self.num_finished != 0 + || self.num_retryable != 0 + || self.num_dropped_on_receive != 0 + || self.num_dropped_on_sanitization != 0 + || self.num_dropped_on_validate_locks != 0 + || self.num_dropped_on_receive_transaction_checks != 0 + || self.num_dropped_on_clear != 0 + || self.num_dropped_on_age_and_status != 0 + || self.num_dropped_on_capacity != 0 + } + + fn reset(&mut self) { + self.num_received = 0; + self.num_buffered = 0; + self.num_scheduled = 0; + self.num_unschedulable = 0; + self.num_schedule_filtered_out = 0; + self.num_finished = 0; + self.num_retryable = 0; + self.num_dropped_on_receive = 0; + self.num_dropped_on_sanitization = 0; + self.num_dropped_on_validate_locks = 0; + self.num_dropped_on_receive_transaction_checks = 0; + self.num_dropped_on_clear = 0; + self.num_dropped_on_age_and_status = 0; + self.num_dropped_on_capacity = 0; + } +} + +#[derive(Default)] +struct SchedulerTimingMetrics { + interval: AtomicInterval, + /// Time spent making processing 
decisions. + decision_time_us: u64, + /// Time spent receiving packets. + receive_time_us: u64, + /// Time spent buffering packets. + buffer_time_us: u64, + /// Time spent filtering transactions during scheduling. + schedule_filter_time_us: u64, + /// Time spent scheduling transactions. + schedule_time_us: u64, + /// Time spent clearing transactions from the container. + clear_time_us: u64, + /// Time spent cleaning expired or processed transactions from the container. + clean_time_us: u64, + /// Time spent receiving completed transactions. + receive_completed_time_us: u64, +} + +impl SchedulerTimingMetrics { + fn maybe_report_and_reset(&mut self, should_report: bool) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) { + if should_report { + self.report(); + } + self.reset(); } } + + fn report(&self) { + datapoint_info!( + "banking_stage_scheduler_timing", + ("decision_time_us", self.decision_time_us, i64), + ("receive_time_us", self.receive_time_us, i64), + ("buffer_time_us", self.buffer_time_us, i64), + ("schedule_filter_time_us", self.schedule_filter_time_us, i64), + ("schedule_time_us", self.schedule_time_us, i64), + ("clear_time_us", self.clear_time_us, i64), + ("clean_time_us", self.clean_time_us, i64), + ( + "receive_completed_time_us", + self.receive_completed_time_us, + i64 + ) + ); + } + + fn reset(&mut self) { + self.decision_time_us = 0; + self.receive_time_us = 0; + self.buffer_time_us = 0; + self.schedule_filter_time_us = 0; + self.schedule_time_us = 0; + self.clear_time_us = 0; + self.clean_time_us = 0; + self.receive_completed_time_us = 0; + } } #[cfg(test)] @@ -190,7 +576,7 @@ mod tests { }, solana_perf::packet::{to_packet_batches, PacketBatch, NUM_PACKETS}, solana_poh::poh_recorder::{PohRecorder, Record, WorkingBankEntry}, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::bank::Bank, solana_sdk::{ compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, signer::Signer, @@ -222,9 +608,7 @@ mod tests { fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController) { let GenesisConfigInfo { genesis_config, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) @@ -266,6 +650,7 @@ mod tests { packet_deserializer, bank_forks, PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver), + vec![], // no actual workers with metrics to report, this can be empty ); (test_frame, scheduler_controller) diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index 76807653315117..10401a88eff405 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -119,12 +119,13 @@ impl TransactionStateContainer { } /// Insert a new transaction into the container's queues and maps. + /// Returns `true` if a packet was dropped due to capacity limits. 
pub(crate) fn insert_new_transaction( &mut self, transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, transaction_priority_details: TransactionPriorityDetails, - ) { + ) -> bool { let priority_id = TransactionPriorityId::new(transaction_priority_details.priority, transaction_id); self.id_to_transaction_state.insert( @@ -151,12 +152,15 @@ impl TransactionStateContainer { /// Pushes a transaction id into the priority queue. If the queue is full, the lowest priority /// transaction will be dropped (removed from the queue and map). - pub(crate) fn push_id_into_queue(&mut self, priority_id: TransactionPriorityId) { + /// Returns `true` if a packet was dropped due to capacity limits. + pub(crate) fn push_id_into_queue(&mut self, priority_id: TransactionPriorityId) -> bool { if self.remaining_queue_capacity() == 0 { let popped_id = self.priority_queue.push_pop_min(priority_id); self.remove_by_id(&popped_id.id); + true } else { self.priority_queue.push(priority_id); + false } } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 03b3e583326a71..840a2cf860239c 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -1014,7 +1014,7 @@ mod tests { mint_keypair, .. } = create_genesis_config(10); - let current_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let current_bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let simple_transactions: Vec = (0..256) .map(|_id| { diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 3af93fecf963dc..257b2c381c0135 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -44,6 +44,7 @@ use { vote_transaction::VoteTransaction, }, std::{ + cmp::max, collections::{HashMap, HashSet}, iter::repeat, sync::{ @@ -499,6 +500,7 @@ impl ClusterInfoVoteListener { ) -> Result<()> { let mut confirmation_verifier = OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root()); + let mut latest_vote_slot_per_validator = HashMap::new(); let mut last_process_root = Instant::now(); let duplicate_confirmed_slot_sender = Some(duplicate_confirmed_slot_sender); let mut vote_processing_time = Some(VoteProcessingTiming::default()); @@ -533,6 +535,7 @@ impl ClusterInfoVoteListener { &bank_notification_sender, &duplicate_confirmed_slot_sender, &mut vote_processing_time, + &mut latest_vote_slot_per_validator, ); match confirmed_slots { Ok(confirmed_slots) => { @@ -573,6 +576,7 @@ impl ClusterInfoVoteListener { &None, &None, &mut None, + &mut HashMap::new(), ) } @@ -588,6 +592,7 @@ impl ClusterInfoVoteListener { bank_notification_sender: &Option, duplicate_confirmed_slot_sender: &Option, vote_processing_time: &mut Option, + latest_vote_slot_per_validator: &mut HashMap, ) -> Result { let mut sel = Select::new(); sel.recv(gossip_vote_txs_receiver); @@ -617,6 +622,7 @@ impl ClusterInfoVoteListener { bank_notification_sender, duplicate_confirmed_slot_sender, vote_processing_time, + latest_vote_slot_per_validator, )); } remaining_wait_time = remaining_wait_time.saturating_sub(start.elapsed()); @@ -639,6 +645,7 @@ impl ClusterInfoVoteListener { is_gossip_vote: bool, bank_notification_sender: &Option, duplicate_confirmed_slot_sender: &Option, + latest_vote_slot_per_validator: &mut HashMap, ) { if vote.is_empty() { return; @@ -646,6 +653,10 @@ impl ClusterInfoVoteListener { let (last_vote_slot, 
last_vote_hash) = vote.last_voted_slot_hash().unwrap(); + let latest_vote_slot = latest_vote_slot_per_validator + .entry(*vote_pubkey) + .or_insert(0); + let root = root_bank.slot(); let mut is_new_vote = false; let vote_slots = vote.slots(); @@ -724,6 +735,14 @@ impl ClusterInfoVoteListener { is_new_vote = is_new; } + if slot < *latest_vote_slot { + // Important that we filter after the `last_vote_slot` check, as even if this vote + // is old, we still need to track optimistic confirmations. + // However it is fine to filter the rest of the slots for the propagated check tracking below, + // as the propagated check is able to roll up votes for descendants unlike optimistic confirmation. + continue; + } + diff.entry(slot) .or_default() .entry(*vote_pubkey) @@ -733,6 +752,8 @@ impl ClusterInfoVoteListener { .or_insert(is_gossip_vote); } + *latest_vote_slot = max(*latest_vote_slot, last_vote_slot); + if is_new_vote { subscriptions.notify_vote(*vote_pubkey, vote, vote_transaction_signature); let _ = verified_vote_sender.send((*vote_pubkey, vote_slots)); @@ -751,6 +772,7 @@ impl ClusterInfoVoteListener { bank_notification_sender: &Option, duplicate_confirmed_slot_sender: &Option, vote_processing_time: &mut Option, + latest_vote_slot_per_validator: &mut HashMap, ) -> ThresholdConfirmedSlots { let mut diff: HashMap> = HashMap::new(); let mut new_optimistic_confirmed_slots = vec![]; @@ -777,6 +799,7 @@ impl ClusterInfoVoteListener { is_gossip, bank_notification_sender, duplicate_confirmed_slot_sender, + latest_vote_slot_per_validator, ); } gossip_vote_txn_processing_time.stop(); @@ -875,6 +898,7 @@ mod tests { use { super::*, crate::banking_trace::BankingTracer, + itertools::Itertools, solana_perf::packet, solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, solana_runtime::{ @@ -890,7 +914,10 @@ mod tests { signature::{Keypair, Signature, Signer}, }, solana_vote::vote_sender_types::ReplayVoteSender, - solana_vote_program::{vote_state::Vote, vote_transaction}, + solana_vote_program::{ + vote_state::{Vote, VoteStateUpdate}, + vote_transaction, + }, std::{ collections::BTreeSet, iter::repeat_with, @@ -987,6 +1014,7 @@ mod tests { let (verified_vote_sender, _verified_vote_receiver) = unbounded(); let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (replay_votes_sender, replay_votes_receiver) = unbounded(); + let mut latest_vote_slot_per_validator = HashMap::new(); let GenesisConfigInfo { genesis_config, .. } = genesis_utils::create_genesis_config_with_vote_accounts( @@ -1022,6 +1050,7 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ) .unwrap(); @@ -1054,6 +1083,7 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ) .unwrap(); @@ -1105,6 +1135,7 @@ mod tests { let (replay_votes_sender, replay_votes_receiver) = unbounded(); let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); let (verified_vote_sender, verified_vote_receiver) = unbounded(); + let mut latest_vote_slot_per_validator = HashMap::new(); let GenesisConfigInfo { genesis_config, .. 
} = genesis_utils::create_genesis_config_with_vote_accounts( @@ -1137,6 +1168,7 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ) .unwrap(); @@ -1172,7 +1204,7 @@ mod tests { assert!(slot_hash_votes.contains(&pubkey)); } - // Check that the received votes were pushed to other commponents + // Check that the received votes were pushed to other components // subscribing via `verified_vote_receiver` let all_expected_slots: BTreeSet<_> = gossip_vote_slots .clone() @@ -1255,6 +1287,7 @@ mod tests { let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (verified_vote_sender, verified_vote_receiver) = unbounded(); let (_replay_votes_sender, replay_votes_receiver) = unbounded(); + let mut latest_vote_slot_per_validator = HashMap::new(); let mut expected_votes = vec![]; let num_voters_per_slot = 2; @@ -1295,10 +1328,11 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ) .unwrap(); - // Check that the received votes were pushed to other commponents + // Check that the received votes were pushed to other components // subscribing via a channel let received_votes: Vec<_> = verified_vote_receiver.try_iter().collect(); assert_eq!(received_votes.len(), validator_voting_keypairs.len()); @@ -1340,6 +1374,7 @@ mod tests { let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (replay_votes_sender, replay_votes_receiver): (ReplayVoteSender, ReplayVoteReceiver) = unbounded(); + let mut latest_vote_slot_per_validator = HashMap::new(); let vote_slot = 1; let vote_bank_hash = Hash::default(); @@ -1396,6 +1431,7 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ); } let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap(); @@ -1454,6 +1490,7 @@ mod tests { Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, )); + let mut latest_vote_slot_per_validator = HashMap::new(); // Send a vote to process, should add a reference to the pubkey for that voter // in the tracker @@ -1489,6 +1526,7 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ); // Setup next epoch @@ -1536,6 +1574,7 @@ mod tests { &None, &None, &mut None, + &mut latest_vote_slot_per_validator, ); } @@ -1761,4 +1800,103 @@ mod tests { .previously_sent_to_bank_votes .is_empty()); } + + #[test] + fn test_track_new_votes_filter() { + let validator_keypairs: Vec<_> = + (0..2).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); + + let GenesisConfigInfo { genesis_config, .. 
} = + genesis_utils::create_genesis_config_with_vote_accounts( + 10_000, + &validator_keypairs, + vec![100; validator_keypairs.len()], + ); + let bank = Bank::new_for_tests(&genesis_config); + let exit = Arc::new(AtomicBool::new(false)); + let bank_forks = BankForks::new_rw_arc(bank); + let bank = bank_forks.read().unwrap().get(0).unwrap(); + let vote_tracker = VoteTracker::default(); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let max_complete_rewards_slot = Arc::new(AtomicU64::default()); + let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( + exit, + max_complete_transaction_status_slot, + max_complete_rewards_slot, + bank_forks, + Arc::new(RwLock::new(BlockCommitmentCache::default())), + optimistically_confirmed_bank, + )); + let mut latest_vote_slot_per_validator = HashMap::new(); + + let (verified_vote_sender, _verified_vote_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); + let mut diff = HashMap::default(); + let mut new_optimistic_confirmed_slots = vec![]; + + let validator0_keypairs = &validator_keypairs[0]; + let (vote_pubkey, vote, _, signature) = vote_parser::parse_vote_transaction( + &vote_transaction::new_vote_state_update_transaction( + VoteStateUpdate::from(vec![(1, 3), (2, 2), (6, 1)]), + Hash::default(), + &validator0_keypairs.node_keypair, + &validator0_keypairs.vote_keypair, + &validator0_keypairs.vote_keypair, + None, + ), + ) + .unwrap(); + + ClusterInfoVoteListener::track_new_votes_and_notify_confirmations( + vote, + &vote_pubkey, + signature, + &vote_tracker, + &bank, + &subscriptions, + &verified_vote_sender, + &gossip_verified_vote_hash_sender, + &mut diff, + &mut new_optimistic_confirmed_slots, + true, /* is gossip */ + &None, + &None, + &mut latest_vote_slot_per_validator, + ); + assert_eq!(diff.keys().copied().sorted().collect_vec(), vec![1, 2, 6]); + + // Vote on a new slot, only those later than 6 should show up. 4 is skipped. 
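// A standalone model of the freshness filter this test exercises: each
// validator's newest last-voted slot is remembered, and older vote slots are
// skipped when building the propagation diff (optimistic-confirmation tracking
// has already run by this point, which is why the patch filters afterwards).
use std::cmp::max;
use std::collections::{BTreeSet, HashMap};

fn track_votes(
    latest_vote_slot_per_validator: &mut HashMap<&'static str, u64>,
    vote_pubkey: &'static str,
    vote_slots: &[u64],
    diff: &mut BTreeSet<u64>,
) {
    let last_vote_slot = *vote_slots.iter().max().unwrap();
    let latest_vote_slot = latest_vote_slot_per_validator.entry(vote_pubkey).or_insert(0);
    for &slot in vote_slots {
        if slot < *latest_vote_slot {
            continue; // stale: its stake already rolled up to newer tower votes
        }
        diff.insert(slot);
    }
    *latest_vote_slot = max(*latest_vote_slot, last_vote_slot);
}

fn main() {
    let mut latest = HashMap::new();
    let mut diff = BTreeSet::new();

    track_votes(&mut latest, "validator0", &[1, 2, 6], &mut diff);
    assert_eq!(diff.iter().copied().collect::<Vec<_>>(), vec![1, 2, 6]);

    diff.clear();
    track_votes(&mut latest, "validator0", &[1, 2, 3, 4, 7, 8], &mut diff);
    assert_eq!(diff.iter().copied().collect::<Vec<_>>(), vec![7, 8]); // slots below 6 skipped
}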
+ diff.clear(); + let (vote_pubkey, vote, _, signature) = vote_parser::parse_vote_transaction( + &vote_transaction::new_vote_state_update_transaction( + VoteStateUpdate::from(vec![(1, 6), (2, 5), (3, 4), (4, 3), (7, 2), (8, 1)]), + Hash::default(), + &validator0_keypairs.node_keypair, + &validator0_keypairs.vote_keypair, + &validator0_keypairs.vote_keypair, + None, + ), + ) + .unwrap(); + + ClusterInfoVoteListener::track_new_votes_and_notify_confirmations( + vote, + &vote_pubkey, + signature, + &vote_tracker, + &bank, + &subscriptions, + &verified_vote_sender, + &gossip_verified_vote_hash_sender, + &mut diff, + &mut new_optimistic_confirmed_slots, + true, /* is gossip */ + &None, + &None, + &mut latest_vote_slot_per_validator, + ); + assert_eq!(diff.keys().copied().sorted().collect_vec(), vec![7, 8]); + } } diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 84242b44c6433a..e6d12d1b08b917 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -263,6 +263,20 @@ mod tests { }, }; + fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock, + parent: Arc, + collector_id: &Pubkey, + slot: Slot, + ) -> Arc { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() + } + #[test] fn test_get_highest_super_majority_root() { assert_eq!(get_highest_super_majority_root(vec![], 10), 0); @@ -508,14 +522,18 @@ mod tests { vec![100; 1], ); - let bank0 = Bank::new_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank0); + let (_bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Fill bank_forks with banks with votes landing in the next slot // Create enough banks such that vote account will root slots 0 and 1 for x in 0..33 { let previous_bank = bank_forks.read().unwrap().get(x).unwrap(); - let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), x + 1); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + x + 1, + ); let vote = vote_transaction::new_vote_transaction( vec![x], previous_bank.hash(), @@ -526,7 +544,6 @@ mod tests { None, ); bank.process_transaction(&vote).unwrap(); - bank_forks.write().unwrap().insert(bank); } let working_bank = bank_forks.read().unwrap().working_bank(); @@ -543,7 +560,12 @@ mod tests { // Add an additional bank/vote that will root slot 2 let bank33 = bank_forks.read().unwrap().get(33).unwrap(); - let bank34 = Bank::new_from_parent(bank33.clone(), &Pubkey::default(), 34); + let bank34 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank33.clone(), + &Pubkey::default(), + 34, + ); let vote33 = vote_transaction::new_vote_transaction( vec![33], bank33.hash(), @@ -554,7 +576,6 @@ mod tests { None, ); bank34.process_transaction(&vote33).unwrap(); - bank_forks.write().unwrap().insert(bank34); let working_bank = bank_forks.read().unwrap().working_bank(); let root = get_vote_account_root_slot( @@ -587,8 +608,12 @@ mod tests { // Add a forked bank. 
Because the vote for bank 33 landed in the non-ancestor, the vote // account's root (and thus the highest_super_majority_root) rolls back to slot 1 let bank33 = bank_forks.read().unwrap().get(33).unwrap(); - let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35); - bank_forks.write().unwrap().insert(bank35); + let _bank35 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank33, + &Pubkey::default(), + 35, + ); let working_bank = bank_forks.read().unwrap().working_bank(); let ancestors = working_bank.status_cache_ancestors(); @@ -613,7 +638,12 @@ mod tests { // continues normally for x in 35..=37 { let previous_bank = bank_forks.read().unwrap().get(x).unwrap(); - let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), x + 1); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + x + 1, + ); let vote = vote_transaction::new_vote_transaction( vec![x], previous_bank.hash(), @@ -624,7 +654,6 @@ mod tests { None, ); bank.process_transaction(&vote).unwrap(); - bank_forks.write().unwrap().insert(bank); } let working_bank = bank_forks.read().unwrap().working_bank(); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 72a0c39bc35730..f23325f9beb72e 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -52,7 +52,7 @@ use { pub enum ThresholdDecision { #[default] PassedThreshold, - FailedThreshold(/* Observed stake */ u64), + FailedThreshold(/* vote depth */ u64, /* Observed stake */ u64), } impl ThresholdDecision { @@ -141,6 +141,7 @@ impl SwitchForkDecision { } } +const VOTE_THRESHOLD_DEPTH_SHALLOW: usize = 4; pub const VOTE_THRESHOLD_DEPTH: usize = 8; pub const SWITCH_FORK_THRESHOLD: f64 = 0.38; @@ -1042,46 +1043,88 @@ impl Tower { self.last_switch_threshold_check.is_none() } - /// Performs threshold check for `slot` - /// - /// If it passes the check returns None, otherwise returns Some(fork_stake) - pub fn check_vote_stake_threshold( + /// Checks a single vote threshold for `slot` + fn check_vote_stake_threshold( + threshold_vote: Option<&Lockout>, + vote_state_before_applying_vote: &VoteState, + threshold_depth: usize, + threshold_size: f64, + slot: Slot, + voted_stakes: &HashMap, + total_stake: u64, + ) -> ThresholdDecision { + let Some(threshold_vote) = threshold_vote else { + // Tower isn't that deep. + return ThresholdDecision::PassedThreshold; + }; + let Some(fork_stake) = voted_stakes.get(&threshold_vote.slot()) else { + // We haven't seen any votes on this fork yet, so no stake + return ThresholdDecision::FailedThreshold(threshold_depth as u64, 0); + }; + + let lockout = *fork_stake as f64 / total_stake as f64; + trace!( + "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} total_stake: {}", + slot, + threshold_vote.slot(), + lockout, + fork_stake, + total_stake + ); + if threshold_vote.confirmation_count() as usize > threshold_depth { + for old_vote in &vote_state_before_applying_vote.votes { + if old_vote.slot() == threshold_vote.slot() + && old_vote.confirmation_count() == threshold_vote.confirmation_count() + { + // If you bounce back to voting on the main fork after not + // voting for a while, your latest vote N on the main fork + // might pop off a lot of the stake of votes in the tower. + // This stake would have rolled up to earlier votes in the + // tower, so skip the stake check. 
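// A minimal sketch of the multi-threshold loop added further down in
// check_vote_stake_thresholds: each (depth, required stake fraction) pair is
// checked in order and the first failure wins, carrying the failing depth
// (the tower pop-off exemption shown above is omitted here for brevity).
#[derive(Debug, PartialEq)]
enum ThresholdDecision {
    PassedThreshold,
    FailedThreshold(u64 /* vote depth */, u64 /* observed stake */),
}

fn check_vote_stake_thresholds(
    fork_stake_at_depth: impl Fn(usize) -> Option<u64>,
    total_stake: u64,
    thresholds: &[(usize, f64)],
) -> ThresholdDecision {
    for &(depth, size) in thresholds {
        let Some(fork_stake) = fork_stake_at_depth(depth) else {
            return ThresholdDecision::FailedThreshold(depth as u64, 0); // no votes seen yet
        };
        if fork_stake as f64 / total_stake as f64 <= size {
            return ThresholdDecision::FailedThreshold(depth as u64, fork_stake);
        }
    }
    ThresholdDecision::PassedThreshold
}

fn main() {
    // A shallow check at depth 4 with the switch threshold, then the usual
    // deep check at depth 8, mirroring vote_thresholds_and_depths.
    let thresholds = [(4, 0.38), (8, 0.67)];
    let stakes = |depth: usize| Some(if depth == 4 { 30 } else { 70 });
    assert_eq!(
        check_vote_stake_thresholds(stakes, 100, &thresholds),
        ThresholdDecision::FailedThreshold(4, 30) // the shallow check fails first
    );
}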
+ return ThresholdDecision::PassedThreshold; + } + } + } + if lockout > threshold_size { + return ThresholdDecision::PassedThreshold; + } + ThresholdDecision::FailedThreshold(threshold_depth as u64, *fork_stake) + } + + /// Performs vote threshold checks for `slot` + pub fn check_vote_stake_thresholds( &self, slot: Slot, voted_stakes: &VotedStakes, total_stake: Stake, ) -> ThresholdDecision { + // Generate the vote state assuming this vote is included. let mut vote_state = self.vote_state.clone(); process_slot_vote_unchecked(&mut vote_state, slot); - let lockout = vote_state.nth_recent_lockout(self.threshold_depth); - if let Some(lockout) = lockout { - if let Some(fork_stake) = voted_stakes.get(&lockout.slot()) { - let lockout_stake = *fork_stake as f64 / total_stake as f64; - trace!( - "fork_stake slot: {}, vote slot: {}, lockout: {} fork_stake: {} total_stake: {}", - slot, lockout.slot(), lockout_stake, fork_stake, total_stake - ); - if lockout.confirmation_count() as usize > self.threshold_depth { - for old_vote in &self.vote_state.votes { - if old_vote.slot() == lockout.slot() - && old_vote.confirmation_count() == lockout.confirmation_count() - { - return ThresholdDecision::PassedThreshold; - } - } - } - if lockout_stake > self.threshold_size { - return ThresholdDecision::PassedThreshold; - } - ThresholdDecision::FailedThreshold(*fork_stake) - } else { - // We haven't seen any votes on this fork yet, so no stake - ThresholdDecision::FailedThreshold(0) + // Assemble all the vote thresholds and depths to check. + let vote_thresholds_and_depths = vec![ + (VOTE_THRESHOLD_DEPTH_SHALLOW, SWITCH_FORK_THRESHOLD), + (self.threshold_depth, self.threshold_size), + ]; + + // Check one by one. If any threshold fails, return failure. + for (threshold_depth, threshold_size) in vote_thresholds_and_depths { + if let ThresholdDecision::FailedThreshold(vote_depth, stake) = + Self::check_vote_stake_threshold( + vote_state.nth_recent_lockout(threshold_depth), + &self.vote_state, + threshold_depth, + threshold_size, + slot, + voted_stakes, + total_stake, + ) + { + return ThresholdDecision::FailedThreshold(vote_depth, stake); } - } else { - ThresholdDecision::PassedThreshold } + ThresholdDecision::PassedThreshold } /// Update lockouts for all the ancestors @@ -2297,7 +2340,7 @@ pub mod test { fn test_check_vote_threshold_without_votes() { let tower = Tower::new_for_tests(1, 0.67); let stakes = vec![(0, 1)].into_iter().collect(); - assert!(tower.check_vote_stake_threshold(0, &stakes, 2).passed()); + assert!(tower.check_vote_stake_thresholds(0, &stakes, 2).passed()); } #[test] @@ -2310,7 +2353,7 @@ pub mod test { tower.record_vote(i, Hash::default()); } assert!(!tower - .check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,) + .check_vote_stake_thresholds(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2) .passed()); } @@ -2426,14 +2469,56 @@ pub mod test { let mut tower = Tower::new_for_tests(1, 0.67); let stakes = vec![(0, 1)].into_iter().collect(); tower.record_vote(0, Hash::default()); - assert!(!tower.check_vote_stake_threshold(1, &stakes, 2).passed()); + assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).passed()); } #[test] fn test_check_vote_threshold_above_threshold() { let mut tower = Tower::new_for_tests(1, 0.67); let stakes = vec![(0, 2)].into_iter().collect(); tower.record_vote(0, Hash::default()); - assert!(tower.check_vote_stake_threshold(1, &stakes, 2).passed()); + assert!(tower.check_vote_stake_thresholds(1, &stakes, 2).passed()); + } + + #[test] + fn 
test_check_vote_thresholds_above_thresholds() { + let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67); + let stakes = vec![(0, 3), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 2)] + .into_iter() + .collect(); + for slot in 0..VOTE_THRESHOLD_DEPTH { + tower.record_vote(slot as Slot, Hash::default()); + } + assert!(tower + .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 4) + .passed()); + } + + #[test] + fn test_check_vote_threshold_deep_below_threshold() { + let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67); + let stakes = vec![(0, 6), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 4)] + .into_iter() + .collect(); + for slot in 0..VOTE_THRESHOLD_DEPTH { + tower.record_vote(slot as Slot, Hash::default()); + } + assert!(!tower + .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10) + .passed()); + } + + #[test] + fn test_check_vote_threshold_shallow_below_threshold() { + let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, 0.67); + let stakes = vec![(0, 7), (VOTE_THRESHOLD_DEPTH_SHALLOW as u64, 1)] + .into_iter() + .collect(); + for slot in 0..VOTE_THRESHOLD_DEPTH { + tower.record_vote(slot as Slot, Hash::default()); + } + assert!(!tower + .check_vote_stake_thresholds(VOTE_THRESHOLD_DEPTH.try_into().unwrap(), &stakes, 10) + .passed()); } #[test] @@ -2443,7 +2528,7 @@ pub mod test { tower.record_vote(0, Hash::default()); tower.record_vote(1, Hash::default()); tower.record_vote(2, Hash::default()); - assert!(tower.check_vote_stake_threshold(6, &stakes, 2).passed()); + assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).passed()); } #[test] @@ -2451,7 +2536,7 @@ pub mod test { let mut tower = Tower::new_for_tests(1, 0.67); let stakes = HashMap::new(); tower.record_vote(0, Hash::default()); - assert!(!tower.check_vote_stake_threshold(1, &stakes, 2).passed()); + assert!(!tower.check_vote_stake_thresholds(1, &stakes, 2).passed()); } #[test] @@ -2462,7 +2547,7 @@ pub mod test { tower.record_vote(0, Hash::default()); tower.record_vote(1, Hash::default()); tower.record_vote(2, Hash::default()); - assert!(tower.check_vote_stake_threshold(6, &stakes, 2,).passed()); + assert!(tower.check_vote_stake_thresholds(6, &stakes, 2).passed()); } #[test] @@ -2526,7 +2611,7 @@ pub mod test { &mut LatestValidatorVotesForFrozenBanks::default(), ); assert!(tower - .check_vote_stake_threshold(vote_to_evaluate, &voted_stakes, total_stake,) + .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake) .passed()); // CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. 
This slot @@ -2546,7 +2631,7 @@ pub mod test { &mut LatestValidatorVotesForFrozenBanks::default(), ); assert!(!tower - .check_vote_stake_threshold(vote_to_evaluate, &voted_stakes, total_stake,) + .check_vote_stake_thresholds(vote_to_evaluate, &voted_stakes, total_stake) .passed()); } diff --git a/core/src/consensus/heaviest_subtree_fork_choice.rs b/core/src/consensus/heaviest_subtree_fork_choice.rs index 4b58ee78b99da7..9a67069c2dfefd 100644 --- a/core/src/consensus/heaviest_subtree_fork_choice.rs +++ b/core/src/consensus/heaviest_subtree_fork_choice.rs @@ -16,6 +16,7 @@ use { }, std::{ borrow::Borrow, + cmp::Ordering, collections::{ btree_set::Iter, hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet, VecDeque, }, @@ -94,10 +95,16 @@ struct ForkInfo { // Amount of stake that has voted for this slot and the subtree // rooted at this slot stake_voted_subtree: ForkWeight, + // Tree height for the subtree rooted at this slot + height: usize, // Best slot in the subtree rooted at this slot, does not // have to be a direct child in `children`. This is the slot whose subtree // is the heaviest. best_slot: SlotHashKey, + // Deepest slot in the subtree rooted at this slot. This is the slot + // with the greatest tree height. This metric does not discriminate invalid + // forks, unlike `best_slot` + deepest_slot: SlotHashKey, parent: Option<SlotHashKey>, children: BTreeSet<SlotHashKey>, // The latest ancestor of this node that has been marked invalid. If the slot @@ -285,16 +292,30 @@ impl HeaviestSubtreeForkChoice { .map(|fork_info| fork_info.best_slot) } + pub fn deepest_slot(&self, key: &SlotHashKey) -> Option<SlotHashKey> { + self.fork_infos + .get(key) + .map(|fork_info| fork_info.deepest_slot) + } + pub fn best_overall_slot(&self) -> SlotHashKey { self.best_slot(&self.tree_root).unwrap() } + pub fn deepest_overall_slot(&self) -> SlotHashKey { + self.deepest_slot(&self.tree_root).unwrap() + } + pub fn stake_voted_subtree(&self, key: &SlotHashKey) -> Option<ForkWeight> { self.fork_infos .get(key) .map(|fork_info| fork_info.stake_voted_subtree) } + pub fn height(&self, key: &SlotHashKey) -> Option<usize> { self.fork_infos.get(key).map(|fork_info| fork_info.height) } + pub fn tree_root(&self) -> SlotHashKey { self.tree_root } @@ -404,8 +425,10 @@ impl HeaviestSubtreeForkChoice { let root_parent_info = ForkInfo { stake_voted_at: 0, stake_voted_subtree: root_info.stake_voted_subtree, + height: root_info.height + 1, - // The `best_slot` does not change + // The `best_slot` and `deepest_slot` do not change best_slot: root_info.best_slot, + deepest_slot: root_info.deepest_slot, children: BTreeSet::from([self.tree_root]), parent: None, latest_invalid_ancestor: None, @@ -435,8 +458,10 @@ impl HeaviestSubtreeForkChoice { .or_insert(ForkInfo { stake_voted_at: 0, stake_voted_subtree: 0, - // The `best_slot` of a leaf is itself + height: 1, + // The `best_slot` and `deepest_slot` of a leaf is itself best_slot: slot_hash_key, + deepest_slot: slot_hash_key, children: BTreeSet::new(), parent, latest_invalid_ancestor: parent_latest_invalid_ancestor, @@ -459,8 +484,8 @@ impl HeaviestSubtreeForkChoice { .insert(slot_hash_key); // Propagate leaf up the tree to any ancestors who considered the previous leaf - // the `best_slot` - self.propagate_new_leaf(&slot_hash_key, &parent) + // the `best_slot`, as well as any deepest slot info + self.propagate_new_leaf(&slot_hash_key, &parent); } // Returns true if the given `maybe_best_child` is the heaviest among the children @@ -492,6 +517,41 @@ impl HeaviestSubtreeForkChoice { true } + // Returns true if the given
`maybe_deepest_child` is the deepest among the children + // of the parent. Breaks ties by stake, then slot # (lower wins). + fn is_deepest_child(&self, maybe_deepest_child: &SlotHashKey) -> bool { + let maybe_deepest_child_weight = self.stake_voted_subtree(maybe_deepest_child).unwrap(); + let maybe_deepest_child_height = self.height(maybe_deepest_child).unwrap(); + let parent = self.parent(maybe_deepest_child); + // If there's no parent, this must be the root + if parent.is_none() { + return true; + } + for child in self.children(&parent.unwrap()).unwrap() { + let child_height = self + .height(child) + .expect("child must exist in `self.fork_infos`"); + let child_weight = self + .stake_voted_subtree(child) + .expect("child must exist in `self.fork_infos`"); + + match ( + child_height.cmp(&maybe_deepest_child_height), + child_weight.cmp(&maybe_deepest_child_weight), + child.cmp(maybe_deepest_child), + ) { + (Ordering::Greater, _, _) => return false, + // Tiebreak by stake + (Ordering::Equal, Ordering::Greater, _) => return false, + // Tiebreak by slot # + (Ordering::Equal, Ordering::Equal, Ordering::Less) => return false, + _ => (), + } + } + + true + } + pub fn all_slots_stake_voted_subtree(&self) -> impl Iterator<Item = (&SlotHashKey, ForkWeight)> { self.fork_infos .iter() @@ -509,29 +569,35 @@ impl HeaviestSubtreeForkChoice { /// Returns the subtree originating from `slot_hash_key` pub fn split_off(&mut self, slot_hash_key: &SlotHashKey) -> Self { assert_ne!(self.tree_root, *slot_hash_key); - let mut split_tree_root = { + let (mut split_tree_root, parent) = { let node_to_split_at = self .fork_infos .get_mut(slot_hash_key) .expect("Slot hash key must exist in tree"); - let split_tree_fork_info = node_to_split_at.clone(); - // Remove stake to be aggregated up the tree - node_to_split_at.stake_voted_subtree = 0; - node_to_split_at.stake_voted_at = 0; - // Mark this node as invalid so that it cannot be chosen as best child - node_to_split_at.latest_invalid_ancestor = Some(slot_hash_key.0); - split_tree_fork_info + ( + node_to_split_at.clone(), + node_to_split_at + .parent + .expect("Split node is not tree root"), + ) }; let mut update_operations: UpdateOperations = BTreeMap::new(); - // Aggregate up to the root + // Insert aggregate operations up to the root self.insert_aggregate_operations(&mut update_operations, *slot_hash_key); + // Remove child link so that this slot cannot be chosen as best or deepest + assert!(self + .fork_infos + .get_mut(&parent) + .expect("Parent must exist in fork_infos") + .children + .remove(slot_hash_key)); + // Aggregate self.process_update_operations(update_operations); // Remove node + all children and add to new tree let mut split_tree_fork_infos = HashMap::new(); let mut to_visit = vec![*slot_hash_key]; - while let Some(current_node) = to_visit.pop() { let current_fork_info = self .fork_infos @@ -657,6 +723,10 @@ impl HeaviestSubtreeForkChoice { }) } + /// To be called when `slot_hash_key` has been added to `self.fork_infos`, before any + /// aggregate update operations have taken place. + /// + /// Will propagate updated `best_slot` and `deepest_slot` to ancestors.
fn propagate_new_leaf( &mut self, slot_hash_key: &SlotHashKey, @@ -665,9 +735,7 @@ let parent_best_slot_hash_key = self .best_slot(parent_slot_hash_key) .expect("parent must exist in self.fork_infos after its child leaf was created"); - - // If this new leaf is the direct parent's best child, then propagate - // it up the tree + // If this new leaf is the direct parent's best child, then propagate it up the tree if self.is_best_child(slot_hash_key) { let mut ancestor = Some(*parent_slot_hash_key); loop { @@ -683,6 +751,24 @@ ancestor = ancestor_fork_info.parent; } } + // Propagate the deepest slot up the tree + let mut ancestor = Some(*parent_slot_hash_key); + let mut current_child = *slot_hash_key; + let mut current_height = 1; + loop { + if ancestor.is_none() { + break; + } + if !self.is_deepest_child(&current_child) { + break; + } + let ancestor_fork_info = self.fork_infos.get_mut(&ancestor.unwrap()).unwrap(); + ancestor_fork_info.deepest_slot = *slot_hash_key; + ancestor_fork_info.height = current_height + 1; + current_child = ancestor.unwrap(); + current_height = ancestor_fork_info.height; + ancestor = ancestor_fork_info.parent; + } } fn insert_aggregate_operations( @@ -757,18 +843,23 @@ fn aggregate_slot(&mut self, slot_hash_key: SlotHashKey) { let mut stake_voted_subtree; + let mut deepest_child_height = 0; let mut best_slot_hash_key = slot_hash_key; + let mut deepest_slot_hash_key = slot_hash_key; let mut is_duplicate_confirmed = false; if let Some(fork_info) = self.fork_infos.get(&slot_hash_key) { stake_voted_subtree = fork_info.stake_voted_at; let mut best_child_stake_voted_subtree = 0; let mut best_child_slot_key = slot_hash_key; + let mut deepest_child_stake_voted_subtree = 0; + let mut deepest_child_slot_key = slot_hash_key; for child_key in &fork_info.children { let child_fork_info = self .fork_infos .get(child_key) .expect("Child must exist in fork_info map"); let child_stake_voted_subtree = child_fork_info.stake_voted_subtree; + let child_height = child_fork_info.height; is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed; // Child forks that are not candidates still contribute to the weight @@ -804,6 +895,28 @@ best_child_slot_key = *child_key; best_slot_hash_key = child_fork_info.best_slot; } + + match ( + deepest_child_slot_key == slot_hash_key, + child_height.cmp(&deepest_child_height), + child_stake_voted_subtree.cmp(&deepest_child_stake_voted_subtree), + child_key.cmp(&deepest_child_slot_key) + ) { + // First child + (true, _, _, _) | + // or deeper child + (_, Ordering::Greater, _, _) | + // or tie break by stake weight + (_, Ordering::Equal, Ordering::Greater, _) | + // or tie break by slot # + (_, Ordering::Equal, Ordering::Equal, Ordering::Less) => { + deepest_child_height = child_height; + deepest_child_stake_voted_subtree = child_stake_voted_subtree; + deepest_child_slot_key = *child_key; + deepest_slot_hash_key = child_fork_info.deepest_slot; + }, + _ => () + } } } else { return; @@ -820,7 +933,9 @@ fork_info.set_duplicate_confirmed(); } fork_info.stake_voted_subtree = stake_voted_subtree; + fork_info.height = deepest_child_height + 1; fork_info.best_slot = best_slot_hash_key; + fork_info.deepest_slot = deepest_slot_hash_key; } /// Mark that `valid_slot` on the fork starting at `fork_to_modify` has been marked @@ -1019,7 +1134,49 @@ impl HeaviestSubtreeForkChoice {
.and_then(|last_voted_slot_hash| { match self.is_candidate(&last_voted_slot_hash) { Some(true) => self.best_slot(&last_voted_slot_hash), - Some(false) => None, + Some(false) => { + // In this case our last voted fork has been marked invalid because + // it contains a duplicate block. It is critical that we continue to + // build on it as long as there exists at least one non-duplicate fork. + // This is because there is a chance that this fork is actually duplicate + // confirmed but not observed because there is no block containing the + // required votes. + // + // Scenario 1: + // Slot 0 - Slot 1 (90%) + // | + // - Slot 1' + // | + // - Slot 2 (10%) + // + // Imagine that 90% of validators voted for Slot 1, but because of the existence + // of Slot 1', Slot 1 is marked as invalid in fork choice. It is impossible to reach + // the required switch threshold for these validators to switch off of Slot 1 to Slot 2. + // In this case it is important for someone to build a Slot 3 off of Slot 1 that contains + // the votes for Slot 1. At this point they will see that the fork off of Slot 1 is duplicate + // confirmed, and the rest of the network can repair Slot 1 and mark it as a valid candidate, + // allowing fork choice to converge. + // + // This will only occur after Slot 2 has been created, in order to resolve the following + // scenario: + // + // Scenario 2: + // Slot 0 - Slot 1 (30%) + // | + // - Slot 1' (30%) + // + // In this scenario only 60% of the network has voted before the duplicate proof for Slot 1 and 1' + // was viewed. Neither version of the slot will reach the duplicate confirmed threshold, so it is + // critical that a new fork Slot 2 from Slot 0 is created to allow the validators on Slot 1 and + // Slot 1' to switch. Since the `best_slot` is an ancestor of the last vote (Slot 0 is an ancestor of last + // vote Slot 1 or Slot 1'), we will trigger `SwitchForkDecision::FailedSwitchDuplicateRollback`, which + // will create an alternate fork off of Slot 0. Once this alternate fork is created, the `best_slot` + // will be Slot 2, at which point we will be in Scenario 1 and continue building off of Slot 1 or Slot 1'. + // + // For more details see the case for + // `SwitchForkDecision::FailedSwitchDuplicateRollback` in `ReplayStage::select_vote_and_reset_forks`. + self.deepest_slot(&last_voted_slot_hash) + } None => { if !tower.is_stray_last_vote() { // Unless last vote is stray and stale, self.is_candidate(last_voted_slot_hash) must return @@ -1126,9 +1283,39 @@ impl ForkChoice for HeaviestSubtreeForkChoice { .get_with_checked_hash(self.best_overall_slot()) .unwrap(), self.heaviest_slot_on_same_voted_fork(tower) - .map(|slot_hash| { - // BankForks should only contain one valid version of this slot - r_bank_forks.get_with_checked_hash(slot_hash).unwrap() + .and_then(|slot_hash| { + #[allow(clippy::manual_filter)] + if let Some(bank) = r_bank_forks.get(slot_hash.0) { + if bank.hash() != slot_hash.1 { + // It is possible that our last vote was for an invalid fork + // and we have repaired and replayed the correct version of the fork. + // In this case the hash for the heaviest bank on our voted fork + // will no longer match what we have replayed. + // + // Because we have dumped and repaired a new version, it is impossible + // for our last voted fork to become duplicate confirmed as the state + // machine will never dump and repair a block that has not been observed + // as duplicate confirmed. Therefore it is safe to never build on this + // invalid fork.
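+ // (Illustrative note: unlike the invalid-candidate fallback in
+ // `heaviest_slot_on_same_voted_fork` above, which keeps building via
+ // `deepest_slot`, a mismatched hash means this fork was already dumped
+ // and replaced, so returning `None` here is the safe choice.)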
+ None + } else { + Some(bank) + } + } else { + // It is possible that our last vote was for an invalid fork + // and we are in the middle of dumping and repairing such a fork. + // In that case, the `heaviest_slot_on_same_voted_fork` has a chance to + // be for a slot that we currently do not have in our bank forks, so we + // return None. + // + // We are guaranteed that we will eventually repair a duplicate confirmed version + // of this slot because the state machine will never dump a slot unless it has + // observed a duplicate confirmed version of the slot. + // + // Therefore there is no chance that our last voted fork will ever become + // duplicate confirmed, so it is safe to never build on it. + None + } }), ) } @@ -1323,6 +1510,14 @@ mod test { .0, 5 ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(2, Hash::default())) + .unwrap() + .0, + 5 + ); + assert!(heaviest_subtree_fork_choice .parent(&(2, Hash::default())) .is_none()); @@ -1516,7 +1711,7 @@ mod test { // Vote for slot 2 heaviest_subtree_fork_choice.add_votes( - [(vote_pubkeys[0], (1, Hash::default()))].iter(), + [(vote_pubkeys[0], (2, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); @@ -1671,6 +1866,7 @@ mod test { mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + _, ) = setup_duplicate_forks(); // Add a child to one of the duplicates @@ -1715,7 +1911,7 @@ mod test { fn test_propagate_new_leaf() { let mut heaviest_subtree_fork_choice = setup_forks(); - // Add a leaf 10, it should be the best choice + // Add a leaf 10, it should be the best and deepest choice heaviest_subtree_fork_choice .add_new_leaf_slot((10, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice .chain(std::iter::once((10, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 10); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 10); } - // Add a smaller leaf 9, it should be the best choice + // Add a smaller leaf 9, it should be the best and deepest choice heaviest_subtree_fork_choice .add_new_leaf_slot((9, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice .chain(std::iter::once((9, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 9); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 9); } - // Add a higher leaf 11, should not change the best choice + // Add a higher leaf 11, should not change the best or deepest choice heaviest_subtree_fork_choice .add_new_leaf_slot((11, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice .chain(std::iter::once((9, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 9); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 9); } // Add a vote for the other branch at slot 3. // Because slot 1 now sees the child branch at slot 3 has non-zero // weight, adding smaller leaf slot 8 in the other child branch at slot 2 // should not propagate past slot 1 + // Similarly, both forks have the same tree height so we should tie-break by + // stake weight, choosing 6 as the deepest slot when possible.
heaviest_subtree_fork_choice .add_new_leaf_slot((8, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1771,6 +1972,10 @@ mod test { heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, best_slot ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, + best_slot + ); } // Add vote for slot 8, should now be the best slot (has same weight @@ -1781,9 +1986,12 @@ mod test { bank.epoch_schedule(), ); assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 8); + // Deepest overall is now 8 as well + assert_eq!(heaviest_subtree_fork_choice.deepest_overall_slot().0, 8); // Because slot 4 now sees the child leaf 8 has non-zero // weight, adding smaller leaf slots should not propagate past slot 4 + // Similarly by tiebreak, 8 should be the deepest slot heaviest_subtree_fork_choice .add_new_leaf_slot((7, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1791,9 +1999,10 @@ mod test { .chain(std::iter::once((8, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 8); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 8); } - // All the leaves should think they are their own best choice + // All the leaves should think they are their own best and deepest choice for leaf in [8, 9, 10, 11].iter() { assert_eq!( heaviest_subtree_fork_choice @@ -1802,6 +2011,13 @@ mod test { .0, *leaf ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(*leaf, Hash::default())) + .unwrap() + .0, + *leaf + ); } } @@ -1891,6 +2107,28 @@ mod test { .0, 6 ); + // The deepest leaf only tiebreaks by slot # when tree heights are equal + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(1, Hash::default())) + .unwrap() + .0, + 6 + ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(2, Hash::default())) + .unwrap() + .0, + 4 + ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(3, Hash::default())) + .unwrap() + .0, + 6 + ); // Update the weights that have voted *exactly* at each slot, the // branch containing slots {5, 6} has weight 11, so should be heavier @@ -1917,7 +2155,9 @@ mod test { // The best path is now 0 -> 1 -> 3 -> 5 -> 6, so leaf 6 // should be the best choice + // It is still the deepest choice assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); + assert_eq!(heaviest_subtree_fork_choice.deepest_overall_slot().0, 6); // Verify `stake_voted_at` for slot in 0..=6 { @@ -2003,6 +2243,15 @@ mod test { } }; + let expected_deepest_slot = + |slot, _heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| -> Slot { + if [2, 4].contains(&slot) { + 4 + } else { + 6 + } + }; + check_process_update_correctness( &mut heaviest_subtree_fork_choice, &pubkey_votes, @@ -2010,6 +2259,7 @@ mod test { &bank, stake, expected_best_slot, + expected_deepest_slot, ); // Everyone makes newer votes @@ -2044,6 +2294,7 @@ mod test { &bank, stake, expected_best_slot, + expected_deepest_slot, ); } @@ -2255,8 +2506,12 @@ mod test { #[test] fn test_add_votes_duplicate_tie() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = - setup_duplicate_forks(); + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _, + duplicate_leaves_descended_from_6, + ) = setup_duplicate_forks(); let stake = 10; let num_validators = 2; let (bank, vote_pubkeys) = @@ -2278,16 +2533,23 @@ mod test { ), expected_best_slot_hash ); - assert_eq!( 
heaviest_subtree_fork_choice.best_overall_slot(), expected_best_slot_hash ); + + // we tie-break the duplicate_leaves_descended_from_6 and pick the smaller one + // for deepest + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .deepest_slot(&(3, Hash::default())) .unwrap(), - stake + expected_deepest_slot_hash ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash ); // Adding the same vote again will not do anything @@ -2314,6 +2576,10 @@ mod test { .unwrap(), stake ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // All common ancestors should have subtree voted stake == 2 * stake, but direct // voted stake == 0 @@ -2338,8 +2604,12 @@ mod test { #[test] fn test_add_votes_duplicate_greater_hash_ignored() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = - setup_duplicate_forks(); + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _, + duplicate_leaves_descended_from_6, + ) = setup_duplicate_forks(); let stake = 10; let num_validators = 2; let (bank, vote_pubkeys) = @@ -2361,6 +2631,13 @@ mod test { ), expected_best_slot_hash ); + // we tie-break the duplicate_leaves_descended_from_6 and pick the smaller one + // for deepest + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // Adding a duplicate vote for a validator, for another, greater bank hash, // should be ignored as we prioritize the smaller bank hash. Thus nothing // should change.
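A minimal, free-standing sketch of the deepest-slot ordering these tests exercise: children are compared by tree height, then by subtree stake, and finally by the lower slot key. The simplified `SlotHashKey` stand-in below is an assumption of the sketch, not the patch's type:

use std::cmp::Reverse;

// Simplified stand-in for the crate's (Slot, Hash) key; assumed for this sketch only.
type SlotHashKey = (u64, [u8; 32]);

/// Sort key mirroring `is_deepest_child`/`aggregate_slot`: greater height wins,
/// then greater stake voted in the subtree, then the *lower* slot key.
fn deepest_key(
    height: usize,
    stake_voted_subtree: u64,
    key: SlotHashKey,
) -> (usize, u64, Reverse<SlotHashKey>) {
    (height, stake_voted_subtree, Reverse(key))
}

fn main() {
    // Equal height and stake: the smaller slot number is preferred.
    assert!(deepest_key(3, 10, (5, [0; 32])) > deepest_key(3, 10, (6, [0; 32])));
    // Height dominates stake.
    assert!(deepest_key(4, 0, (9, [0; 32])) > deepest_key(3, 100, (5, [0; 32])));
}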
@@ -2374,6 +2651,10 @@ mod test { ), expected_best_slot_hash ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // Still only has one validator voting on it assert_eq!( @@ -2411,8 +2692,12 @@ mod test { #[test] fn test_add_votes_duplicate_smaller_hash_prioritized() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = - setup_duplicate_forks(); + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _, + duplicate_leaves_descended_from_6, + ) = setup_duplicate_forks(); let stake = 10; let num_validators = 2; let (bank, vote_pubkeys) = @@ -2434,6 +2719,11 @@ mod test { ), expected_best_slot_hash ); + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // BEFORE, both validators voting on this leaf assert_eq!( @@ -2477,6 +2767,10 @@ mod test { .unwrap(), stake, ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // The other leaf now has one of the votes assert_eq!( @@ -2515,7 +2809,7 @@ mod test { #[test] fn test_add_votes_duplicate_then_outdated() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _, _) = setup_duplicate_forks(); let stake = 10; let num_validators = 3; @@ -2578,7 +2872,7 @@ mod test { expected_best_slot_hash ); - // All the stake dirctly voting on the duplicates have been outdated + // All the stake directly voting on the duplicates have been outdated for (i, duplicate_leaf) in duplicate_leaves_descended_from_4.iter().enumerate() { assert_eq!( heaviest_subtree_fork_choice @@ -2641,10 +2935,11 @@ mod test { #[test] fn test_add_votes_duplicate_zero_stake() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _): ( + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _, _): ( HeaviestSubtreeForkChoice, Vec<SlotHashKey>, Vec<SlotHashKey>, + Vec<SlotHashKey>, ) = setup_duplicate_forks(); let stake = 0; @@ -3094,6 +3389,10 @@ mod test { ), (expected_best_slot, Hash::default()), ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + (expected_best_slot, Hash::default()), + ); // Simulate a vote on slot 5 let last_voted_slot_hash = (5, Hash::default()); @@ -3124,10 +3423,10 @@ mod test { assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3); // After marking the last vote in the tower as invalid, `heaviest_slot_on_same_voted_fork()` - // should disregard all descendants of that invalid vote + // should instead use the deepest slot metric, which is still 6 assert_eq!( heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower), - None + Some((6, Hash::default())) ); // Adding another descendant to the invalid candidate won't @@ -3149,10 +3448,14 @@ mod test { (invalid_slot_ancestor, Hash::default()), ); - // This shouldn't update the `heaviest_slot_on_same_voted_fork` either - assert!(heaviest_subtree_fork_choice - .heaviest_slot_on_same_voted_fork(&tower) - .is_none()); + // However, this should update the `heaviest_slot_on_same_voted_fork` since we use + // the deepest metric for invalid forks + assert_eq!( + heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .unwrap(), + new_leaf7, + ); // Adding a descendant to the ancestor of the invalid candidate *should* update // the best slot though, since the ancestor is on the
heaviest fork @@ -3162,9 +3465,12 @@ mod test { assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), new_leaf8,); // Should not update the `heaviest_slot_on_same_voted_fork` because the new leaf // is not descended from the last vote - assert!(heaviest_subtree_fork_choice - .heaviest_slot_on_same_voted_fork(&tower) - .is_none()); + assert_eq!( + heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .unwrap(), + new_leaf7 + ); // If we mark a slot that is a descendant of `invalid_candidate` as valid, then that // should also mark `invalid_candidate` as valid, and the best slot should @@ -3198,6 +3504,7 @@ mod test { mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) = setup_duplicate_forks(); let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(3, stake); @@ -3216,6 +3523,11 @@ mod test { ), duplicate_leaves_descended_from_4[0] ); + // Deepest slot should be the smallest leaf descended from 6 + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + duplicate_leaves_descended_from_6[0], + ); // If we mark slot 4 as invalid, the ancestor 2 should be the heaviest, not // the other branch at slot 5 @@ -3225,6 +3537,11 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), (2, Hash::default()) ); + // Smallest duplicate from 6 should still be deepest + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + duplicate_leaves_descended_from_6[0], + ); ( heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, @@ -3526,11 +3843,11 @@ mod test { } // Mark the larger duplicate slot as confirmed, all slots should no longer - // have any unconfirmed duplicate ancestors, and should be marked as duplciate confirmed + // have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash()); for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { let slot = slot_hash_key.0; - // All slots <= the latest duplciate confirmed slot are ancestors of + // All slots <= the latest duplicate confirmed slot are ancestors of // that slot, so they should all be marked duplicate confirmed assert_eq!( heaviest_subtree_fork_choice @@ -3561,7 +3878,7 @@ mod test { heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash()); for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { let slot = slot_hash_key.0; - // All slots <= the latest duplciate confirmed slot are ancestors of + // All slots <= the latest duplicate confirmed slot are ancestors of // that slot, so they should all be marked duplicate confirmed assert_eq!( heaviest_subtree_fork_choice @@ -3716,12 +4033,84 @@ mod test { assert_eq!(4, tree.best_overall_slot().0); } + #[test] + fn test_split_off_on_deepest_path() { + let mut heaviest_subtree_fork_choice = setup_forks(); + + assert_eq!(6, heaviest_subtree_fork_choice.deepest_overall_slot().0); + + let tree = heaviest_subtree_fork_choice.split_off(&(6, Hash::default())); + assert_eq!(4, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!(6, tree.deepest_overall_slot().0); + + let tree = heaviest_subtree_fork_choice.split_off(&(3, Hash::default())); + assert_eq!(4, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!(5, tree.deepest_overall_slot().0); + + let tree = heaviest_subtree_fork_choice.split_off(&(1,
Hash::default())); + assert_eq!(0, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!(4, tree.deepest_overall_slot().0); + } + + #[test] + fn test_split_off_on_deepest_path_complicated() { + let mut heaviest_subtree_fork_choice = setup_complicated_forks(); + assert_eq!(23, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!( + 9, + heaviest_subtree_fork_choice + .height(&(0, Hash::default())) + .unwrap() + ); + assert_eq!( + 3, + heaviest_subtree_fork_choice + .height(&(9, Hash::default())) + .unwrap() + ); + assert_eq!( + 7, + heaviest_subtree_fork_choice + .height(&(12, Hash::default())) + .unwrap() + ); + + // Take out the 13 branch, 34 should now be deepest + let tree = heaviest_subtree_fork_choice.split_off(&(13, Hash::default())); + assert_eq!(34, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!( + 5, + heaviest_subtree_fork_choice + .height(&(0, Hash::default())) + .unwrap() + ); + assert_eq!( + 3, + heaviest_subtree_fork_choice + .height(&(9, Hash::default())) + .unwrap() + ); + assert_eq!( + 1, + heaviest_subtree_fork_choice + .height(&(12, Hash::default())) + .unwrap() + ); + + // New tree should have updated heights but still think 23 is the deepest + assert_eq!(23, tree.deepest_overall_slot().0); + assert_eq!(6, tree.height(&(13, Hash::default())).unwrap()); + assert_eq!(2, tree.height(&(18, Hash::default())).unwrap()); + assert_eq!(1, tree.height(&(25, Hash::default())).unwrap()); + } + #[test] fn test_split_off_with_dups() { let ( mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) = setup_duplicate_forks(); let stake = 10; @@ -3751,13 +4140,23 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), expected_best_slot_hash ); + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); let tree = heaviest_subtree_fork_choice.split_off(&expected_best_slot_hash); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), duplicate_leaves_descended_from_4[1] ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); assert_eq!(tree.best_overall_slot(), expected_best_slot_hash); + assert_eq!(tree.deepest_overall_slot(), expected_best_slot_hash); } #[test] @@ -3766,6 +4165,7 @@ mod test { mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) = setup_duplicate_forks(); let stake = 10; @@ -3795,13 +4195,25 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), expected_best_slot_hash ); + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); + let tree = heaviest_subtree_fork_choice.split_off(&(2, Hash::default())); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), duplicate_leaves_descended_from_5[0] ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); + assert_eq!(tree.best_overall_slot(), expected_best_slot_hash); + assert_eq!(tree.deepest_overall_slot(), expected_best_slot_hash,); } #[test] @@ -4031,6 +4443,7 @@ mod test { HeaviestSubtreeForkChoice, Vec<SlotHashKey>, Vec<SlotHashKey>, + Vec<SlotHashKey>, ) { /* Build fork structure: slot 0 / \ slot 1 slot 2 / \ slot 3 slot 4 / \ slot 5 slot 10 slot 10 / | \ slot 6 slot 10 slot 10 + / \
+ slot 10 slot 10 */ let mut heaviest_subtree_fork_choice = setup_forks(); @@ -4056,8 +4471,13 @@ mod test { std::iter::repeat_with(|| (duplicate_slot, Hash::new_unique())) .take(2) .collect::<Vec<_>>(); + let mut duplicate_leaves_descended_from_6 = + std::iter::repeat_with(|| (duplicate_slot, Hash::new_unique())) + .take(2) + .collect::<Vec<_>>(); duplicate_leaves_descended_from_4.sort(); duplicate_leaves_descended_from_5.sort(); + duplicate_leaves_descended_from_6.sort(); // Add versions of leaf 10, some with different ancestors, some with the same // ancestors @@ -4069,6 +4489,10 @@ mod test { heaviest_subtree_fork_choice .add_new_leaf_slot(*duplicate_leaf, Some((5, Hash::default()))); } + for duplicate_leaf in &duplicate_leaves_descended_from_6 { + heaviest_subtree_fork_choice + .add_new_leaf_slot(*duplicate_leaf, Some((6, Hash::default()))); + } let mut dup_children = (&heaviest_subtree_fork_choice) .children(&(4, Hash::default())) .unwrap() @@ -4085,23 +4509,34 @@ mod test { .collect(); dup_children.sort(); assert_eq!(dup_children, duplicate_leaves_descended_from_5); + let mut dup_children: Vec<_> = (&heaviest_subtree_fork_choice) + .children(&(6, Hash::default())) + .unwrap() + .copied() + .filter(|(slot, _)| *slot == duplicate_slot) + .collect(); + dup_children.sort(); + assert_eq!(dup_children, duplicate_leaves_descended_from_6); ( heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) } - fn check_process_update_correctness<F>( + fn check_process_update_correctness<F, G>( heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, pubkey_votes: &[(Pubkey, SlotHashKey)], slots_range: Range<Slot>, bank: &Bank, stake: u64, mut expected_best_slot: F, + mut expected_deepest_slot: G, ) where F: FnMut(Slot, &HeaviestSubtreeForkChoice) -> Slot, + G: FnMut(Slot, &HeaviestSubtreeForkChoice) -> Slot, { let unique_votes: HashSet<Slot> = pubkey_votes.iter().map(|(_, (slot, _))| *slot).collect(); let vote_ancestors: HashMap<Slot, HashSet<SlotHashKey>> = unique_votes @@ -4171,6 +4606,13 @@ mod test { .unwrap() .0 ); + assert_eq!( + expected_deepest_slot(slot, heaviest_subtree_fork_choice), + heaviest_subtree_fork_choice + .deepest_slot(&(slot, Hash::default())) + .unwrap() + .0 + ); } } } diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index 2e03d2006ef08e..61f3c07245105c 100644 --- a/core/src/consensus/tower_storage.rs +++ b/core/src/consensus/tower_storage.rs @@ -317,7 +317,7 @@ impl TowerStorage for EtcdTowerStorage { for op_response in response.op_responses() { if let etcd_client::TxnOpResponse::Get(get_response) = op_response { - if let Some(kv) = get_response.kvs().get(0) { + if let Some(kv) = get_response.kvs().first() { return bincode::deserialize_from(kv.value()) .map_err(|e| e.into()) .and_then(|t: SavedTowerVersions| t.try_into_tower(node_pubkey)); diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 978d0c074c3904..e980ddb46b4745 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -1347,7 +1347,12 @@ mod test { fn new(bank_forks: Arc<RwLock<BankForks>>) -> Self { let ancestor_hashes_request_statuses = Arc::new(DashMap::new()); let ancestor_hashes_request_socket = Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap()); - let epoch_schedule = *bank_forks.read().unwrap().root_bank().epoch_schedule(); + let epoch_schedule = bank_forks + .read() + .unwrap() + .root_bank() + .epoch_schedule() + .clone(); let keypair = Keypair::new(); let
requester_cluster_info = Arc::new(ClusterInfo::new( Node::new_localhost_with_pubkey(&keypair.pubkey()).info, diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs index 9d58a5c682f27d..53c2bd64761858 100644 --- a/core/src/repair/duplicate_repair_status.rs +++ b/core/src/repair/duplicate_repair_status.rs @@ -1123,7 +1123,7 @@ pub mod tests { let request_slot = 100; let mut test_setup = setup_add_response_test_pruned(request_slot, 10); - // Insert all the correct ancestory + // Insert all the correct ancestry let tree = test_setup .correct_ancestors_response .iter() diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index c6f2e00df53a26..89f9de78491101 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -408,11 +408,16 @@ async fn handle_connection( )); match futures::future::try_join(send_requests_task, recv_requests_task).await { Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), - Ok(((), Err(err))) => { - debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); - record_error(&err, &stats); + Ok(out) => { + if let (Err(ref err), _) = out { + debug!("send_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(err, &stats); + } + if let (_, Err(ref err)) = out { + debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(err, &stats); + } } - Ok(((), Ok(()))) => (), } drop_connection(remote_pubkey, &connection, &cache).await; if let Entry::Occupied(entry) = router.write().await.entry(remote_address) { @@ -513,15 +518,27 @@ async fn send_requests_task( connection: Connection, mut receiver: AsyncReceiver, stats: Arc, -) { - while let Some(request) = receiver.recv().await { - tokio::task::spawn(send_request_task( - endpoint.clone(), - remote_address, - connection.clone(), - request, - stats.clone(), - )); +) -> Result<(), Error> { + tokio::pin! { + let connection_closed = connection.closed(); + } + loop { + tokio::select! { + biased; + request = receiver.recv() => { + match request { + None => return Ok(()), + Some(request) => tokio::task::spawn(send_request_task( + endpoint.clone(), + remote_address, + connection.clone(), + request, + stats.clone(), + )), + }; + } + err = &mut connection_closed => return Err(Error::from(err)), + } } } diff --git a/core/src/repair/repair_weight.rs b/core/src/repair/repair_weight.rs index 6838021d7574c7..7e65cfaa232658 100644 --- a/core/src/repair/repair_weight.rs +++ b/core/src/repair/repair_weight.rs @@ -338,7 +338,7 @@ impl RepairWeight { } Some(TreeRoot::PrunedRoot(subtree_root)) => { // Even if these orphaned slots were previously pruned, they should be added back to - // `self.trees` as we are no longer sure of their ancestory. + // `self.trees` as we are no longer sure of their ancestry. // After they are repaired there is a chance that they are now part of the rooted path. // This is possible for a duplicate slot with multiple ancestors, if the // version we had pruned before had the wrong ancestor, and the correct version is @@ -892,7 +892,7 @@ impl RepairWeight { ); } - /// Finds any ancestors avaiable from `blockstore` for `slot`. + /// Finds any ancestors available from `blockstore` for `slot`. /// Ancestor search is stopped when finding one that chains to any /// tree in `self.trees` or `self.pruned_trees` or if the ancestor is < self.root. 
/// @@ -2201,21 +2201,21 @@ mod test { let (blockstore, _, mut repair_weight) = setup_orphan_repair_weight(); // Ancestor of slot 4 is slot 2, with an existing subtree rooted at 0 - // because there wass a vote for a descendant + // because there was a vote for a descendant assert_eq!( repair_weight.find_ancestor_subtree_of_slot(&blockstore, 4), (VecDeque::from([2]), Some(TreeRoot::Root(0))) ); // Ancestors of 5 are [1, 3], with an existing subtree rooted at 0 - // because there wass a vote for a descendant + // because there was a vote for a descendant assert_eq!( repair_weight.find_ancestor_subtree_of_slot(&blockstore, 5), (VecDeque::from([1, 3]), Some(TreeRoot::Root(0))) ); // Ancestors of slot 23 are [20, 22], with an existing subtree of 20 - // because there wass a vote for 20 + // because there was a vote for 20 assert_eq!( repair_weight.find_ancestor_subtree_of_slot(&blockstore, 23), (VecDeque::from([20, 22]), Some(TreeRoot::Root(20))) @@ -2553,7 +2553,7 @@ mod test { let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(10, stake); let mut epoch_stakes = bank.epoch_stakes_map().clone(); - let mut epoch_schedule = *bank.epoch_schedule(); + let mut epoch_schedule = bank.epoch_schedule().clone(); // Simulate epoch boundary at slot 10, where half of the stake deactivates // Additional epoch boundary at slot 20, where 30% of the stake reactivates diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 2662d487f13b0e..a12848f2e78df8 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -965,7 +965,7 @@ impl ServeRepair { stats.dropped_requests_outbound_bandwidth += 1; continue; } - // Bypass ping/pong check for requests comming from QUIC endpoint. + // Bypass ping/pong check for requests coming from QUIC endpoint. if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() { let (check, ping_pkt) = Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2bfb72da52d4c0..92577a22016d85 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -67,6 +67,7 @@ use { }, solana_sdk::{ clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, + feature_set, genesis_config::ClusterType, hash::Hash, pubkey::Pubkey, @@ -114,6 +115,7 @@ pub enum HeaviestForkFailures { LockedOut(u64), FailedThreshold( Slot, + /* vote depth */ u64, /* Observed stake */ u64, /* Total stake */ u64, ), @@ -1227,8 +1229,12 @@ impl ReplayStage { let duplicate_slots = blockstore .duplicate_slots_iterator(bank_forks.root_bank().slot()) .unwrap(); - let duplicate_slot_hashes = duplicate_slots - .filter_map(|slot| bank_forks.bank_hash(slot).map(|hash| (slot, hash))); + let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| { + let bank = bank_forks.get(slot)?; + bank.feature_set + .is_active(&feature_set::consume_blockstore_duplicate_proofs::id()) + .then_some((slot, bank.hash())) + }); ( bank_forks.root_bank(), bank_forks.frozen_banks().values().cloned().collect(), @@ -1349,14 +1355,23 @@ impl ReplayStage { ); } - // Should not dump slots for which we were the leader if Some(*my_pubkey) == leader_schedule_cache.slot_leader_at(*duplicate_slot, None) { - panic!("We are attempting to dump a block that we produced. 
\ This indicates that we are producing duplicate blocks, \ - or that there is a bug in our runtime/replay code which \ - causes us to compute different bank hashes than the rest of the cluster. \ - We froze slot {duplicate_slot} with hash {frozen_hash:?} while the cluster hash is {correct_hash}"); + if let Some(bank) = bank_forks.read().unwrap().get(*duplicate_slot) { + bank_hash_details::write_bank_hash_details_file(&bank) + .map_err(|err| { + warn!("Unable to write bank hash details file: {err}"); + }) + .ok(); + } else { + warn!("Unable to get bank for slot {duplicate_slot} from bank forks \ + while attempting to write bank hash details file"); + } + panic!("We are attempting to dump a block that we produced. \ + This indicates that we are producing duplicate blocks, \ + or that there is a bug in our runtime/replay code which \ + causes us to compute different bank hashes than the rest of the cluster. \ + We froze slot {duplicate_slot} with hash {frozen_hash:?} while the cluster hash is {correct_hash}"); } let attempt_no = purge_repair_slot_counter @@ -1507,7 +1522,11 @@ impl ReplayStage { let bank = w_bank_forks .remove(*slot) .expect("BankForks should not have been purged yet"); - let _ = bank_hash_details::write_bank_hash_details_file(&bank); + bank_hash_details::write_bank_hash_details_file(&bank) + .map_err(|err| { + warn!("Unable to write bank hash details file: {err}"); + }) + .ok(); ((*slot, bank.bank_id()), bank) }) .unzip() @@ -2096,7 +2115,11 @@ impl ReplayStage { ); // If we previously marked this slot as duplicate in blockstore, let the state machine know - if !duplicate_slots_tracker.contains(&slot) && blockstore.get_duplicate_slot(slot).is_some() + if bank + .feature_set + .is_active(&feature_set::consume_blockstore_duplicate_proofs::id()) + && !duplicate_slots_tracker.contains(&slot) + && blockstore.get_duplicate_slot(slot).is_some() { let duplicate_state = DuplicateState::new_from_state( slot, @@ -2906,7 +2929,10 @@ impl ReplayStage { SlotStateUpdate::BankFrozen(bank_frozen_state), ); // If we previously marked this slot as duplicate in blockstore, let the state machine know - if !duplicate_slots_tracker.contains(&bank.slot()) + if bank + .feature_set + .is_active(&feature_set::consume_blockstore_duplicate_proofs::id()) + && !duplicate_slots_tracker.contains(&bank.slot()) && blockstore.get_duplicate_slot(bank.slot()).is_some() { let duplicate_state = DuplicateState::new_from_state( @@ -3292,7 +3318,7 @@ impl ReplayStage { .expect("All frozen banks must exist in the Progress map"); stats.vote_threshold = - tower.check_vote_stake_threshold(slot, &stats.voted_stakes, stats.total_stake); + tower.check_vote_stake_thresholds(slot, &stats.voted_stakes, stats.total_stake); stats.is_locked_out = tower.is_locked_out( slot, ancestors @@ -3633,9 +3659,10 @@ impl ReplayStage { if is_locked_out { failure_reasons.push(HeaviestForkFailures::LockedOut(candidate_vote_bank.slot())); } - if let ThresholdDecision::FailedThreshold(fork_stake) = vote_threshold { + if let ThresholdDecision::FailedThreshold(vote_depth, fork_stake) = vote_threshold { failure_reasons.push(HeaviestForkFailures::FailedThreshold( candidate_vote_bank.slot(), + vote_depth, fork_stake, total_threshold_stake, )); @@ -4147,6 +4174,20 @@ pub(crate) mod tests { trees::{tr, Tree}, }; + fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock<BankForks>, + parent: Arc<Bank>, + collector_id: &Pubkey, + slot: Slot, + ) -> Arc<Bank> { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() +
.insert(bank) + .clone_without_scheduler() + } + #[test] fn test_is_partition_detected() { let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1, None::); @@ -4885,8 +4926,7 @@ pub(crate) mod tests { bank0.register_default_tick_for_test(); } bank0.freeze(); - let arc_bank0 = Arc::new(bank0); - let bank_forks = BankForks::new_from_banks(&[arc_bank0], 0); + let bank_forks = BankForks::new_rw_arc(bank0); let exit = Arc::new(AtomicBool::new(false)); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); @@ -4920,7 +4960,12 @@ pub(crate) mod tests { for i in 1..=3 { let prev_bank = bank_forks.read().unwrap().get(i - 1).unwrap(); let slot = prev_bank.slot() + 1; - let bank = Bank::new_from_parent(prev_bank, &Pubkey::default(), slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + prev_bank, + &Pubkey::default(), + slot, + ); let _res = bank.transfer( 10, &genesis_config_info.mint_keypair, @@ -4929,7 +4974,7 @@ pub(crate) mod tests { for _ in 0..genesis_config.ticks_per_slot { bank.register_default_tick_for_test(); } - bank_forks.write().unwrap().insert(bank); + let arc_bank = bank_forks.read().unwrap().get(i).unwrap(); leader_vote(i - 1, &arc_bank, &leader_voting_pubkey); ReplayStage::update_commitment_cache( @@ -5006,7 +5051,7 @@ pub(crate) mod tests { let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer( bank0.get_minimum_balance_for_rent_exemption(0), @@ -5015,7 +5060,11 @@ pub(crate) mod tests { ) .unwrap(); - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + let bank1 = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank0, &Pubkey::default(), 1)) + .clone_without_scheduler(); let slot = bank1.slot(); let (entries, test_signatures) = create_test_transaction_entries( @@ -5077,10 +5126,6 @@ pub(crate) mod tests { None, ); - let bank1 = Bank::new_from_parent(bank0.clone(), &my_node_pubkey, 1); - bank1.process_transaction(&vote_tx).unwrap(); - bank1.freeze(); - // Test confirmations let ancestors = bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = bank_forks @@ -5121,8 +5166,16 @@ pub(crate) mod tests { assert!(confirmed_forks.is_empty()); } + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &my_node_pubkey, + 1, + ); + bank1.process_transaction(&vote_tx).unwrap(); + bank1.freeze(); + // Insert the bank that contains a vote for slot 0, which confirms slot 0 - bank_forks.write().unwrap().insert(bank1); progress.insert( 1, ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0), @@ -6512,8 +6565,9 @@ pub(crate) mod tests { ); // 4 should be the heaviest slot, but should not be votable - // because of lockout. 5 is no longer valid due to it being a duplicate. - let (vote_fork, reset_fork, _) = run_compute_and_select_forks( + // because of lockout. 5 is no longer valid due to it being a duplicate, however we still + // reset onto 5. 
+ let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6522,7 +6576,41 @@ pub(crate) mod tests { None, ); assert!(vote_fork.is_none()); - assert!(reset_fork.is_none()); + assert_eq!(reset_fork, Some(5)); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); + + // Continue building on 5 + let forks = tr(5) / (tr(6) / (tr(7) / (tr(8) / (tr(9)))) / tr(10)); + vote_simulator.bank_forks = bank_forks; + vote_simulator.progress = progress; + vote_simulator.fill_bank_forks(forks, &HashMap::>::new(), true); + let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress); + // 4 is still the heaviest slot, but not votable because of lockout. + // 9 is the deepest slot from our last voted fork (5), so it is what we should + // reset to. + let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + None, + ); + assert!(vote_fork.is_none()); + assert_eq!(reset_fork, Some(9)); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); // If slot 5 is marked as confirmed, it becomes the heaviest bank on same slot again let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); @@ -6544,12 +6632,16 @@ pub(crate) mod tests { &mut purge_repair_slot_counter, SlotStateUpdate::DuplicateConfirmed(duplicate_confirmed_state), ); + // The confirmed hash is detected in `progress`, which means // it's confirmation on the replayed block. This means we have // the right version of the block, so `duplicate_slots_to_repair` // should be empty assert!(duplicate_slots_to_repair.is_empty()); - let (vote_fork, reset_fork, _) = run_compute_and_select_forks( + + // We should still reset to slot 9 as it's the heaviest on the now valid + // fork. + let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6557,9 +6649,40 @@ pub(crate) mod tests { &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); + assert!(vote_fork.is_none()); + assert_eq!(reset_fork.unwrap(), 9); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); + + // Resetting our forks back to how it was should allow us to reset to our + // last vote which was previously marked as invalid and now duplicate confirmed + let bank6_hash = bank_forks.read().unwrap().bank_hash(6).unwrap(); + let _ = vote_simulator + .heaviest_subtree_fork_choice + .split_off(&(6, bank6_hash)); // Should now pick 5 as the heaviest fork from last vote again. 
+ let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + None, + ); assert!(vote_fork.is_none()); assert_eq!(reset_fork.unwrap(), 5); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); } #[test] @@ -6860,7 +6983,7 @@ pub(crate) mod tests { let mut epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); // Mark fork choice branch as invalid so select forks below doesn't panic - // on a nonexistent `heaviest_bank_on_same_fork` after we dump the duplciate fork. + // on a nonexistent `heaviest_bank_on_same_fork` after we dump the duplicate fork. let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state( duplicate_confirmed_bank2_hash, || progress.is_dead(2).unwrap_or(false), @@ -7238,7 +7361,12 @@ pub(crate) mod tests { let (voting_sender, voting_receiver) = unbounded(); // Simulate landing a vote for slot 0 landing in slot 1 - let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + 1, + ); bank1.fill_bank_with_ticks_for_tests(); tower.record_bank_vote(&bank0); ReplayStage::push_vote( @@ -7279,7 +7407,12 @@ pub(crate) mod tests { // Trying to refresh the vote for bank 0 in bank 1 or bank 2 won't succeed because // the last vote has landed already - let bank2 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 2)); + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank1.clone(), + &Pubkey::default(), + 2, + ); bank2.fill_bank_with_ticks_for_tests(); bank2.freeze(); for refresh_bank in &[&bank1, &bank2] { @@ -7373,8 +7506,12 @@ pub(crate) mod tests { let mut parent_bank = bank2.clone(); for _ in 0..MAX_PROCESSING_AGE { let slot = parent_bank.slot() + 1; - parent_bank = - Arc::new(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)); + parent_bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + parent_bank, + &Pubkey::default(), + slot, + ); parent_bank.fill_bank_with_ticks_for_tests(); parent_bank.freeze(); } @@ -7429,11 +7566,12 @@ pub(crate) mod tests { // Processing the vote transaction should be valid let expired_bank_child_slot = expired_bank.slot() + 1; - let expired_bank_child = Arc::new(Bank::new_from_parent( + let expired_bank_child = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), expired_bank.clone(), &Pubkey::default(), expired_bank_child_slot, - )); + ); expired_bank_child.process_transaction(vote_tx).unwrap(); let vote_account = expired_bank_child .get_vote_account(&my_vote_pubkey) @@ -7449,11 +7587,12 @@ pub(crate) mod tests { // 1) The vote for slot 1 hasn't landed // 2) The latest refresh vote transaction's recent blockhash (the sibling's hash) doesn't exist // This will still not refresh because `MAX_VOTE_REFRESH_INTERVAL_MILLIS` has not expired yet - let expired_bank_sibling = Arc::new(Bank::new_from_parent( + let expired_bank_sibling = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank2, &Pubkey::default(), expired_bank_child.slot() + 1, - )); + ); expired_bank_sibling.fill_bank_with_ticks_for_tests(); expired_bank_sibling.freeze(); // Set the last refresh to now, shouldn't refresh because the last refresh just happened. 
@@ -7541,7 +7680,12 @@ pub(crate) mod tests { parent_bank.last_blockhash() ); assert_eq!(tower.last_voted_slot().unwrap(), parent_bank.slot()); - let bank = Bank::new_from_parent(parent_bank, &Pubkey::default(), my_slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks, + parent_bank, + &Pubkey::default(), + my_slot, + ); bank.fill_bank_with_ticks_for_tests(); if make_it_landing { bank.process_transaction(vote_tx).unwrap(); @@ -7557,7 +7701,6 @@ pub(crate) mod tests { 0, ) }); - bank_forks.write().unwrap().insert(bank); bank_forks.read().unwrap().get(my_slot).unwrap() } @@ -7598,8 +7741,12 @@ pub(crate) mod tests { // Add a new fork starting from 0 with bigger slot number, we assume it has a bigger // weight, but we cannot switch because of lockout. let other_fork_slot = 1; - let other_fork_bank = - Bank::new_from_parent(bank0.clone(), &Pubkey::default(), other_fork_slot); + let other_fork_bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + other_fork_slot, + ); other_fork_bank.fill_bank_with_ticks_for_tests(); other_fork_bank.freeze(); progress.entry(other_fork_slot).or_insert_with(|| { @@ -7612,7 +7759,6 @@ pub(crate) mod tests { 0, ) }); - bank_forks.write().unwrap().insert(other_fork_bank); let (voting_sender, voting_receiver) = unbounded(); let mut cursor = Cursor::default(); @@ -7658,7 +7804,12 @@ pub(crate) mod tests { let last_voted_slot = tower.last_voted_slot().unwrap(); while new_bank.is_in_slot_hashes_history(&last_voted_slot) { let new_slot = new_bank.slot() + 1; - let bank = Bank::new_from_parent(new_bank, &Pubkey::default(), new_slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + new_bank, + &Pubkey::default(), + new_slot, + ); bank.fill_bank_with_ticks_for_tests(); bank.freeze(); progress.entry(new_slot).or_insert_with(|| { @@ -7671,7 +7822,6 @@ pub(crate) mod tests { 0, ) }); - bank_forks.write().unwrap().insert(bank); new_bank = bank_forks.read().unwrap().get(new_slot).unwrap(); } let tip_of_voted_fork = new_bank.slot(); diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 62733953cc724f..fd72b8b8eebb3b 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -10,7 +10,9 @@ use { solana_perf::packet::{PacketBatch, PacketBatchRecycler, PacketFlags, PACKETS_PER_BATCH}, solana_runtime::bank_forks::BankForks, solana_sdk::{ - clock::DEFAULT_MS_PER_SLOT, + clock::{Slot, DEFAULT_MS_PER_SLOT}, + epoch_schedule::EpochSchedule, + feature_set::{self, FeatureSet}, packet::{Meta, PACKET_DATA_SIZE}, pubkey::Pubkey, }, @@ -50,12 +52,20 @@ impl ShredFetchStage { .as_ref() .map(|(_, cluster_info)| cluster_info.keypair().clone()); - let (mut last_root, mut last_slot, mut slots_per_epoch) = { + let ( + mut last_root, + mut slots_per_epoch, + mut feature_set, + mut epoch_schedule, + mut last_slot, + ) = { let bank_forks_r = bank_forks.read().unwrap(); let root_bank = bank_forks_r.root_bank(); ( root_bank.slot(), root_bank.get_slots_in_epoch(root_bank.epoch()), + root_bank.feature_set.clone(), + root_bank.epoch_schedule().clone(), bank_forks_r.highest_slot(), ) }; @@ -69,6 +79,8 @@ impl ShredFetchStage { last_slot = bank_forks_r.highest_slot(); bank_forks_r.root_bank() }; + feature_set = root_bank.feature_set.clone(); + epoch_schedule = root_bank.epoch_schedule().clone(); last_root = root_bank.slot(); slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch()); keypair = repair_context @@ -92,10 +104,19 @@ impl ShredFetchStage { // 
Limit shreds to 2 epochs away. let max_slot = last_slot + 2 * slots_per_epoch; + let should_drop_legacy_shreds = + |shred_slot| should_drop_legacy_shreds(shred_slot, &feature_set, &epoch_schedule); let turbine_disabled = turbine_disabled.load(Ordering::Relaxed); for packet in packet_batch.iter_mut().filter(|p| !p.meta().discard()) { if turbine_disabled - || should_discard_shred(packet, last_root, max_slot, shred_version, &mut stats) + || should_discard_shred( + packet, + last_root, + max_slot, + shred_version, + should_drop_legacy_shreds, + &mut stats, + ) { packet.meta_mut().set_discard(true); } else { @@ -373,6 +394,22 @@ pub(crate) fn receive_repair_quic_packets( } } +#[must_use] +fn should_drop_legacy_shreds( + shred_slot: Slot, + feature_set: &FeatureSet, + epoch_schedule: &EpochSchedule, +) -> bool { + match feature_set.activated_slot(&feature_set::drop_legacy_shreds::id()) { + None => false, + Some(feature_slot) => { + let feature_epoch = epoch_schedule.get_epoch(feature_slot); + let shred_epoch = epoch_schedule.get_epoch(shred_slot); + feature_epoch < shred_epoch + } + } +} + #[cfg(test)] mod tests { use { @@ -413,6 +450,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); let coding = solana_ledger::shred::Shredder::generate_coding_shreds( @@ -426,6 +464,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); } @@ -447,6 +486,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); assert_eq!(stats.index_overrun, 1); @@ -468,12 +508,18 @@ mod tests { 3, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); assert_eq!(stats.slot_out_of_range, 1); assert!(should_discard_shred( - &packet, last_root, max_slot, /*shred_version:*/ 345, &mut stats, + &packet, + last_root, + max_slot, + 345, // shred_version + |_| false, // should_drop_legacy_shreds + &mut stats, )); assert_eq!(stats.shred_version_mismatch, 1); @@ -483,6 +529,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); @@ -504,6 +551,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); @@ -515,6 +563,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); } diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 3749fbf9ae3697..bec85780fc3ede 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -393,6 +393,29 @@ pub struct SystemMonitorStatsReportConfig { pub report_os_disk_stats: bool, } +#[cfg_attr(not(target_os = "linux"), allow(dead_code))] +enum InterestingLimit { + Recommend(i64), + QueryOnly, +} + +#[cfg(target_os = "linux")] +const INTERESTING_LIMITS: &[(&str, InterestingLimit)] = &[ + ("net.core.rmem_max", InterestingLimit::Recommend(134217728)), + ( + "net.core.rmem_default", + InterestingLimit::Recommend(134217728), + ), + ("net.core.wmem_max", InterestingLimit::Recommend(134217728)), + ( + "net.core.wmem_default", + InterestingLimit::Recommend(134217728), + ), + ("vm.max_map_count", InterestingLimit::Recommend(1000000)), + ("net.core.optmem_max", InterestingLimit::QueryOnly), + ("net.core.netdev_max_backlog", InterestingLimit::QueryOnly), +]; + impl SystemMonitorService { pub fn new(exit: Arc, config: SystemMonitorStatsReportConfig) -> Self { info!("Starting SystemMonitorService"); @@ 
-406,27 +429,8 @@ impl SystemMonitorService { Self { thread_hdl } } - #[cfg_attr(not(target_os = "linux"), allow(dead_code))] - fn linux_get_recommended_network_limits() -> HashMap<&'static str, i64> { - // Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360 - let mut recommended_limits: HashMap<&str, i64> = HashMap::default(); - recommended_limits.insert("net.core.rmem_max", 134217728); - recommended_limits.insert("net.core.rmem_default", 134217728); - recommended_limits.insert("net.core.wmem_max", 134217728); - recommended_limits.insert("net.core.wmem_default", 134217728); - recommended_limits.insert("vm.max_map_count", 1000000); - - // Additionally collect the following limits - recommended_limits.insert("net.core.optmem_max", 0); - recommended_limits.insert("net.core.netdev_max_backlog", 0); - - recommended_limits - } - #[cfg(target_os = "linux")] - fn linux_get_current_network_limits( - recommended_limits: &HashMap<&'static str, i64>, - ) -> HashMap<&'static str, i64> { + fn linux_get_current_network_limits() -> Vec<(&'static str, &'static InterestingLimit, i64)> { use sysctl::Sysctl; fn sysctl_read(name: &str) -> Result { @@ -435,47 +439,48 @@ impl SystemMonitorService { Ok(val) } - let mut current_limits: HashMap<&str, i64> = HashMap::default(); - for (key, _) in recommended_limits.iter() { - let current_val = match sysctl_read(key) { - Ok(val) => val.parse::().unwrap(), - Err(e) => { - error!("Failed to query value for {}: {}", key, e); - -1 - } - }; - current_limits.insert(key, current_val); + fn normalize_err(key: &str, error: E) -> String { + format!("Failed to query value for {}: {}", key, error) } - current_limits + INTERESTING_LIMITS + .iter() + .map(|(key, interesting_limit)| { + let current_value = sysctl_read(key) + .map_err(|e| normalize_err(key, e)) + .and_then(|val| val.parse::().map_err(|e| normalize_err(key, e))) + .unwrap_or_else(|e| { + error!("{}", e); + -1 + }); + (*key, interesting_limit, current_value) + }) + .collect::>() } #[cfg_attr(not(target_os = "linux"), allow(dead_code))] fn linux_report_network_limits( - current_limits: &HashMap<&str, i64>, - recommended_limits: &HashMap<&'static str, i64>, + current_limits: &[(&'static str, &'static InterestingLimit, i64)], ) -> bool { - let mut check_failed = false; - for (key, recommended_val) in recommended_limits.iter() { - let current_val = *current_limits.get(key).unwrap_or(&-1); - if current_val < *recommended_val { - datapoint_warn!("os-config", (key, current_val, i64)); - warn!( - " {}: recommended={} current={}, too small", - key, recommended_val, current_val - ); - check_failed = true; - } else { - datapoint_info!("os-config", (key, current_val, i64)); - info!( - " {}: recommended={} current={}", - key, recommended_val, current_val - ); - } - } - if check_failed { - datapoint_warn!("os-config", ("network_limit_test_failed", 1, i64)); - } - !check_failed + current_limits + .iter() + .map(|(key, interesting_limit, current_value)| { + datapoint_warn!("os-config", (key, *current_value, i64)); + match interesting_limit { + InterestingLimit::Recommend(recommended_value) if current_value < recommended_value => { + warn!(" {key}: recommended={recommended_value} current={current_value}, too small"); + false + } + InterestingLimit::Recommend(recommended_value) => { + info!(" {key}: recommended={recommended_value} current={current_value}"); + true + } + InterestingLimit::QueryOnly => { + info!(" {key}: report-only -- current={current_value}"); + true + } + } + }) + 
.all(|good| good) } #[cfg(not(target_os = "linux"))] @@ -487,9 +492,8 @@ impl SystemMonitorService { #[cfg(target_os = "linux")] pub fn check_os_network_limits() -> bool { datapoint_info!("os-config", ("platform", platform_id(), String)); - let recommended_limits = Self::linux_get_recommended_network_limits(); - let current_limits = Self::linux_get_current_network_limits(&recommended_limits); - Self::linux_report_network_limits(¤t_limits, &recommended_limits) + let current_limits = Self::linux_get_current_network_limits(); + Self::linux_report_network_limits(¤t_limits) } #[cfg(target_os = "linux")] diff --git a/core/src/tpu.rs b/core/src/tpu.rs index e6db8dc60db9e2..0456a33a8d91f4 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -31,10 +31,10 @@ use { rpc_subscriptions::RpcSubscriptions, }, solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, - solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair}, + solana_sdk::{clock::Slot, pubkey::Pubkey, quic::NotifyKeyUpdate, signature::Keypair}, solana_streamer::{ nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, - quic::{spawn_server, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, + quic::{spawn_server, SpawnServerResult, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, streamer::StakedNodes, }, solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType}, @@ -111,7 +111,7 @@ impl Tpu { prioritization_fee_cache: &Arc, block_production_method: BlockProductionMethod, _generator_config: Option, /* vestigial code for replay invalidator */ - ) -> Self { + ) -> (Self, Vec>) { let TpuSockets { transactions: transactions_sockets, transaction_forwards: tpu_forwards_sockets, @@ -148,7 +148,11 @@ impl Tpu { let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (_, tpu_quic_t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: tpu_quic_t, + key_updater, + } = spawn_server( "quic_streamer_tpu", transactions_quic_sockets, keypair, @@ -168,7 +172,11 @@ impl Tpu { ) .unwrap(); - let (_, tpu_forwards_quic_t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: tpu_forwards_quic_t, + key_updater: forwards_key_updater, + } = spawn_server( "quic_streamer_tpu_forwards", transactions_forwards_quic_sockets, keypair, @@ -259,19 +267,22 @@ impl Tpu { turbine_quic_endpoint_sender, ); - Self { - fetch_stage, - sigverify_stage, - vote_sigverify_stage, - banking_stage, - cluster_info_vote_listener, - broadcast_stage, - tpu_quic_t, - tpu_forwards_quic_t, - tpu_entry_notifier, - staked_nodes_updater_service, - tracer_thread_hdl, - } + ( + Self { + fetch_stage, + sigverify_stage, + vote_sigverify_stage, + banking_stage, + cluster_info_vote_listener, + broadcast_stage, + tpu_quic_t, + tpu_forwards_quic_t, + tpu_entry_notifier, + staked_nodes_updater_service, + tracer_thread_hdl, + }, + vec![key_updater, forwards_key_updater], + ) } pub fn join(self) -> thread::Result<()> { diff --git a/core/src/tpu_entry_notifier.rs b/core/src/tpu_entry_notifier.rs index 730a3b14fa5818..22994455e88814 100644 --- a/core/src/tpu_entry_notifier.rs +++ b/core/src/tpu_entry_notifier.rs @@ -29,6 +29,7 @@ impl TpuEntryNotifier { .spawn(move || { let mut current_slot = 0; let mut current_index = 0; + let mut current_transaction_index = 0; loop { if exit.load(Ordering::Relaxed) { break; @@ -41,6 +42,7 @@ impl TpuEntryNotifier { &broadcast_entry_sender, &mut current_slot, &mut current_index, + &mut current_transaction_index, ) { break; } @@ -57,11 +59,13 @@ impl 
TpuEntryNotifier { broadcast_entry_sender: &Sender, current_slot: &mut u64, current_index: &mut usize, + current_transaction_index: &mut usize, ) -> Result<(), RecvTimeoutError> { let (bank, (entry, tick_height)) = entry_receiver.recv_timeout(Duration::from_secs(1))?; let slot = bank.slot(); let index = if slot != *current_slot { *current_index = 0; + *current_transaction_index = 0; *current_slot = slot; 0 } else { @@ -78,11 +82,13 @@ impl TpuEntryNotifier { slot, index, entry: entry_summary, + starting_transaction_index: *current_transaction_index, }) { warn!( "Failed to send slot {slot:?} entry {index:?} from Tpu to EntryNotifierService, error {err:?}", ); } + *current_transaction_index += entry.transactions.len(); if let Err(err) = broadcast_entry_sender.send((bank, (entry, tick_height))) { warn!( diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 214fae3dceac0f..2fe7e08dd60f8b 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -196,7 +196,12 @@ impl Tvu { let (dumped_slots_sender, dumped_slots_receiver) = unbounded(); let (popular_pruned_forks_sender, popular_pruned_forks_receiver) = unbounded(); let window_service = { - let epoch_schedule = *bank_forks.read().unwrap().working_bank().epoch_schedule(); + let epoch_schedule = bank_forks + .read() + .unwrap() + .working_bank() + .epoch_schedule() + .clone(); let repair_info = RepairInfo { bank_forks: bank_forks.clone(), epoch_schedule, diff --git a/core/src/validator.rs b/core/src/validator.rs index d73bf58e868697..13c454631625a0 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -108,7 +108,7 @@ use { clock::Slot, epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, exit::Exit, - genesis_config::GenesisConfig, + genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, shred_version::compute_shred_version, @@ -118,6 +118,7 @@ use { solana_send_transaction_service::send_transaction_service, solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, solana_turbine::{self, broadcast_stage::BroadcastStageType}, + solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::vote_state, solana_wen_restart::wen_restart::wait_for_wen_restart, std::{ @@ -144,6 +145,7 @@ const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80; pub enum BlockVerificationMethod { #[default] BlockstoreProcessor, + UnifiedScheduler, } impl BlockVerificationMethod { @@ -168,6 +170,7 @@ impl BlockVerificationMethod { pub enum BlockProductionMethod { #[default] ThreadLocalMultiIterator, + CentralScheduler, } impl BlockProductionMethod { @@ -468,12 +471,12 @@ pub struct Validator { blockstore_metric_report_service: BlockstoreMetricReportService, accounts_background_service: AccountsBackgroundService, accounts_hash_verifier: AccountsHashVerifier, - turbine_quic_endpoint: Endpoint, + turbine_quic_endpoint: Option, turbine_quic_endpoint_runtime: Option, - turbine_quic_endpoint_join_handle: solana_turbine::quic_endpoint::AsyncTryJoinHandle, - repair_quic_endpoint: Endpoint, + turbine_quic_endpoint_join_handle: Option, + repair_quic_endpoint: Option, repair_quic_endpoint_runtime: Option, - repair_quic_endpoint_join_handle: repair::quic_endpoint::AsyncTryJoinHandle, + repair_quic_endpoint_join_handle: Option, } impl Validator { @@ -747,13 +750,7 @@ impl Validator { let (snapshot_package_sender, snapshot_packager_service) = if config.snapshot_config.should_generate_snapshots() { - // filler accounts make snapshots invalid for use - // so, do not publish that we have snapshots - let enable_gossip_push = config - 
.accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config.count == 0) - .unwrap_or(true); + let enable_gossip_push = true; let (snapshot_package_sender, snapshot_package_receiver) = crossbeam_channel::unbounded(); let snapshot_packager_service = SnapshotPackagerService::new( @@ -818,6 +815,24 @@ impl Validator { // (by both replay stage and banking stage) let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); + match &config.block_verification_method { + BlockVerificationMethod::BlockstoreProcessor => { + info!("no scheduler pool is installed for block verification..."); + } + BlockVerificationMethod::UnifiedScheduler => { + let scheduler_pool = DefaultSchedulerPool::new_dyn( + config.runtime_config.log_messages_bytes_limit, + transaction_status_sender.clone(), + Some(replay_vote_sender.clone()), + prioritization_fee_cache.clone(), + ); + bank_forks + .write() + .unwrap() + .install_scheduler_pool(scheduler_pool); + } + } + let leader_schedule_cache = Arc::new(leader_schedule_cache); let entry_notification_sender = entry_notifier_service .as_ref() @@ -1085,13 +1100,6 @@ impl Validator { exit.clone(), ); - *admin_rpc_service_post_init.write().unwrap() = Some(AdminRpcRequestMetadataPostInit { - bank_forks: bank_forks.clone(), - cluster_info: cluster_info.clone(), - vote_account: *vote_account, - repair_whitelist: config.repair_whitelist.clone(), - }); - let waited_for_supermajority = wait_for_supermajority( config, Some(&mut process_blockstore), @@ -1162,58 +1170,74 @@ impl Validator { // Outside test-validator crate, we always need a tokio runtime (and // the respective handle) to initialize the turbine QUIC endpoint. let current_runtime_handle = tokio::runtime::Handle::try_current(); - let turbine_quic_endpoint_runtime = current_runtime_handle.is_err().then(|| { - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name("solTurbineQuic") - .build() - .unwrap() - }); + let turbine_quic_endpoint_runtime = (current_runtime_handle.is_err() + && genesis_config.cluster_type != ClusterType::MainnetBeta) + .then(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("solTurbineQuic") + .build() + .unwrap() + }); let (turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded(); let ( turbine_quic_endpoint, turbine_quic_endpoint_sender, turbine_quic_endpoint_join_handle, - ) = solana_turbine::quic_endpoint::new_quic_endpoint( - turbine_quic_endpoint_runtime - .as_ref() - .map(TokioRuntime::handle) - .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), - &identity_keypair, - node.sockets.tvu_quic, - node.info - .tvu(Protocol::QUIC) - .map_err(|err| format!("Invalid QUIC TVU address: {err:?}"))? - .ip(), - turbine_quic_endpoint_sender, - bank_forks.clone(), - ) - .unwrap(); - - // Repair quic endpoint. 
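Both QUIC endpoint setups in `validator.rs` are reworked on the same pattern: when the cluster is `MainnetBeta`, no dedicated Tokio runtime is built, the endpoint and join handle become `None`, and a channel whose receiver is dropped immediately stands in for the real sender so downstream plumbing keeps its shape. A stripped-down sketch of that pattern with stand-in types, not the literal validator code:

```rust
use tokio::sync::mpsc::{channel, Sender};

#[derive(PartialEq)]
enum ClusterType {
    MainnetBeta,
    #[allow(dead_code)]
    Testnet,
}

struct Endpoint; // stand-in for the QUIC endpoint type
struct JoinHandle; // stand-in for the async join handle

// Stand-in for the real new_quic_endpoint(..) constructors.
fn new_quic_endpoint() -> (Endpoint, Sender<()>, JoinHandle) {
    let (sender, _receiver) = channel(1);
    (Endpoint, sender, JoinHandle)
}

fn make_quic_endpoint(
    cluster_type: &ClusterType,
) -> (Option<Endpoint>, Sender<()>, Option<JoinHandle>) {
    if *cluster_type == ClusterType::MainnetBeta {
        // Disabled on mainnet-beta: the sender feeds a dropped receiver and
        // there is no endpoint or join handle to tear down on exit.
        let (sender, _receiver) = channel(1);
        (None, sender, None)
    } else {
        let (endpoint, sender, join_handle) = new_quic_endpoint();
        (Some(endpoint), sender, Some(join_handle))
    }
}

fn main() {
    let (endpoint, _sender, join_handle) = make_quic_endpoint(&ClusterType::MainnetBeta);
    assert!(endpoint.is_none() && join_handle.is_none());
}
```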
- let repair_quic_endpoint_runtime = current_runtime_handle.is_err().then(|| { - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name("solRepairQuic") - .build() - .unwrap() - }); - let (repair_quic_endpoint, repair_quic_endpoint_sender, repair_quic_endpoint_join_handle) = - repair::quic_endpoint::new_quic_endpoint( - repair_quic_endpoint_runtime + ) = if genesis_config.cluster_type == ClusterType::MainnetBeta { + let (sender, _receiver) = tokio::sync::mpsc::channel(1); + (None, sender, None) + } else { + solana_turbine::quic_endpoint::new_quic_endpoint( + turbine_quic_endpoint_runtime .as_ref() .map(TokioRuntime::handle) .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), &identity_keypair, - node.sockets.serve_repair_quic, + node.sockets.tvu_quic, node.info - .serve_repair(Protocol::QUIC) - .map_err(|err| format!("Invalid QUIC serve-repair address: {err:?}"))? + .tvu(Protocol::QUIC) + .map_err(|err| format!("Invalid QUIC TVU address: {err:?}"))? .ip(), - repair_quic_endpoint_sender, + turbine_quic_endpoint_sender, bank_forks.clone(), ) - .unwrap(); + .map(|(endpoint, sender, join_handle)| (Some(endpoint), sender, Some(join_handle))) + .unwrap() + }; + + // Repair quic endpoint. + let repair_quic_endpoint_runtime = (current_runtime_handle.is_err() + && genesis_config.cluster_type != ClusterType::MainnetBeta) + .then(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("solRepairQuic") + .build() + .unwrap() + }); + let (repair_quic_endpoint, repair_quic_endpoint_sender, repair_quic_endpoint_join_handle) = + if genesis_config.cluster_type == ClusterType::MainnetBeta { + let (sender, _receiver) = tokio::sync::mpsc::channel(1); + (None, sender, None) + } else { + repair::quic_endpoint::new_quic_endpoint( + repair_quic_endpoint_runtime + .as_ref() + .map(TokioRuntime::handle) + .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), + &identity_keypair, + node.sockets.serve_repair_quic, + node.info + .serve_repair(Protocol::QUIC) + .map_err(|err| format!("Invalid QUIC serve-repair address: {err:?}"))? 
+ .ip(), + repair_quic_endpoint_sender, + bank_forks.clone(), + ) + .map(|(endpoint, sender, join_handle)| (Some(endpoint), sender, Some(join_handle))) + .unwrap() + }; let in_wen_restart = config.wen_restart_proto_path.is_some() && !waited_for_supermajority; let tower = match process_blockstore.process_to_create_tower() { @@ -1300,7 +1324,7 @@ impl Validator { }; } - let tpu = Tpu::new( + let (tpu, mut key_notifies) = Tpu::new( &cluster_info, &poh_recorder, entry_receiver, @@ -1351,6 +1375,16 @@ impl Validator { ); *start_progress.write().unwrap() = ValidatorStartProgress::Running; + key_notifies.push(connection_cache); + + *admin_rpc_service_post_init.write().unwrap() = Some(AdminRpcRequestMetadataPostInit { + bank_forks: bank_forks.clone(), + cluster_info: cluster_info.clone(), + vote_account: *vote_account, + repair_whitelist: config.repair_whitelist.clone(), + notifies: key_notifies, + }); + Ok(Self { stats_reporter_service, gossip_service, @@ -1496,14 +1530,18 @@ impl Validator { } self.gossip_service.join().expect("gossip_service"); - repair::quic_endpoint::close_quic_endpoint(&self.repair_quic_endpoint); + if let Some(repair_quic_endpoint) = &self.repair_quic_endpoint { + repair::quic_endpoint::close_quic_endpoint(repair_quic_endpoint); + } self.serve_repair_service .join() .expect("serve_repair_service"); - self.repair_quic_endpoint_runtime - .map(|runtime| runtime.block_on(self.repair_quic_endpoint_join_handle)) - .transpose() - .unwrap(); + if let Some(repair_quic_endpoint_join_handle) = self.repair_quic_endpoint_join_handle { + self.repair_quic_endpoint_runtime + .map(|runtime| runtime.block_on(repair_quic_endpoint_join_handle)) + .transpose() + .unwrap(); + }; self.stats_reporter_service .join() .expect("stats_reporter_service"); @@ -1516,13 +1554,17 @@ impl Validator { self.accounts_hash_verifier .join() .expect("accounts_hash_verifier"); - solana_turbine::quic_endpoint::close_quic_endpoint(&self.turbine_quic_endpoint); + if let Some(turbine_quic_endpoint) = &self.turbine_quic_endpoint { + solana_turbine::quic_endpoint::close_quic_endpoint(turbine_quic_endpoint); + } self.tpu.join().expect("tpu"); self.tvu.join().expect("tvu"); - self.turbine_quic_endpoint_runtime - .map(|runtime| runtime.block_on(self.turbine_quic_endpoint_join_handle)) - .transpose() - .unwrap(); + if let Some(turbine_quic_endpoint_join_handle) = self.turbine_quic_endpoint_join_handle { + self.turbine_quic_endpoint_runtime + .map(|runtime| runtime.block_on(turbine_quic_endpoint_join_handle)) + .transpose() + .unwrap(); + } self.completed_data_sets_service .join() .expect("completed_data_sets_service"); @@ -1806,7 +1848,8 @@ fn load_blockstore( .map(|service| service.sender()), accounts_update_notifier, exit, - ); + ) + .map_err(|err| err.to_string())?; // Before replay starts, set the callbacks in each of the banks in BankForks so that // all dropped banks come through the `pruned_banks_receiver` channel. 
This way all bank diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index d8986d90e5db76..58d8a40d2eb4c6 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -64,6 +64,8 @@ impl VoteSimulator { latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks::default(), } } + + #[cfg(feature = "dev-context-only-utils")] pub fn fill_bank_forks( &mut self, forks: Tree, @@ -84,6 +86,12 @@ impl VoteSimulator { let parent = *walk.get_parent().unwrap().data(); let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap(); let new_bank = Bank::new_from_parent(parent_bank.clone(), &Pubkey::default(), slot); + let new_bank = self + .bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); self.progress .entry(slot) .or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0)); @@ -131,7 +139,6 @@ impl VoteSimulator { Some((new_bank.parent_slot(), new_bank.parent_hash())), ); } - self.bank_forks.write().unwrap().insert(new_bank); walk.forward(); } @@ -357,7 +364,7 @@ pub fn initialize_state( ); genesis_config.poh_config.hashes_per_tick = Some(2); - let bank0 = Bank::new_for_tests(&genesis_config); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); for pubkey in validator_keypairs_map.keys() { bank0.transfer(10_000, &mint_keypair, pubkey).unwrap(); @@ -372,7 +379,6 @@ pub fn initialize_state( 0, ForkProgress::new_from_bank(&bank0, bank0.collector_id(), &Pubkey::default(), None, 0, 0), ); - let bank_forks = BankForks::new_rw_arc(bank0); let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_bank_forks(bank_forks.clone()); (bank_forks, progress, heaviest_subtree_fork_choice) diff --git a/core/src/window_service.rs b/core/src/window_service.rs index a68a20e2078471..49418c82683a5d 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -1,6 +1,7 @@ //! `window_service` handles the data plane incoming shreds, storing them in //! blockstore and retransmitting where required //! 
+ use { crate::{ cluster_info_vote_listener::VerifiedVoteReceiver, @@ -28,7 +29,12 @@ use { solana_metrics::inc_new_counter_error, solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, - solana_sdk::clock::Slot, + solana_runtime::bank_forks::BankForks, + solana_sdk::{ + clock::{Slot, DEFAULT_MS_PER_SLOT}, + feature_set, + }, + solana_turbine::cluster_nodes, std::{ cmp::Reverse, collections::{HashMap, HashSet}, @@ -142,12 +148,54 @@ fn run_check_duplicate( blockstore: &Blockstore, shred_receiver: &Receiver, duplicate_slots_sender: &DuplicateSlotSender, + bank_forks: &RwLock, ) -> Result<()> { + let mut root_bank = bank_forks.read().unwrap().root_bank(); + let mut last_updated = Instant::now(); let check_duplicate = |shred: PossibleDuplicateShred| -> Result<()> { + if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT { + // Grabs bank forks lock once a slot + last_updated = Instant::now(); + root_bank = bank_forks.read().unwrap().root_bank(); + } let shred_slot = shred.slot(); + let send_index_and_erasure_conflicts = cluster_nodes::check_feature_activation( + &feature_set::index_erasure_conflict_duplicate_proofs::id(), + shred_slot, + &root_bank, + ); + let merkle_conflict_duplicate_proofs = cluster_nodes::check_feature_activation( + &feature_set::merkle_conflict_duplicate_proofs::id(), + shred_slot, + &root_bank, + ); let (shred1, shred2) = match shred { - PossibleDuplicateShred::LastIndexConflict(shred, conflict) => (shred, conflict), - PossibleDuplicateShred::ErasureConflict(shred, conflict) => (shred, conflict), + PossibleDuplicateShred::LastIndexConflict(shred, conflict) + | PossibleDuplicateShred::ErasureConflict(shred, conflict) => { + if send_index_and_erasure_conflicts { + (shred, conflict) + } else { + return Ok(()); + } + } + PossibleDuplicateShred::MerkleRootConflict(shred, conflict) => { + if merkle_conflict_duplicate_proofs { + // Although this proof can be immediately stored on detection, we wait until + // here in order to check the feature flag, as storage in blockstore can + // preclude the detection of other duplicate proofs in this slot + if blockstore.has_duplicate_shreds_in_slot(shred_slot) { + return Ok(()); + } + blockstore.store_duplicate_slot( + shred_slot, + conflict.clone(), + shred.clone().into_payload(), + )?; + (shred, conflict) + } else { + return Ok(()); + } + } PossibleDuplicateShred::Exists(shred) => { // Unlike the other cases we have to wait until here to decide to handle the duplicate and store // in blockstore. 
This is because the duplicate could have been part of the same insert batch, @@ -342,6 +390,7 @@ impl WindowService { let outstanding_requests = Arc::>::default(); let cluster_info = repair_info.cluster_info.clone(); + let bank_forks = repair_info.bank_forks.clone(); let repair_service = RepairService::new( blockstore.clone(), @@ -366,6 +415,7 @@ impl WindowService { blockstore.clone(), duplicate_receiver, duplicate_slots_sender, + bank_forks, ); let t_insert = Self::start_window_insert_thread( @@ -392,6 +442,7 @@ impl WindowService { blockstore: Arc, duplicate_receiver: Receiver, duplicate_slots_sender: DuplicateSlotSender, + bank_forks: Arc>, ) -> JoinHandle<()> { let handle_error = || { inc_new_counter_error!("solana-check-duplicate-error", 1, 1); @@ -405,6 +456,7 @@ impl WindowService { &blockstore, &duplicate_receiver, &duplicate_slots_sender, + &bank_forks, ) { if Self::should_exit_on_error(e, &handle_error) { break; @@ -507,9 +559,11 @@ mod test { solana_gossip::contact_info::ContactInfo, solana_ledger::{ blockstore::{make_many_slot_entries, Blockstore}, + genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete, shred::{ProcessShredsStats, Shredder}, }, + solana_runtime::bank::Bank, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, @@ -556,6 +610,8 @@ mod test { #[test] fn test_run_check_duplicate() { let ledger_path = get_tmp_ledger_path_auto_delete!(); + let genesis_config = create_genesis_config(10_000).genesis_config; + let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let (sender, receiver) = unbounded(); let (duplicate_slot_sender, duplicate_slot_receiver) = unbounded(); @@ -587,6 +643,7 @@ mod test { &blockstore, &receiver, &duplicate_slot_sender, + &bank_forks, ) .unwrap(); @@ -616,6 +673,8 @@ mod test { Arc::new(keypair), SocketAddrSpace::Unspecified, )); + let genesis_config = create_genesis_config(10_000).genesis_config; + let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); // Start duplicate thread receiving and inserting duplicates let t_check_duplicate = WindowService::start_check_duplicate_thread( @@ -624,6 +683,7 @@ mod test { blockstore.clone(), duplicate_shred_receiver, duplicate_slot_sender, + bank_forks, ); let handle_duplicate = |shred| { diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 2ee63e6ef68708..6a62ccb5a98674 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -20,7 +20,7 @@ use { AbsRequestHandlers, AbsRequestSender, AccountsBackgroundService, DroppedSlotsReceiver, PrunedBanksRequestHandler, SnapshotRequestHandler, }, - bank::{epoch_accounts_hash_utils, Bank, BankTestConfig}, + bank::{epoch_accounts_hash_utils, Bank}, bank_forks::BankForks, genesis_utils::{self, GenesisConfigInfo}, runtime_config::RuntimeConfig, @@ -32,7 +32,6 @@ use { solana_sdk::{ clock::Slot, epoch_schedule::EpochSchedule, - feature_set, native_token::LAMPORTS_PER_SOL, pubkey::Pubkey, signature::{Keypair, Signer}, @@ -114,10 +113,8 @@ impl TestEnvironment { ..snapshot_config }; - let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests_with_config( - &genesis_config_info.genesis_config, - BankTestConfig::default(), - )); + let bank_forks = + BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config_info.genesis_config)); bank_forks .write() .unwrap() @@ -145,9 +142,6 @@ impl TestEnvironment { Arc::clone(&bank_forks), ); let bank = 
bank_forks.read().unwrap().working_bank(); - assert!(bank - .feature_set - .is_active(&feature_set::epoch_accounts_hash::id())); assert!(epoch_accounts_hash_utils::is_enabled_this_epoch(&bank)); bank.set_startup_verification_complete(); diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index eead4942bdddeb..5581d853656e37 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -1,6 +1,6 @@ //! Fork Selection Simulation //! -//! Description of the algorithm can be found in [docs/src/cluster/managing-forks.md](docs/src/cluster/managing-forks.md). +//! Description of the algorithm can be found in [Managing Forks](https://docs.solanalabs.com/consensus/managing-forks). //! //! A test library function exists for configuring networks. //! ``` diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 71e46f2b66b2d2..83af4558dfc19f 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -73,7 +73,7 @@ struct SnapshotTestConfig { full_snapshot_archives_dir: TempDir, bank_snapshots_dir: TempDir, accounts_dir: PathBuf, - // as the underscore prefix indicates, this isn't explictly used; but it's needed to keep + // as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep // TempDir::drop from running to retain that dir for the duration of test _accounts_tmp_dir: TempDir, } @@ -196,7 +196,7 @@ fn run_bank_forks_snapshot_n( f: F, set_root_interval: u64, ) where - F: Fn(&mut Bank, &Keypair), + F: Fn(&Bank, &Keypair), { solana_logger::setup(); // Set up snapshotting config @@ -221,13 +221,13 @@ fn run_bank_forks_snapshot_n( accounts_package_sender, }; for slot in 1..=last_slot { - let mut bank = Bank::new_from_parent( + let bank = Bank::new_from_parent( bank_forks.read().unwrap().get(slot - 1).unwrap().clone(), &Pubkey::default(), slot, ); - f(&mut bank, mint_keypair); let bank = bank_forks.write().unwrap().insert(bank); + f(bank.clone_without_scheduler().as_ref(), mint_keypair); // Set root to make sure we don't end up with too many account storage entries // and to allow snapshotting of bank and the purging logic on status_cache to // kick in @@ -399,6 +399,11 @@ fn test_concurrent_snapshot_packaging( &Pubkey::default(), parent_slot + 1, ); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); let slot = bank.slot(); let key1 = Keypair::new().pubkey(); let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); @@ -439,7 +444,6 @@ fn test_concurrent_snapshot_packaging( ); accounts_package_sender.send(accounts_package).unwrap(); - bank_forks.write().unwrap().insert(bank); if slot == saved_slot { // Find the relevant snapshot storages let snapshot_storage_files: HashSet<_> = bank_forks @@ -631,7 +635,7 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust .unwrap() .set_root(current_bank.slot(), &request_sender, None); - // Since the accounts background services are not runnning, EpochAccountsHash + // Since the accounts background services are not running, EpochAccountsHash // calculation requests will not be handled. To prevent banks from hanging during // Bank::freeze() due to waiting for EAH to complete, just set the EAH to Valid. 
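Because `run_bank_forks_snapshot_n` now inserts each child bank into `BankForks` before invoking the caller's closure, the closure bound relaxes from `Fn(&mut Bank, &Keypair)` to `Fn(&Bank, &Keypair)`. A hypothetical closure body under the new bound, mirroring the transfer pattern these tests use:

```rust
use {
    solana_runtime::bank::Bank,
    solana_sdk::{signature::Keypair, system_transaction},
};

// Hypothetical argument for run_bank_forks_snapshot_n: the harness has already
// registered the bank with BankForks, so a shared reference is enough to
// process a transaction against it.
fn fill_bank(bank: &Bank, mint_keypair: &Keypair) {
    let to = solana_sdk::pubkey::new_rand();
    let tx = system_transaction::transfer(mint_keypair, &to, 1, bank.last_blockhash());
    bank.process_transaction(&tx).unwrap();
}
```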
let epoch_accounts_hash_manager = ¤t_bank @@ -758,6 +762,8 @@ fn test_bank_forks_incremental_snapshot( let bank = { let parent = bank_forks.read().unwrap().get(slot - 1).unwrap(); let bank = Bank::new_from_parent(parent, &Pubkey::default(), slot); + let bank_scheduler = bank_forks.write().unwrap().insert(bank); + let bank = bank_scheduler.clone_without_scheduler(); let key = solana_sdk::pubkey::new_rand(); let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); @@ -771,7 +777,7 @@ fn test_bank_forks_incremental_snapshot( bank.register_unique_tick(); } - bank_forks.write().unwrap().insert(bank) + bank_scheduler }; // Set root to make sure we don't end up with too many account storage entries @@ -1062,6 +1068,11 @@ fn test_snapshots_with_background_services( &Pubkey::default(), slot, ); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); let key = solana_sdk::pubkey::new_rand(); let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); @@ -1074,8 +1085,6 @@ fn test_snapshots_with_background_services( while !bank.is_complete() { bank.register_unique_tick(); } - - bank_forks.write().unwrap().insert(bank); } // Call `BankForks::set_root()` to cause snapshots to be taken diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 6142f9be876215..4a8b159bbf4cb6 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -32,6 +32,7 @@ name = "solana_cost_model" [dev-dependencies] solana-logger = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } test-case = { workspace = true } diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index bb3e296d6dcbe0..ba01ed9fe993a5 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -11,12 +11,12 @@ use { solana_program_runtime::{ compute_budget::DEFAULT_HEAP_COST, compute_budget_processor::{ - process_compute_budget_instructions, ComputeBudgetLimits, - DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + process_compute_budget_instructions, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + MAX_COMPUTE_UNIT_LIMIT, }, }, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, + borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, @@ -53,24 +53,6 @@ impl CostModel { } } - // Calculate cost of loaded accounts size in the same way heap cost is charged at - // rate of 8cu per 32K. 
Citing `program_runtime\src\compute_budget.rs`: "(cost of - // heap is about) 0.5us per 32k at 15 units/us rounded up" - // - // Before feature `support_set_loaded_accounts_data_size_limit_ix` is enabled, or - // if user doesn't use compute budget ix `set_loaded_accounts_data_size_limit_ix` - // to set limit, `compute_budget.loaded_accounts_data_size_limit` is set to default - // limit of 64MB; which will convert to (64M/32K)*8CU = 16_000 CUs - // - pub fn calculate_loaded_accounts_data_size_cost( - compute_budget_limits: &ComputeBudgetLimits, - ) -> u64 { - FeeStructure::calculate_memory_usage_cost( - usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), - DEFAULT_HEAP_COST, - ) - } - fn get_signature_cost(transaction: &SanitizedTransaction) -> u64 { transaction.signatures().len() as u64 * SIGNATURE_COST } @@ -133,10 +115,8 @@ impl CostModel { // if failed to process compute_budget instructions, the transaction will not be executed // by `bank`, therefore it should be considered as no execution cost by cost model. - match process_compute_budget_instructions( - transaction.message().program_instructions_iter(), - feature_set, - ) { + match process_compute_budget_instructions(transaction.message().program_instructions_iter()) + { Ok(compute_budget_limits) => { // if tx contained user-space instructions and a more accurate estimate available correct it, // where "user-space instructions" must be specifically checked by @@ -150,8 +130,10 @@ impl CostModel { if feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()) { - loaded_accounts_data_size_cost = - Self::calculate_loaded_accounts_data_size_cost(&compute_budget_limits); + loaded_accounts_data_size_cost = FeeStructure::calculate_memory_usage_cost( + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), + DEFAULT_HEAP_COST, + ) } } Err(_) => { @@ -588,7 +570,7 @@ mod tests { } #[test] - fn test_cost_model_calculate_cost_enabled_feature_with_limit() { + fn test_cost_model_calculate_cost_with_limit() { let (mint_keypair, start_hash) = test_setup(); let to_keypair = Keypair::new(); let data_limit = 32 * 1024u32; @@ -626,75 +608,6 @@ mod tests { ); } - #[test] - fn test_cost_model_calculate_cost_disabled_feature_with_limit() { - let (mint_keypair, start_hash) = test_setup(); - let to_keypair = Keypair::new(); - let data_limit = 32 * 1024u32; - let tx = - SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( - &[ - system_instruction::transfer(&mint_keypair.pubkey(), &to_keypair.pubkey(), 2), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_limit), - ], - Some(&mint_keypair.pubkey()), - &[&mint_keypair], - start_hash, - )); - - let feature_set = FeatureSet::default(); - assert!(!feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id())); - let expected_account_cost = WRITE_LOCK_UNITS * 2; - // with features all disabled, builtins and loaded account size don't cost CU - let expected_execution_cost = 0; - let expected_loaded_accounts_data_size_cost = 0; - - let tx_cost = CostModel::calculate_cost(&tx, &feature_set); - assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); - assert_eq!(expected_execution_cost, tx_cost.builtins_execution_cost()); - assert_eq!(2, tx_cost.writable_accounts().len()); - assert_eq!( - expected_loaded_accounts_data_size_cost, - tx_cost.loaded_accounts_data_size_cost() - ); - } - - #[allow(clippy::field_reassign_with_default)] - #[test] - fn 
test_calculate_loaded_accounts_data_size_cost() { - let mut compute_budget_limits = ComputeBudgetLimits::default(); - - // accounts data size are priced in block of 32K, ... - - // ... requesting less than 32K should still be charged as one block - compute_budget_limits.loaded_accounts_bytes = 31 * 1024; - assert_eq!( - DEFAULT_HEAP_COST, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) - ); - - // ... requesting exact 32K should be charged as one block - compute_budget_limits.loaded_accounts_bytes = 32 * 1024; - assert_eq!( - DEFAULT_HEAP_COST, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) - ); - - // ... requesting slightly above 32K should be charged as 2 block - compute_budget_limits.loaded_accounts_bytes = 33 * 1024; - assert_eq!( - DEFAULT_HEAP_COST * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) - ); - - // ... requesting exact 64K should be charged as 2 block - compute_budget_limits.loaded_accounts_bytes = 64 * 1024; - assert_eq!( - DEFAULT_HEAP_COST * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) - ); - } - #[test] fn test_transaction_cost_with_mix_instruction_without_compute_budget() { let (mint_keypair, start_hash) = test_setup(); diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index efdd86512d2039..9d322d009c62f2 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -186,7 +186,9 @@ impl CostTracker { if self.vote_cost.saturating_add(cost) > self.vote_cost_limit { return Err(CostTrackerError::WouldExceedVoteMaxLimit); } - } else if self.block_cost.saturating_add(cost) > self.block_cost_limit { + } + + if self.block_cost.saturating_add(cost) > self.block_cost_limit { // check against the total package cost return Err(CostTrackerError::WouldExceedBlockMaxLimit); } diff --git a/docs/.prettierignore b/docs/.prettierignore index 62cecd8a574b09..12ef0727eb2c62 100644 --- a/docs/.prettierignore +++ b/docs/.prettierignore @@ -2,6 +2,3 @@ build html static - -# prettier interferes with the json response too much -src/developing/clients/jsonrpc-api.md diff --git a/docs/README.md b/docs/README.md index b28d3e44cddf46..ceff97a78db556 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,11 +1,20 @@ -# Solana Docs Readme +# Solana Validator Docs Readme -Solana's Docs are built using [Docusaurus v2](https://v2.docusaurus.io/) with `npm`. +This validator's documentation is built using [Docusaurus v2](https://v2.docusaurus.io/) with `npm`. Static content delivery is handled using `vercel`. +> Note: The documentation within this repo is specifically focused on the +> Solana validator client maintained by Solana Labs. The more "common" +> documentation, which is generalized to the Solana protocol as a whole and applies +> to all Solana validator implementations, is maintained within the +> [`developer-content`](https://github.com/solana-foundation/developer-content/) +> repo. 
Those "common docs" are manged by the Solana Foundation within their +> GitHub organization and are publicly accessible via +> [solana.com/docs](https://solana.com/docs) + ## Local Development -To set up the Solana Docs site locally: +To set up the Solana Validator Docs site locally: - install dependencies using `npm` - build locally via `./build.sh` @@ -30,7 +39,7 @@ The build script generates static content into the `build` directory and can be ./build.sh ``` -Running this build script requires **Docker**, and will auto fetch the [solanalabs/rust](https://hub.docker.com/r/solanalabs/rust) image from Docker hub to compile the desired version of the [Solana CLI](https://docs.solana.com/cli) from source. +Running this build script requires **Docker**, and will auto fetch the [solanalabs/rust](https://hub.docker.com/r/solanalabs/rust) image from Docker hub to compile the desired version of the [Solana CLI](https://docs.solanalabs.com/cli) from source. This build script will also: @@ -75,9 +84,9 @@ The docs are built and published in Travis CI with the `./build.sh` script. On e In each post-commit build, docs are built and published using `vercel` to their respective domain depending on the build branch. -- Master branch docs are published to `edge.docs.solana.com` -- Beta branch docs are published to `beta.docs.solana.com` -- Latest release tag docs are published to `docs.solana.com` +- Master branch docs are published to `edge.docs.solanalabs.com` +- Beta branch docs are published to `beta.docs.solanalabs.com` +- Latest release tag docs are published to `docs.solanalabs.com` ## Common Issues diff --git a/docs/build-cli-usage.sh b/docs/build-cli-usage.sh index ce5f582fa36d26..0917cb4737af9f 100755 --- a/docs/build-cli-usage.sh +++ b/docs/build-cli-usage.sh @@ -15,7 +15,7 @@ out=${1:-src/cli/usage.md} # load the usage file's header cat src/cli/.usage.md.header > "$out" -# Skip generating the usage doc for non deployment commits of the docs +# Skip generating the detailed usage doc for non deployment commits of the docs if [[ -n $CI ]]; then if [[ $CI_BRANCH != $EDGE_CHANNEL* ]] && [[ $CI_BRANCH != $BETA_CHANNEL* ]] && [[ $CI_BRANCH != $STABLE_CHANNEL* ]]; then echo "**NOTE:** The usage doc is only auto-generated during full production deployments of the docs" diff --git a/docs/components/CodeDocBlock.jsx b/docs/components/CodeDocBlock.jsx deleted file mode 100644 index bd0099b99e7830..00000000000000 --- a/docs/components/CodeDocBlock.jsx +++ /dev/null @@ -1,161 +0,0 @@ -import React from "react"; -import Link from "@docusaurus/Link"; -// import clsx from "clsx"; -import styles from "../src/pages/CodeDocBlock.module.css"; - -export function DocBlock({ children }) { - return
{children}
; -} - -export function DocSideBySide({ children }) { - return
{children}
; -} - -export function CodeParams({ children }) { - return
{children}
; -} - -export function CodeSnippets({ children }) { - return ( -
- {/*

Code Sample:

*/} - - {children} -
- ); -} - -/* - Display a single Parameter -*/ -export function Parameter(props) { - const { - name = null, - type = null, - required = null, - optional = null, - children, - } = computeHeader(props); - - return ( -
-

- {name && name} {type && type} {required && required}{" "} - {optional && optional} -

- - {children} -
- ); -} - -/* - Display a single Parameter's field data -*/ -export function Field(props) { - const { - name = null, - type = null, - values = null, - required = null, - defaultValue = null, - optional = null, - children, - } = computeHeader(props); - - return ( -
-

- {name && name} {type && type} {required && required}{" "} - {optional && optional} - {defaultValue && defaultValue} -

- -
- {values && values} - - {children} -
-
- ); -} - -/* - Parse an array of string values to display -*/ -export function Values({ values = null }) { - // format the Parameter's values - if (values && Array.isArray(values) && values?.length) { - values = values.map((value) => ( - - {value} - - )); - } - - return ( -

- Values: {values} -

- ); -} - -/* - Compute the formatted Parameter and Field component's header meta data -*/ -function computeHeader({ - name = null, - type = null, - href = null, - values = null, - required = null, - defaultValue = null, - optional = null, - children, -}) { - // format the Parameter's name - if (name) { - name = {name}; - - if (href) name = {name}; - } - - // format the Parameter's type - if (type) type = {type}; - - // format the Parameter's values - if (values && Array.isArray(values)) { - values = values.map((value) => ( - {value} - )); - } - - // format the `defaultValue` flag - if (defaultValue) { - defaultValue = ( - - Default: {defaultValue.toString()} - - ); - } - - // format the `required` flag - if (required) { - required = required; - } - // format the `optional` flag - else if (optional) { - optional = optional; - } - - return { - name, - type, - href, - values, - required, - defaultValue, - optional, - children, - }; -} diff --git a/docs/components/HomeCtaLinks.jsx b/docs/components/HomeCtaLinks.jsx index 6a7283425ad6a7..71d20014adf500 100644 --- a/docs/components/HomeCtaLinks.jsx +++ b/docs/components/HomeCtaLinks.jsx @@ -6,7 +6,7 @@ export default function HomeCtaLinks() {
-
- {sidebarItems?.length > 0 && ( - - )} - -
{children}
-
- - ); -} -export default CardLayout; - -/* - Create a simple label based on the string of a doc file path -*/ -const computeLabel = (label) => { - label = label.split("/"); - label = label[label?.length - 1]?.replace("-", " "); - label = label.charAt(0).toUpperCase() + label.slice(1); - return label && label; -}; - -/* - Recursively parse the sidebar -*/ -const parseSidebar = (sidebarItems) => { - Object.keys(sidebarItems).forEach((key) => { - if (sidebarItems[key]?.type?.toLowerCase() === "category") { - sidebarItems[key].items = parseSidebar(sidebarItems[key].items); - } else sidebarItems[key] = formatter(sidebarItems[key]); - }); - return sidebarItems; -}; - -/* - Parser to format a sidebar item to be compatible with the `DocSidebar` component -*/ -const formatter = (item) => { - // handle string only document ids - if (typeof item === "string") { - item = { - type: "link", - href: item, - label: computeLabel(item) || item || "[unknown label]", - }; - } - - // handle object style docs - else if (item?.type?.toLowerCase() === "doc") { - item.type = "link"; - item.href = item.id; - item.label = item.label || computeLabel(item.href) || "[unknown label]"; - delete item.id; - } - - // fix for local routing that does not specify starting at the site root - if ( - !( - item?.href?.startsWith("/") || - item?.href?.startsWith("http:") || - item?.href?.startsWith("https") - ) - ) - item.href = `/${item?.href}`; - - return item; -}; diff --git a/docs/publish-docs.sh b/docs/publish-docs.sh index 0ae2d927162acf..3e678c5b53fef9 100755 --- a/docs/publish-docs.sh +++ b/docs/publish-docs.sh @@ -31,22 +31,126 @@ cat > "$CONFIG_FILE" < - -## getConfirmedBlock - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0. -**Please use [getBlock](#getblock) instead** -::: - -Returns identity and transaction information about a confirmed block in the ledger - - - - - -### Parameters: - - - slot number, as u64 integer - - - - -Configuration object containing the following fields: - - - - - level of transaction detail to return, either "full", "signatures", or "none" - - - - whether to populate the `rewards` array. - - - - -Encoding format for Account data - - - -
- -- `jsonParsed` encoding attempts to use program-specific instruction parsers to return - more human-readable and explicit data in the `transaction.message.instructions` list. -- If `jsonParsed` is requested but a parser cannot be found, the instruction - falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). - -
- -
- -
- -### Result: - -The result field will be an object with the following fields: - -- `` - if specified block is not confirmed -- `` - if block is confirmed, an object with the following fields: - - `blockhash: ` - the blockhash of this block, as base-58 encoded string - - `previousBlockhash: ` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111" - - `parentSlot: ` - the slot index of this block's parent - - `transactions: ` - present if "full" transaction details are requested; an array of JSON objects containing: - - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter - - `meta: ` - transaction status metadata object, containing `null` or: - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - - `fee: ` - fee this transaction was charged, as u64 integer - - `preBalances: ` - array of u64 account balances from before the transaction was processed - - `postBalances: ` - array of u64 account balances after the transaction was processed - - `innerInstructions: ` - List of [inner instructions](#inner-instructions-structure) or `null` if inner instruction recording was not enabled during this transaction - - `preTokenBalances: ` - List of [token balances](#token-balances-structure) from before the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `postTokenBalances: ` - List of [token balances](#token-balances-structure) from after the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `logMessages: ` - array of string log messages or `null` if log message recording was not enabled during this transaction - - DEPRECATED: `status: ` - Transaction status - - `"Ok": ` - Transaction was successful - - `"Err": ` - Transaction failed with TransactionError - - `signatures: ` - present if "signatures" are requested for transaction details; an array of signatures strings, corresponding to the transaction order in the block - - `rewards: ` - present if rewards are requested; an array of JSON objects containing: - - `pubkey: ` - The public key, as base-58 encoded string, of the account that received the reward - - `lamports: `- number of reward lamports credited or debited by the account, as a i64 - - `postBalance: ` - account balance in lamports after the reward was applied - - `rewardType: ` - type of reward: "fee", "rent", "voting", "staking" - - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards - - `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch). 
null if not available - -#### For more details on returned data: - -- [Transaction Structure](#transaction-structure) -- [Inner Instructions Structure](#inner-instructions-structure) -- [Token Balances Structure](#token-balances-structure) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getConfirmedBlock", - "params": [430, "base64"] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "blockTime": null, - "blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA", - "parentSlot": 429, - "previousBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B", - "rewards": [], - "transactions": [ - { - "meta": { - "err": null, - "fee": 5000, - "innerInstructions": [], - "logMessages": [], - "postBalances": [499998932500, 26858640, 1, 1, 1], - "postTokenBalances": [], - "preBalances": [499998937500, 26858640, 1, 1, 1], - "preTokenBalances": [], - "status": { - "Ok": null - } - }, - "transaction": [ - "AVj7dxHlQ9IrvdYVIjuiRFs1jLaDMHixgrv+qtHBwz51L4/ImLZhszwiyEJDIp7xeBSpm/TX5B7mYzxa+fPOMw0BAAMFJMJVqLw+hJYheizSoYlLm53KzgT82cDVmazarqQKG2GQsLgiqktA+a+FDR4/7xnDX7rsusMwryYVUdixfz1B1Qan1RcZLwqvxvJl4/t3zHragsUp0L47E24tAFUgAAAABqfVFxjHdMkoVmOYaR1etoteuKObS21cc1VbIQAAAAAHYUgdNXR0u3xNdiTr072z2DVec9EQQ/wNo1OAAAAAAAtxOUhPBp2WSjUNJEgfvy70BbxI00fZyEPvFHNfxrtEAQQEAQIDADUCAAAAAQAAAAAAAACtAQAAAAAAAAdUE18R96XTJCe+YfRfUp6WP+YKCy/72ucOL8AoBFSpAA==", - "base64" - ] - } - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getConfirmedBlocks.mdx b/docs/src/api/deprecated/_getConfirmedBlocks.mdx deleted file mode 100644 index 5a6b21c12aa8a8..00000000000000 --- a/docs/src/api/deprecated/_getConfirmedBlocks.mdx +++ /dev/null @@ -1,71 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getConfirmedBlocks - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getBlocks](#getblocks) instead** -::: - -Returns a list of confirmed blocks between two slots - - - - - -### Parameters: - - - start_slot, as u64 integer - - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result field will be an array of u64 integers listing confirmed blocks -between `start_slot` and either `end_slot` - if provided, or latest confirmed block, -inclusive. Max range allowed is 500,000 slots. 
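The replacement [getBlocks](#getblocks) method takes the same positional slot arguments, so migrating is essentially a method-name change; a minimal sketch mirroring the sample below:

```bash
# Hypothetical equivalent of the deprecated request below, via getBlocks
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc": "2.0", "id": 1, "method": "getBlocks", "params": [5, 10]}
'
```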
- - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5, 10]} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": [5, 6, 7, 8, 9, 10], "id": 1 } -``` - - - - diff --git a/docs/src/api/deprecated/_getConfirmedBlocksWithLimit.mdx b/docs/src/api/deprecated/_getConfirmedBlocksWithLimit.mdx deleted file mode 100644 index 3daec0abc25a37..00000000000000 --- a/docs/src/api/deprecated/_getConfirmedBlocksWithLimit.mdx +++ /dev/null @@ -1,78 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getConfirmedBlocksWithLimit - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getBlocksWithLimit](#getblockswithlimit) instead** -::: - -Returns a list of confirmed blocks starting at the given slot - - - - - -### Parameters: - - - start_slot, as u64 integer - - - - limit, as u64 integer - - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result field will be an array of u64 integers listing confirmed blocks -starting at `start_slot` for up to `limit` blocks, inclusive. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getConfirmedBlocksWithLimit", - "params": [5, 3] - } -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": [5, 6, 7], "id": 1 } -``` - - - - diff --git a/docs/src/api/deprecated/_getConfirmedSignaturesForAddress2.mdx b/docs/src/api/deprecated/_getConfirmedSignaturesForAddress2.mdx deleted file mode 100644 index 7d8c803d061544..00000000000000 --- a/docs/src/api/deprecated/_getConfirmedSignaturesForAddress2.mdx +++ /dev/null @@ -1,114 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getConfirmedSignaturesForAddress2 - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getSignaturesForAddress](#getsignaturesforaddress) instead** -::: - -Returns signatures for confirmed transactions that include the given address in -their `accountKeys` list. Returns signatures backwards in time from the -provided signature or most recent confirmed block - - - - - -### Parameters: - - - account address, as base-58 encoded string - - - -Configuration object containing the following fields: - - - - - maximum transaction signatures to return (between 1 and 1,000, default: - 1,000). - - - - start searching backwards from this transaction signature. (If not provided - the search starts from the top of the highest max confirmed block.) - - - - search until this transaction signature, if found before limit reached. - - - - -### Result: - -The result field will be an array of ``, ordered -from newest to oldest transaction, containing transaction signature information with the following fields: - -- `signature: ` - transaction signature as base-58 encoded string -- `slot: ` - The slot that contains the block with the transaction -- `err: ` - Error if transaction failed, null if transaction succeeded. 
[TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) -- `memo: ` - Memo associated with the transaction, null if no memo is present -- `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch) of when transaction was processed. null if not available. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getConfirmedSignaturesForAddress2", - "params": [ - "Vote111111111111111111111111111111111111111", - { - "limit": 1 - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "err": null, - "memo": null, - "signature": "5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv", - "slot": 114, - "blockTime": null - } - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getConfirmedTransaction.mdx b/docs/src/api/deprecated/_getConfirmedTransaction.mdx deleted file mode 100644 index 9586975df62c6e..00000000000000 --- a/docs/src/api/deprecated/_getConfirmedTransaction.mdx +++ /dev/null @@ -1,133 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getConfirmedTransaction - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getTransaction](#gettransaction) instead** -::: - -Returns transaction details for a confirmed transaction - - - - - -### Parameters: - - - transaction signature, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -Encoding format for Account data - - - -
- -- `base58` is slow and limited to less than 129 bytes of Account data. -- `jsonParsed` encoding attempts to use program-specific instruction parsers - to return more human-readable and explicit data in the `transaction.message.instructions` list. -- If `jsonParsed` is requested but a parser cannot be found, the instruction - falls back to regular `json` encoding (`accounts`, `data`, and `programIdIndex` fields). - -
- -
- -
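As with the other deprecated methods, the most useful example here is the migration path. A minimal sketch of the same lookup via [getTransaction](#gettransaction), reusing the signature and encoding from the code sample below; `getTransaction` also accepts a configuration object such as `{"encoding": "base64"}` in place of the bare string:

```bash
# Hypothetical migration of the getConfirmedTransaction sample below
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0", "id": 1,
    "method": "getTransaction",
    "params": [
      "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv",
      "base64"
    ]
  }
'
```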
- -### Result: - -- `` - if transaction is not found or not confirmed -- `` - if transaction is confirmed, an object with the following fields: - - `slot: ` - the slot this transaction was processed in - - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter - - `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch) of when the transaction was processed. null if not available - - `meta: ` - transaction status metadata object: - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/transaction/enum.TransactionError.html) - - `fee: ` - fee this transaction was charged, as u64 integer - - `preBalances: ` - array of u64 account balances from before the transaction was processed - - `postBalances: ` - array of u64 account balances after the transaction was processed - - `innerInstructions: ` - List of [inner instructions](#inner-instructions-structure) or `null` if inner instruction recording was not enabled during this transaction - - `preTokenBalances: ` - List of [token balances](#token-balances-structure) from before the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `postTokenBalances: ` - List of [token balances](#token-balances-structure) from after the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `logMessages: ` - array of string log messages or `null` if log message recording was not enabled during this transaction - - DEPRECATED: `status: ` - Transaction status - - `"Ok": ` - Transaction was successful - - `"Err": ` - Transaction failed with TransactionError - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getConfirmedTransaction", - "params": [ - "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv", - "base64" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "meta": { - "err": null, - "fee": 5000, - "innerInstructions": [], - "postBalances": [499998932500, 26858640, 1, 1, 1], - "postTokenBalances": [], - "preBalances": [499998937500, 26858640, 1, 1, 1], - "preTokenBalances": [], - "status": { - "Ok": null - } - }, - "slot": 430, - "transaction": [ - "AVj7dxHlQ9IrvdYVIjuiRFs1jLaDMHixgrv+qtHBwz51L4/ImLZhszwiyEJDIp7xeBSpm/TX5B7mYzxa+fPOMw0BAAMFJMJVqLw+hJYheizSoYlLm53KzgT82cDVmazarqQKG2GQsLgiqktA+a+FDR4/7xnDX7rsusMwryYVUdixfz1B1Qan1RcZLwqvxvJl4/t3zHragsUp0L47E24tAFUgAAAABqfVFxjHdMkoVmOYaR1etoteuKObS21cc1VbIQAAAAAHYUgdNXR0u3xNdiTr072z2DVec9EQQ/wNo1OAAAAAAAtxOUhPBp2WSjUNJEgfvy70BbxI00fZyEPvFHNfxrtEAQQEAQIDADUCAAAAAQAAAAAAAACtAQAAAAAAAAdUE18R96XTJCe+YfRfUp6WP+YKCy/72ucOL8AoBFSpAA==", - "base64" - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getFeeCalculatorForBlockhash.mdx b/docs/src/api/deprecated/_getFeeCalculatorForBlockhash.mdx deleted file mode 100644 index 66b0d954ee5581..00000000000000 --- a/docs/src/api/deprecated/_getFeeCalculatorForBlockhash.mdx +++ /dev/null @@ -1,97 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getFeeCalculatorForBlockhash - -:::warning DEPRECATED -This method is expected to be 
removed in solana-core v2.0 -**Please use [isBlockhashValid](#isblockhashvalid) or [getFeeForMessage](#getfeeformessage) instead** -::: - -Returns the fee calculator associated with the query blockhash, or `null` if the blockhash has expired - - - - - -### Parameters: - - - query blockhash, as a base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to: - -- `` - if the query blockhash has expired; or -- `` - otherwise, a JSON object containing: - - `feeCalculator: ` - `FeeCalculator` object describing the cluster fee rate at the queried blockhash - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getFeeCalculatorForBlockhash", - "params": [ - "GJxqhuxcgfn5Tcj6y3f8X4FeCDd2RQ6SnEMo1AAxrPRZ" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 221 - }, - "value": { - "feeCalculator": { - "lamportsPerSignature": 5000 - } - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getFeeRateGovernor.mdx b/docs/src/api/deprecated/_getFeeRateGovernor.mdx deleted file mode 100644 index e7b87cda19b981..00000000000000 --- a/docs/src/api/deprecated/_getFeeRateGovernor.mdx +++ /dev/null @@ -1,76 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getFeeRateGovernor - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -::: - -Returns the fee rate governor information from the root bank - - - - - -### Parameters: - -**None** - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to an `object` with the following fields: - -- `burnPercent: ` - Percentage of fees collected to be destroyed -- `maxLamportsPerSignature: ` - Largest value `lamportsPerSignature` can attain for the next slot -- `minLamportsPerSignature: ` - Smallest value `lamportsPerSignature` can attain for the next slot -- `targetLamportsPerSignature: ` - Desired fee rate for the cluster -- `targetSignaturesPerSlot: ` - Desired signature rate for the cluster - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getFeeRateGovernor"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 54 - }, - "value": { - "feeRateGovernor": { - "burnPercent": 50, - "maxLamportsPerSignature": 100000, - "minLamportsPerSignature": 5000, - "targetLamportsPerSignature": 10000, - "targetSignaturesPerSlot": 20000 - } - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getFees.mdx b/docs/src/api/deprecated/_getFees.mdx deleted file mode 100644 index 62cd33745483ef..00000000000000 --- a/docs/src/api/deprecated/_getFees.mdx +++ /dev/null @@ -1,92 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getFees - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getFeeForMessage](#getfeeformessage) instead** -::: - -Returns a recent block hash from the ledger, a fee schedule that can be used to -compute the cost of submitting a 
transaction using it, and the last slot in -which the blockhash will be valid. - - - - - -### Parameters: - - - Pubkey of account to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields: - -- `blockhash: ` - a Hash as base-58 encoded string -- `feeCalculator: ` - FeeCalculator object, the fee schedule for this block hash -- `lastValidSlot: ` - DEPRECATED - this value is inaccurate and should not be relied upon -- `lastValidBlockHeight: ` - last [block height](../../terminology.md#block-height) at which the blockhash will be valid - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { "jsonrpc":"2.0", "id": 1, "method":"getFees"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1 - }, - "value": { - "blockhash": "CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR", - "feeCalculator": { - "lamportsPerSignature": 5000 - }, - "lastValidSlot": 297, - "lastValidBlockHeight": 296 - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getRecentBlockhash.mdx b/docs/src/api/deprecated/_getRecentBlockhash.mdx deleted file mode 100644 index 456685c3ddf603..00000000000000 --- a/docs/src/api/deprecated/_getRecentBlockhash.mdx +++ /dev/null @@ -1,87 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getRecentBlockhash - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getLatestBlockhash](#getlatestblockhash) instead** -::: - -Returns a recent block hash from the ledger, and a fee schedule that can be used to compute the cost of submitting a transaction using it. - - - - - -### Parameters: - - - Pubkey of account to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -### Result: - -An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object. 
- -- `RpcResponse` - RpcResponse JSON object with `value` field set to a JSON object including: -- `blockhash: ` - a Hash as base-58 encoded string -- `feeCalculator: ` - FeeCalculator object, the fee schedule for this block hash - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1 - }, - "value": { - "blockhash": "CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR", - "feeCalculator": { - "lamportsPerSignature": 5000 - } - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/deprecated/_getSnapshotSlot.mdx b/docs/src/api/deprecated/_getSnapshotSlot.mdx deleted file mode 100644 index 42ee186d5c3325..00000000000000 --- a/docs/src/api/deprecated/_getSnapshotSlot.mdx +++ /dev/null @@ -1,64 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSnapshotSlot - -:::warning DEPRECATED -This method is expected to be removed in solana-core v2.0 -**Please use [getHighestSnapshotSlot](#gethighestsnapshotslot) instead** -::: - -Returns the highest slot that the node has a snapshot for - - - - - -### Parameters: - -**None** - -### Result: - -`` - Snapshot slot - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getSnapshotSlot"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 100, "id": 1 } -``` - -Result when the node has no snapshot: - -```json -{ - "jsonrpc": "2.0", - "error": { "code": -32008, "message": "No snapshot" }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/http.md b/docs/src/api/http.md deleted file mode 100644 index 9edf2c792cd9c5..00000000000000 --- a/docs/src/api/http.md +++ /dev/null @@ -1,417 +0,0 @@ ---- -title: JSON RPC HTTP Methods -displayed_sidebar: apiHttpMethodsSidebar -hide_table_of_contents: true ---- - -Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification. - -:::info -For JavaScript applications, use the [@solana/web3.js](https://github.com/solana-labs/solana-web3.js) library as a convenient interface for the RPC methods to interact with a Solana node. - -For an PubSub connection to a Solana node, use the [Websocket API](./websocket.md). -::: - -## RPC HTTP Endpoint - -**Default port:** 8899 e.g. [http://localhost:8899](http://localhost:8899), [http://192.168.1.88:8899](http://192.168.1.88:8899) - -## Request Formatting - -To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. 
The JSON request data should contain 4 fields: - -- `jsonrpc: ` - set to `"2.0"` -- `id: ` - a unique client-generated identifying integer -- `method: ` - a string containing the method to be invoked -- `params: ` - a JSON array of ordered parameter values - -Example using curl: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getBalance", - "params": [ - "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri" - ] - } -' -``` - -The response output will be a JSON object with the following fields: - -- `jsonrpc: ` - matching the request specification -- `id: ` - matching the request identifier -- `result: ` - requested data or success confirmation - -Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST. - -## Definitions - -- Hash: A SHA-256 hash of a chunk of data. -- Pubkey: The public key of a Ed25519 key-pair. -- Transaction: A list of Solana instructions signed by a client keypair to authorize those actions. -- Signature: An Ed25519 signature of transaction's payload data including instructions. This can be used to identify transactions. - -## Configuring State Commitment - -For preflight checks and transaction processing, Solana nodes choose which bank -state to query based on a commitment requirement set by the client. The -commitment describes how finalized a block is at that point in time. When -querying the ledger state, it's recommended to use lower levels of commitment -to report progress and higher levels to ensure the state will not be rolled back. - -In descending order of commitment (most finalized to least finalized), clients -may specify: - -- `"finalized"` - the node will query the most recent block confirmed by supermajority - of the cluster as having reached maximum lockout, meaning the cluster has - recognized this block as finalized -- `"confirmed"` - the node will query the most recent block that has been voted on by supermajority of the cluster. - - It incorporates votes from gossip and replay. - - It does not count votes on descendants of a block, only direct votes on that block. - - This confirmation level also upholds "optimistic confirmation" guarantees in - release 1.3 and onwards. -- `"processed"` - the node will query its most recent block. Note that the block - may still be skipped by the cluster. - -For processing many dependent transactions in series, it's recommended to use -`"confirmed"` commitment, which balances speed with rollback safety. -For total safety, it's recommended to use`"finalized"` commitment. - -#### Example - -The commitment parameter should be included as the last element in the `params` array: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getBalance", - "params": [ - "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", - { - "commitment": "finalized" - } - ] - } -' -``` - -#### Default: - -If commitment configuration is not provided, the node will default to `"finalized"` commitment - -Only methods that query bank state accept the commitment parameter. They are indicated in the API Reference below. - -#### RpcResponse Structure - -Many methods that take a commitment parameter return an RpcResponse JSON object comprised of two parts: - -- `context` : An RpcResponseContext JSON structure including a `slot` field at which the operation was evaluated. -- `value` : The value returned by the operation itself. 
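As noted above, requests can be sent in batches by POSTing an array of request objects. A minimal sketch using two parameterless methods documented below; per the JSON-RPC 2.0 specification, the responses come back as an array and should be matched to requests by `id`, not by position:

```bash
# A batched request: one POST carrying two JSON-RPC request objects
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  [
    {"jsonrpc": "2.0", "id": 1, "method": "getSlot"},
    {"jsonrpc": "2.0", "id": 2, "method": "getBlockHeight"}
  ]
'
```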
- -#### Parsed Responses - -Some methods support an `encoding` parameter, and can return account or -instruction data in parsed JSON format if `"encoding":"jsonParsed"` is requested -and the node has a parser for the owning program. Solana nodes currently support -JSON parsing for the following native and SPL programs: - -| Program | Account State | Instructions | -| ---------------------------- | ------------- | ------------ | -| Address Lookup | v1.15.0 | v1.15.0 | -| BPF Loader | n/a | stable | -| BPF Upgradeable Loader | stable | stable | -| Config | stable | | -| SPL Associated Token Account | n/a | stable | -| SPL Memo | n/a | stable | -| SPL Token | stable | stable | -| SPL Token 2022 | stable | stable | -| Stake | stable | stable | -| Vote | stable | stable | - -The list of account parsers can be found [here](https://github.com/solana-labs/solana/blob/master/account-decoder/src/parse_account_data.rs), and instruction parsers [here](https://github.com/solana-labs/solana/blob/master/transaction-status/src/parse_instruction.rs). - -## Filter criteria - -Some methods support providing a `filters` object to enable pre-filtering the data returned within the RpcResponse JSON object. The following filters exist: - -- `memcmp: object` - compares a provided series of bytes with program account data at a particular offset. Fields: - - - `offset: usize` - offset into program account data to start comparison - - `bytes: string` - data to match, as encoded string - - `encoding: string` - encoding for filter `bytes` data, either "base58" or "base64". Data is limited in size to 128 or fewer decoded bytes.
- **NEW: This field, and base64 support generally, is only available in solana-core v1.14.0 or newer. Please omit when querying nodes on earlier versions** - -- `dataSize: u64` - compares the program account data length with the provided data size - -## Health Check - -Although not a JSON RPC API, a `GET /health` at the RPC HTTP Endpoint provides a -health-check mechanism for use by load balancers or other network -infrastructure. This request will always return a HTTP 200 OK response with a body of -"ok", "behind" or "unknown": - -- `ok`: The node is within `HEALTH_CHECK_SLOT_DISTANCE` slots from the latest cluster confirmed slot -- `behind { distance }`: The node is behind `distance` slots from the latest cluster confirmed slot where `distance > HEALTH_CHECK_SLOT_DISTANCE` -- `unknown`: The node is unable to determine where it stands in relation to the cluster - -## JSON RPC API Reference - -import GetAccountInfo from "./methods/\_getAccountInfo.mdx" - - - -import GetBalance from "./methods/\_getBalance.mdx" - - - -import GetBlock from "./methods/\_getBlock.mdx" - - - -import GetBlockHeight from "./methods/\_getBlockHeight.mdx" - - - -import GetBlockProduction from "./methods/\_getBlockProduction.mdx" - - - -import GetBlockCommitment from "./methods/\_getBlockCommitment.mdx" - - - -import GetBlocks from "./methods/\_getBlocks.mdx" - - - -import GetBlocksWithLimit from "./methods/\_getBlocksWithLimit.mdx" - - - -import GetBlockTime from "./methods/\_getBlockTime.mdx" - - - -import GetClusterNodes from "./methods/\_getClusterNodes.mdx" - - - -import GetEpochInfo from "./methods/\_getEpochInfo.mdx" - - - -import GetEpochSchedule from "./methods/\_getEpochSchedule.mdx" - - - -import GetFeeForMessage from "./methods/\_getFeeForMessage.mdx" - - - -import GetFirstAvailableBlock from "./methods/\_getFirstAvailableBlock.mdx" - - - -import GetGenesisHash from "./methods/\_getGenesisHash.mdx" - - - -import GetHealth from "./methods/\_getHealth.mdx" - - - -import GetHighestSnapshotSlot from "./methods/\_getHighestSnapshotSlot.mdx" - - - -import GetIdentity from "./methods/\_getIdentity.mdx" - - - -import GetInflationGovernor from "./methods/\_getInflationGovernor.mdx" - - - -import GetInflationRate from "./methods/\_getInflationRate.mdx" - - - -import GetInflationReward from "./methods/\_getInflationReward.mdx" - - - -import GetLargestAccounts from "./methods/\_getLargestAccounts.mdx" - - - -import GetLatestBlockhash from "./methods/\_getLatestBlockhash.mdx" - - - -import GetLeaderSchedule from "./methods/\_getLeaderSchedule.mdx" - - - -import GetMaxRetransmitSlot from "./methods/\_getMaxRetransmitSlot.mdx" - - - -import GetMaxShredInsertSlot from "./methods/\_getMaxShredInsertSlot.mdx" - - - -import GetMinimumBalanceForRentExemption from "./methods/\_getMinimumBalanceForRentExemption.mdx" - - - -import GetMultipleAccounts from "./methods/\_getMultipleAccounts.mdx" - - - -import GetProgramAccounts from "./methods/\_getProgramAccounts.mdx" - - - -import GetRecentPerformanceSamples from "./methods/\_getRecentPerformanceSamples.mdx" - - - -import GetRecentPrioritizationFees from "./methods/\_getRecentPrioritizationFees.mdx" - - - -import GetSignaturesForAddress from "./methods/\_getSignaturesForAddress.mdx" - - - -import GetSignatureStatuses from "./methods/\_getSignatureStatuses.mdx" - - - -import GetSlot from "./methods/\_getSlot.mdx" - - - -import GetSlotLeader from "./methods/\_getSlotLeader.mdx" - - - -import GetSlotLeaders from "./methods/\_getSlotLeaders.mdx" - - - -import GetStakeActivation 
from "./methods/\_getStakeActivation.mdx" - - - -import GetStakeMinimumDelegation from "./methods/\_getStakeMinimumDelegation.mdx" - - - -import GetSupply from "./methods/\_getSupply.mdx" - - - -import GetTokenAccountBalance from "./methods/\_getTokenAccountBalance.mdx" - - - -import GetTokenAccountsByDelegate from "./methods/\_getTokenAccountsByDelegate.mdx" - - - -import GetTokenAccountsByOwner from "./methods/\_getTokenAccountsByOwner.mdx" - - - -import GetTokenLargestAccounts from "./methods/\_getTokenLargestAccounts.mdx" - - - -import GetTokenSupply from "./methods/\_getTokenSupply.mdx" - - - -import GetTransaction from "./methods/\_getTransaction.mdx" - - - -import GetTransactionCount from "./methods/\_getTransactionCount.mdx" - - - -import GetVersion from "./methods/\_getVersion.mdx" - - - -import GetVoteAccounts from "./methods/\_getVoteAccounts.mdx" - - - -import IsBlockhashValid from "./methods/\_isBlockhashValid.mdx" - - - -import MinimumLedgerSlot from "./methods/\_minimumLedgerSlot.mdx" - - - -import RequestAirdrop from "./methods/\_requestAirdrop.mdx" - - - -import SendTransaction from "./methods/\_sendTransaction.mdx" - - - -import SimulateTransaction from "./methods/\_simulateTransaction.mdx" - - - -## JSON RPC API Deprecated Methods - -import GetConfirmedBlock from "./deprecated/\_getConfirmedBlock.mdx" - - - -import GetConfirmedBlocks from "./deprecated/\_getConfirmedBlocks.mdx" - - - -import GetConfirmedBlocksWithLimit from "./deprecated/\_getConfirmedBlocksWithLimit.mdx" - - - -import GetConfirmedSignaturesForAddress2 from "./deprecated/\_getConfirmedSignaturesForAddress2.mdx" - - - -import GetConfirmedTransaction from "./deprecated/\_getConfirmedTransaction.mdx" - - - -import GetFeeCalculatorForBlockhash from "./deprecated/\_getFeeCalculatorForBlockhash.mdx" - - - -import GetFeeRateGovernor from "./deprecated/\_getFeeRateGovernor.mdx" - - - -import GetFees from "./deprecated/\_getFees.mdx" - - - -import GetRecentBlockhash from "./deprecated/\_getRecentBlockhash.mdx" - - - -import GetSnapshotSlot from "./deprecated/\_getSnapshotSlot.mdx" - - diff --git a/docs/src/api/methods/_getAccountInfo.mdx b/docs/src/api/methods/_getAccountInfo.mdx deleted file mode 100644 index 35e55f0a78e262..00000000000000 --- a/docs/src/api/methods/_getAccountInfo.mdx +++ /dev/null @@ -1,138 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getAccountInfo - -Returns all information associated with the account of provided Pubkey - - - - - -### Parameters: - - - Pubkey of account to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -Encoding format for Account data - - - -
- -- `base58` is slow and limited to less than 129 bytes of Account data. -- `base64` will return base64 encoded data for Account data of any size. -- `base64+zstd` compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) - and base64-encodes the result. -- `jsonParsed` encoding attempts to use program-specific state parsers to return - more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls - back to `base64` encoding, detectable when the `data` field is type `string`. - -
- -
- - - Request a slice of the account's data. - - - `length: ` - number of bytes to return - - `offset: ` - byte offset from which to start reading - -:::info -Data slicing is only available for base58, base64, or base64+zstd encodings. -::: - - - - The minimum slot that the request can be evaluated at - - -
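For instance, a request combining `base64` encoding with a data slice might look like the following sketch. The slice parameter name `dataSlice` is assumed here, and the pubkey is reused from the code sample below:

```bash
# Illustrative: fetch only the first 32 bytes of the account data,
# assuming the slice parameter is named "dataSlice"
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0", "id": 1,
    "method": "getAccountInfo",
    "params": [
      "vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg",
      {"encoding": "base64", "dataSlice": {"offset": 0, "length": 32}}
    ]
  }
'
```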
- -### Result: - -The result will be an RpcResponse JSON object with `value` equal to: - -- `` - if the requested account doesn't exist -- `` - otherwise, a JSON object containing: - - `lamports: ` - number of lamports assigned to this account, as a u64 - - `owner: ` - base-58 encoded Pubkey of the program this account has been assigned to - - `data: <[string, encoding]|object>` - data associated with the account, either as encoded binary data or JSON format `{: }` - depending on encoding parameter - - `executable: ` - boolean indicating if the account contains a program \(and is strictly read-only\) - - `rentEpoch: ` - the epoch at which this account will next owe rent, as u64 - - `size: ` - the data size of the account - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getAccountInfo", - "params": [ - "vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg", - { - "encoding": "base58" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1 - }, - "value": { - "data": [ - "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHRTPuR3oZ1EioKtYGiYxpxMG5vpbZLsbcBYBEmZZcMKaSoGx9JZeAuWf", - "base58" - ], - "executable": false, - "lamports": 1000000000, - "owner": "11111111111111111111111111111111", - "rentEpoch": 2, - "space": 80 - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBalance.mdx b/docs/src/api/methods/_getBalance.mdx deleted file mode 100644 index 0cd0f0c6d0ef23..00000000000000 --- a/docs/src/api/methods/_getBalance.mdx +++ /dev/null @@ -1,78 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBalance - -Returns the balance of the account of provided Pubkey - - - - - -### Parameters: - - - Pubkey of account to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -`RpcResponse` - RpcResponse JSON object with `value` field set to the balance - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getBalance", - "params": [ - "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { "context": { "slot": 1 }, "value": 0 }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBlock.mdx b/docs/src/api/methods/_getBlock.mdx deleted file mode 100644 index 6ced795c2eee8f..00000000000000 --- a/docs/src/api/methods/_getBlock.mdx +++ /dev/null @@ -1,288 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlock - -Returns identity and transaction information about a confirmed block in the ledger - - - - - -### Parameters: - - - slot number, as u64 integer - - - - -Configuration object containing the following fields: - - -
- - the default is finalized
- -
- - processed is not supported.
- -
- - - -encoding format for each returned Transaction - - -
- -- `jsonParsed` attempts to use program-specific instruction parsers to return - more human-readable and explicit data in the `transaction.message.instructions` list. -- If `jsonParsed` is requested but a parser cannot be found, the instruction - falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). -
- -
- - - -level of transaction detail to return - - -
- -- If `accounts` are requested, transaction details only include signatures and - an annotated list of accounts in each transaction. -- Transaction metadata is limited to only: fee, err, pre_balances, - post_balances, pre_token_balances, and post_token_balances. -
- -
- - - -the max transaction version to return in responses. -
- -- If the requested block contains a transaction with a higher version, an - error will be returned. -- If this parameter is omitted, only legacy transactions will be returned, and - a block containing any versioned transaction will prompt the error. -
- -
- - - whether to populate the `rewards` array. If parameter not provided, the - default includes rewards. - -
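The full code sample below requests `"full"` transaction details; when only signatures are needed, a lighter request is possible. A sketch using the configuration fields documented above:

```bash
# Illustrative: fetch block 430 with only transaction signatures, no rewards
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0", "id": 1,
    "method": "getBlock",
    "params": [
      430,
      {
        "transactionDetails": "signatures",
        "rewards": false,
        "maxSupportedTransactionVersion": 0
      }
    ]
  }
'
```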
    - -### Result: - -The result field will be an object with the following fields: - -- `` - if specified block is not confirmed -- `` - if block is confirmed, an object with the following fields: - - `blockhash: ` - the blockhash of this block, as base-58 encoded string - - `previousBlockhash: ` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111" - - `parentSlot: ` - the slot index of this block's parent - - `transactions: ` - present if "full" transaction details are requested; an array of JSON objects containing: - - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter - - `meta: ` - transaction status metadata object, containing `null` or: - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - - `fee: ` - fee this transaction was charged, as u64 integer - - `preBalances: ` - array of u64 account balances from before the transaction was processed - - `postBalances: ` - array of u64 account balances after the transaction was processed - - `innerInstructions: ` - List of [inner instructions](#inner-instructions-structure) or `null` if inner instruction recording was not enabled during this transaction - - `preTokenBalances: ` - List of [token balances](#token-balances-structure) from before the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `postTokenBalances: ` - List of [token balances](#token-balances-structure) from after the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `logMessages: ` - array of string log messages or `null` if log message recording was not enabled during this transaction - - `rewards: ` - transaction-level rewards, populated if rewards are requested; an array of JSON objects containing: - - `pubkey: ` - The public key, as base-58 encoded string, of the account that received the reward - - `lamports: `- number of reward lamports credited or debited by the account, as a i64 - - `postBalance: ` - account balance in lamports after the reward was applied - - `rewardType: ` - type of reward: "fee", "rent", "voting", "staking" - - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards - - DEPRECATED: `status: ` - Transaction status - - `"Ok": ` - Transaction was successful - - `"Err": ` - Transaction failed with TransactionError - - `loadedAddresses: ` - Transaction addresses loaded from address lookup tables. Undefined if `maxSupportedTransactionVersion` is not set in request params, or if `jsonParsed` encoding is set in request params. 
- - `writable: ` - Ordered list of base-58 encoded addresses for writable loaded accounts - - `readonly: ` - Ordered list of base-58 encoded addresses for readonly loaded accounts - - `returnData: ` - the most-recent return data generated by an instruction in the transaction, with the following fields: - - `programId: ` - the program that generated the return data, as base-58 encoded Pubkey - - `data: <[string, encoding]>` - the return data itself, as base-64 encoded binary data - - `computeUnitsConsumed: ` - number of [compute units](developing/programming-model/runtime.md#compute-budget) consumed by the transaction - - `version: <"legacy"|number|undefined>` - Transaction version. Undefined if `maxSupportedTransactionVersion` is not set in request params. - - `signatures: ` - present if "signatures" are requested for transaction details; an array of signatures strings, corresponding to the transaction order in the block - - `rewards: ` - block-level rewards, present if rewards are requested; an array of JSON objects containing: - - `pubkey: ` - The public key, as base-58 encoded string, of the account that received the reward - - `lamports: `- number of reward lamports credited or debited by the account, as a i64 - - `postBalance: ` - account balance in lamports after the reward was applied - - `rewardType: ` - type of reward: "fee", "rent", "voting", "staking" - - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards - - `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available - - `blockHeight: ` - the number of blocks beneath this block - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0","id":1, - "method":"getBlock", - "params": [ - 430, - { - "encoding": "json", - "maxSupportedTransactionVersion":0, - "transactionDetails":"full", - "rewards":false - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "blockHeight": 428, - "blockTime": null, - "blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA", - "parentSlot": 429, - "previousBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B", - "transactions": [ - { - "meta": { - "err": null, - "fee": 5000, - "innerInstructions": [], - "logMessages": [], - "postBalances": [499998932500, 26858640, 1, 1, 1], - "postTokenBalances": [], - "preBalances": [499998937500, 26858640, 1, 1, 1], - "preTokenBalances": [], - "rewards": null, - "status": { - "Ok": null - } - }, - "transaction": { - "message": { - "accountKeys": [ - "3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe", - "AjozzgE83A3x1sHNUR64hfH7zaEBWeMaFuAN9kQgujrc", - "SysvarS1otHashes111111111111111111111111111", - "SysvarC1ock11111111111111111111111111111111", - "Vote111111111111111111111111111111111111111" - ], - "header": { - "numReadonlySignedAccounts": 0, - "numReadonlyUnsignedAccounts": 3, - "numRequiredSignatures": 1 - }, - "instructions": [ - { - "accounts": [1, 2, 3, 0], - "data": "37u9WtQpcm6ULa3WRQHmj49EPs4if7o9f1jSRVZpm2dvihR9C8jY4NqEwXUbLwx15HBSNcP1", - "programIdIndex": 4 - } - ], - "recentBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B" - }, - "signatures": [ - "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv" - ] - } - } - ] - }, - "id": 1 -} -``` - - - - ---- - -#### Transaction Structure - -Transactions are quite different from those on other blockchains. 
Be sure to review [Anatomy of a Transaction](developing/programming-model/transactions.md) to learn about transactions on Solana. - -The JSON structure of a transaction is defined as follows: - -- `signatures: ` - A list of base-58 encoded signatures applied to the transaction. The list is always of length `message.header.numRequiredSignatures` and not empty. The signature at index `i` corresponds to the public key at index `i` in `message.accountKeys`. The first one is used as the [transaction id](../../terminology.md#transaction-id). -- `message: ` - Defines the content of the transaction. - - `accountKeys: ` - List of base-58 encoded public keys used by the transaction, including by the instructions and for signatures. The first `message.header.numRequiredSignatures` public keys must sign the transaction. - - `header: ` - Details the account types and signatures required by the transaction. - - `numRequiredSignatures: ` - The total number of signatures required to make the transaction valid. The signatures must match the first `numRequiredSignatures` of `message.accountKeys`. - - `numReadonlySignedAccounts: ` - The last `numReadonlySignedAccounts` of the signed keys are read-only accounts. Programs may process multiple transactions that load read-only accounts within a single PoH entry, but are not permitted to credit or debit lamports or modify account data. Transactions targeting the same read-write account are evaluated sequentially. - - `numReadonlyUnsignedAccounts: ` - The last `numReadonlyUnsignedAccounts` of the unsigned keys are read-only accounts. - - `recentBlockhash: ` - A base-58 encoded hash of a recent block in the ledger used to prevent transaction duplication and to give transactions lifetimes. - - `instructions: ` - List of program instructions that will be executed in sequence and committed in one atomic transaction if all succeed. - - `programIdIndex: ` - Index into the `message.accountKeys` array indicating the program account that executes this instruction. - - `accounts: ` - List of ordered indices into the `message.accountKeys` array indicating which accounts to pass to the program. - - `data: ` - The program input data encoded in a base-58 string. - - `addressTableLookups: ` - List of address table lookups used by a transaction to dynamically load addresses from on-chain address lookup tables. Undefined if `maxSupportedTransactionVersion` is not set. - - `accountKey: ` - base-58 encoded public key for an address lookup table account. - - `writableIndexes: ` - List of indices used to load addresses of writable accounts from a lookup table. - - `readonlyIndexes: ` - List of indices used to load addresses of readonly accounts from a lookup table. - -#### Inner Instructions Structure - -The Solana runtime records the cross-program instructions that are invoked during transaction processing and makes these available for greater transparency of what was executed on-chain per transaction instruction. Invoked instructions are grouped by the originating transaction instruction and are listed in order of processing. - -The JSON structure of inner instructions is defined as a list of objects in the following structure: - -- `index: number` - Index of the transaction instruction from which the inner instruction(s) originated -- `instructions: ` - Ordered list of inner program instructions that were invoked during a single transaction instruction. - - `programIdIndex: ` - Index into the `message.accountKeys` array indicating the program account that executes this instruction. 
- - `accounts: ` - List of ordered indices into the `message.accountKeys` array indicating which accounts to pass to the program. - - `data: ` - The program input data encoded in a base-58 string. - -#### Token Balances Structure - -The JSON structure of token balances is defined as a list of objects in the following structure: - -- `accountIndex: ` - Index of the account in which the token balance is provided for. -- `mint: ` - Pubkey of the token's mint. -- `owner: ` - Pubkey of token balance's owner. -- `programId: ` - Pubkey of the Token program that owns the account. -- `uiTokenAmount: ` - - - `amount: ` - Raw amount of tokens as a string, ignoring decimals. - - `decimals: ` - Number of decimals configured for token's mint. - - `uiAmount: ` - Token amount as a float, accounting for decimals. **DEPRECATED** - - `uiAmountString: ` - Token amount as a string, accounting for decimals. - - diff --git a/docs/src/api/methods/_getBlockCommitment.mdx b/docs/src/api/methods/_getBlockCommitment.mdx deleted file mode 100644 index c12fc186cfca4b..00000000000000 --- a/docs/src/api/methods/_getBlockCommitment.mdx +++ /dev/null @@ -1,70 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlockCommitment - -Returns commitment for particular block - - - - - -### Parameters: - - - block number, identified by Slot - - -### Result: - -The result field will be a JSON object containing: - -- `commitment` - commitment, comprising either: - - `` - Unknown block - - `` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY` + 1 -- `totalStake` - total active stake, in lamports, of the current epoch - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getBlockCommitment", - "params":[5] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "commitment": [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 10, 32 - ], - "totalStake": 42 - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBlockHeight.mdx b/docs/src/api/methods/_getBlockHeight.mdx deleted file mode 100644 index 9b8a07d0fd6a2b..00000000000000 --- a/docs/src/api/methods/_getBlockHeight.mdx +++ /dev/null @@ -1,73 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlockHeight - -Returns the current block height of the node - - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -- `` - Current block height - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0","id":1, - "method":"getBlockHeight" - } -' -``` - -Result: - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": 1233, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBlockProduction.mdx b/docs/src/api/methods/_getBlockProduction.mdx deleted file mode 100644 index eed1b5e6fbd3b2..00000000000000 --- a/docs/src/api/methods/_getBlockProduction.mdx +++ /dev/null @@ -1,97 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, 
- CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlockProduction - -Returns recent block production information from the current or previous epoch. - - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - Only return results for this validator identity (base-58 encoded) - - - -Slot range to return block production for. If parameter not provided, defaults to current epoch. - -- `firstSlot: ` - first slot to return block production information for (inclusive) -- (optional) `lastSlot: ` - last slot to return block production information for (inclusive). If parameter not provided, defaults to the highest slot - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to: - -- `` - - `byIdentity: ` - a dictionary of validator identities, - as base-58 encoded strings. Value is a two element array containing the - number of leader slots and the number of blocks produced. - - `range: ` - Block production slot range - - `firstSlot: ` - first slot of the block production information (inclusive) - - `lastSlot: ` - last slot of block production information (inclusive) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getBlockProduction"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 9887 - }, - "value": { - "byIdentity": { - "85iYT5RuzRTDgjyRa3cP8SYhM2j21fj7NhfJ3peu1DPr": [9888, 9886] - }, - "range": { - "firstSlot": 0, - "lastSlot": 9887 - } - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBlockTime.mdx b/docs/src/api/methods/_getBlockTime.mdx deleted file mode 100644 index 7fc991b5b8690e..00000000000000 --- a/docs/src/api/methods/_getBlockTime.mdx +++ /dev/null @@ -1,81 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlockTime - -Returns the estimated production time of a block. - -:::info -Each validator reports their UTC time to the ledger on a regular interval by -intermittently adding a timestamp to a Vote for a particular block. A requested -block's time is calculated from the stake-weighted mean of the Vote timestamps -in a set of recent blocks recorded on the ledger. 
-::: - - - - - -### Parameters: - - - block number, identified by Slot - - -### Result: - -- `` - estimated production time, as Unix timestamp (seconds since the Unix epoch) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0", "id":1, - "method": "getBlockTime", - "params":[5] - } -' -``` - -### Response: - -When a block time is available: - -```json -{ - "jsonrpc": "2.0", - "result": 1574721591, - "id": 1 -} -``` - -When a block time is not available: - -```json -{ - "jsonrpc": "2.0", - "error": { - "code": -32004, - "message": "Block not available for slot 150" - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBlocks.mdx b/docs/src/api/methods/_getBlocks.mdx deleted file mode 100644 index d46927939b2b51..00000000000000 --- a/docs/src/api/methods/_getBlocks.mdx +++ /dev/null @@ -1,86 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlocks - -Returns a list of confirmed blocks between two slots - - - - - -### Parameters: - - - start_slot, as u64 integer - - - - end_slot, as u64 integer (must be no more than 500,000 blocks - higher than the `start_slot`) - - - - -Configuration object containing the following fields: - - - -- "processed" is not supported - - - - - -### Result: - -The result field will be an array of u64 integers listing confirmed blocks -between `start_slot` and either `end_slot` - if provided, or latest confirmed block, -inclusive. Max range allowed is 500,000 slots. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getBlocks", - "params": [ - 5, 10 - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [5, 6, 7, 8, 9, 10], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getBlocksWithLimit.mdx b/docs/src/api/methods/_getBlocksWithLimit.mdx deleted file mode 100644 index 04586dea302d25..00000000000000 --- a/docs/src/api/methods/_getBlocksWithLimit.mdx +++ /dev/null @@ -1,84 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getBlocksWithLimit - -Returns a list of confirmed blocks starting at the given slot - - - - - -### Parameters: - - - start_slot, as u64 integer - - - - limit, as u64 integer (must be no more than 500,000 blocks higher - than the start_slot) - - - - -Configuration object containing the following field: - - - -- "processed" is not supported - - - - - -### Result: - -The result field will be an array of u64 integers listing confirmed blocks -starting at `start_slot` for up to `limit` blocks, inclusive. 
- - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id":1, - "method":"getBlocksWithLimit", - "params":[5, 3] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [5, 6, 7], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getClusterNodes.mdx b/docs/src/api/methods/_getClusterNodes.mdx deleted file mode 100644 index 735e3aff27fc8e..00000000000000 --- a/docs/src/api/methods/_getClusterNodes.mdx +++ /dev/null @@ -1,71 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getClusterNodes - -Returns information about all the nodes participating in the cluster - - - - -### Parameters: - -**None** - -### Result: - -The result field will be an array of JSON objects, each with the following sub fields: - -- `pubkey: ` - Node public key, as base-58 encoded string -- `gossip: ` - Gossip network address for the node -- `tpu: ` - TPU network address for the node -- `rpc: ` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled -- `version: ` - The software version of the node, or `null` if the version information is not available -- `featureSet: ` - The unique identifier of the node's feature set -- `shredVersion: ` - The shred version the node has been configured to use - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getClusterNodes" - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "gossip": "10.239.6.48:8001", - "pubkey": "9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ", - "rpc": "10.239.6.48:8899", - "tpu": "10.239.6.48:8856", - "version": "1.0.0 c375ce1f" - } - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getEpochInfo.mdx b/docs/src/api/methods/_getEpochInfo.mdx deleted file mode 100644 index 4b8cb1c23b6449..00000000000000 --- a/docs/src/api/methods/_getEpochInfo.mdx +++ /dev/null @@ -1,81 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getEpochInfo - -Returns information about the current epoch - - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -The result field will be an object with the following fields: - -- `absoluteSlot: ` - the current slot -- `blockHeight: ` - the current block height -- `epoch: ` - the current epoch -- `slotIndex: ` - the current slot relative to the start of the current epoch -- `slotsInEpoch: ` - the number of slots in this epoch -- `transactionCount: ` - total number of transactions processed without error since genesis - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getEpochInfo"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "absoluteSlot": 166598, - "blockHeight": 166500, - "epoch": 27, - "slotIndex": 2790, - "slotsInEpoch": 8192, - "transactionCount": 22661093 - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getEpochSchedule.mdx b/docs/src/api/methods/_getEpochSchedule.mdx deleted file mode 100644 index 2e11e8a4be0d93..00000000000000 --- 
a/docs/src/api/methods/_getEpochSchedule.mdx +++ /dev/null @@ -1,67 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getEpochSchedule - -Returns the epoch schedule information from this cluster's genesis config - - - - -### Parameters: - -**None** - -### Result: - -The result field will be an object with the following fields: - -- `slotsPerEpoch: ` - the maximum number of slots in each epoch -- `leaderScheduleSlotOffset: ` - the number of slots before beginning of an epoch to calculate a leader schedule for that epoch -- `warmup: ` - whether epochs start short and grow -- `firstNormalEpoch: ` - first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH) -- `firstNormalSlot: ` - MINIMUM_SLOTS_PER_EPOCH \* (2.pow(firstNormalEpoch) - 1) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0","id":1, - "method":"getEpochSchedule" - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "firstNormalEpoch": 8, - "firstNormalSlot": 8160, - "leaderScheduleSlotOffset": 8192, - "slotsPerEpoch": 8192, - "warmup": true - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getFeeForMessage.mdx b/docs/src/api/methods/_getFeeForMessage.mdx deleted file mode 100644 index 85a81f589b0fc9..00000000000000 --- a/docs/src/api/methods/_getFeeForMessage.mdx +++ /dev/null @@ -1,86 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getFeeForMessage - -Get the fee the network will charge for a particular Message - -:::caution -**NEW: This method is only available in solana-core v1.9 or newer. 
Please use -[getFees](#getFees) for solana-core v1.8** -::: - - - - -### Parameters: - - - Base-64 encoded Message - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -- `` - Fee corresponding to the message at the specified blockhash - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' -{ - "id":1, - "jsonrpc":"2.0", - "method":"getFeeForMessage", - "params":[ - "AQABAgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAQAA", - { - "commitment":"processed" - } - ] -} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { "context": { "slot": 5068 }, "value": 5000 }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getFirstAvailableBlock.mdx b/docs/src/api/methods/_getFirstAvailableBlock.mdx deleted file mode 100644 index 97139e17b5bf60..00000000000000 --- a/docs/src/api/methods/_getFirstAvailableBlock.mdx +++ /dev/null @@ -1,50 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getFirstAvailableBlock - -Returns the slot of the lowest confirmed block that has not been purged from the ledger - - - - -### Parameters: - -**None** - -### Result: - -- `` - Slot - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0","id":1, - "method":"getFirstAvailableBlock" - } -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 250000, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_getGenesisHash.mdx b/docs/src/api/methods/_getGenesisHash.mdx deleted file mode 100644 index 4a7802d8b1b266..00000000000000 --- a/docs/src/api/methods/_getGenesisHash.mdx +++ /dev/null @@ -1,51 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getGenesisHash - -Returns the genesis hash - - - - -### Parameters: - -**None** - -### Result: - -- `` - a Hash as base-58 encoded string - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getGenesisHash"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getHealth.mdx b/docs/src/api/methods/_getHealth.mdx deleted file mode 100644 index ceb30cc40fa04c..00000000000000 --- a/docs/src/api/methods/_getHealth.mdx +++ /dev/null @@ -1,83 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getHealth - -Returns the current health of the node. A healthy node is one that is within -`HEALTH_CHECK_SLOT_DISTANCE` slots of the latest cluster confirmed slot. - - - - -### Parameters: - -**None** - -### Result: - -If the node is healthy: "ok" - -If the node is unhealthy, a JSON RPC error response is returned. 
The specifics of the error response are **UNSTABLE** and may change in the future - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getHealth"} -' -``` - -### Response: - -Healthy Result: - -```json -{ "jsonrpc": "2.0", "result": "ok", "id": 1 } -``` - -Unhealthy Result (generic): - -```json -{ - "jsonrpc": "2.0", - "error": { - "code": -32005, - "message": "Node is unhealthy", - "data": {} - }, - "id": 1 -} -``` - -Unhealthy Result (if additional information is available) - -```json -{ - "jsonrpc": "2.0", - "error": { - "code": -32005, - "message": "Node is behind by 42 slots", - "data": { - "numSlotsBehind": 42 - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getHighestSnapshotSlot.mdx b/docs/src/api/methods/_getHighestSnapshotSlot.mdx deleted file mode 100644 index 73e0603bae5848..00000000000000 --- a/docs/src/api/methods/_getHighestSnapshotSlot.mdx +++ /dev/null @@ -1,78 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getHighestSnapshotSlot - -Returns the highest slot information that the node has snapshots for. - -This will find the highest full snapshot slot, and the highest incremental -snapshot slot _based on_ the full snapshot slot, if there is one. - -:::caution -NEW: This method is only available in solana-core v1.9 or newer. Please use -[getSnapshotSlot](/api/http#getsnapshotslot) for solana-core v1.8 -::: - - - - -### Parameters: - -**None** - -### Result: - -When the node has a snapshot, this returns a JSON object with the following fields: - -- `full: ` - Highest full snapshot slot -- `incremental: ` - Highest incremental snapshot slot _based on_ `full` - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1,"method":"getHighestSnapshotSlot"} -' -``` - -### Response: - -Result when the node has a snapshot: - -```json -{ - "jsonrpc": "2.0", - "result": { - "full": 100, - "incremental": 110 - }, - "id": 1 -} -``` - -Result when the node has no snapshot: - -```json -{ - "jsonrpc": "2.0", - "error": { "code": -32008, "message": "No snapshot" }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getIdentity.mdx b/docs/src/api/methods/_getIdentity.mdx deleted file mode 100644 index 263ebb28d7f824..00000000000000 --- a/docs/src/api/methods/_getIdentity.mdx +++ /dev/null @@ -1,55 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getIdentity - -Returns the identity pubkey for the current node - - - - -### Parameters: - -**None** - -### Result: - -The result field will be a JSON object with the following fields: - -- `identity` - the identity pubkey of the current node \(as a base-58 encoded string\) - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getIdentity"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "identity": "2r1F4iWqVcb8M1DbAjQuFpebkQHY9hcVU4WuW2DJBppN" - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getInflationGovernor.mdx b/docs/src/api/methods/_getInflationGovernor.mdx deleted file mode 100644 index 206fa9a60c498a..00000000000000 --- a/docs/src/api/methods/_getInflationGovernor.mdx 
+++ /dev/null @@ -1,75 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getInflationGovernor - -Returns the current inflation governor - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result field will be a JSON object with the following fields: - -- `initial: ` - the initial inflation percentage from time 0 -- `terminal: ` - terminal inflation percentage -- `taper: ` - rate per year at which inflation is lowered. (Rate reduction is derived using the target slot time in genesis config) -- `foundation: ` - percentage of total inflation allocated to the foundation -- `foundationTerm: ` - duration of foundation pool inflation in years - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getInflationGovernor"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "foundation": 0.05, - "foundationTerm": 7, - "initial": 0.15, - "taper": 0.15, - "terminal": 0.015 - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getInflationRate.mdx b/docs/src/api/methods/_getInflationRate.mdx deleted file mode 100644 index 1cc987aab13c1c..00000000000000 --- a/docs/src/api/methods/_getInflationRate.mdx +++ /dev/null @@ -1,62 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getInflationRate - -Returns the specific inflation values for the current epoch - - - - -### Parameters: - -**None** - -### Result: - -The result field will be a JSON object with the following fields: - -- `total: ` - total inflation -- `validator: ` -inflation allocated to validators -- `foundation: ` - inflation allocated to the foundation -- `epoch: ` - epoch for which these values are valid - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getInflationRate"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "epoch": 100, - "foundation": 0.001, - "total": 0.149, - "validator": 0.148 - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getInflationReward.mdx b/docs/src/api/methods/_getInflationReward.mdx deleted file mode 100644 index 840a6f520fa34c..00000000000000 --- a/docs/src/api/methods/_getInflationReward.mdx +++ /dev/null @@ -1,101 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getInflationReward - -Returns the inflation / staking reward for a list of addresses for an epoch - - - - -### Parameters: - - - An array of addresses to query, as base-58 encoded strings - - - - -Configuration object containing the following fields: - - - - - An epoch for which the reward occurs. 
If omitted, the previous epoch will be - used - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -The result field will be a JSON array with the following fields: - -- `epoch: ` - epoch for which reward occured -- `effectiveSlot: ` - the slot in which the rewards are effective -- `amount: ` - reward amount in lamports -- `postBalance: ` - post balance of the account in lamports -- `commission: ` - vote account commission when the reward was credited - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getInflationReward", - "params": [ - [ - "6dmNQ5jwLeLk5REvio1JcMshcbvkYMwy26sJ8pbkvStu", - "BGsqMegLpV6n6Ve146sSX2dTjUMj3M92HnU8BbNRMhF2" - ], - {"epoch": 2} - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "amount": 2500, - "effectiveSlot": 224, - "epoch": 2, - "postBalance": 499999442500 - }, - null - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getLargestAccounts.mdx b/docs/src/api/methods/_getLargestAccounts.mdx deleted file mode 100644 index aef3e9f8202c57..00000000000000 --- a/docs/src/api/methods/_getLargestAccounts.mdx +++ /dev/null @@ -1,150 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getLargestAccounts - -Returns the 20 largest accounts, by lamport balance (results may be cached up to two hours) - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - filter results by account type - - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to an array of `` containing: - -- `address: ` - base-58 encoded address of the account -- `lamports: ` - number of lamports in the account, as a u64 - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getLargestAccounts"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 54 - }, - "value": [ - { - "lamports": 999974, - "address": "99P8ZgtJYe1buSK8JXkvpLh8xPsCFuLYhz9hQFNw93WJ" - }, - { - "lamports": 42, - "address": "uPwWLo16MVehpyWqsLkK3Ka8nLowWvAHbBChqv2FZeL" - }, - { - "lamports": 42, - "address": "aYJCgU7REfu3XF8b3QhkqgqQvLizx8zxuLBHA25PzDS" - }, - { - "lamports": 42, - "address": "CTvHVtQ4gd4gUcw3bdVgZJJqApXE9nCbbbP4VTS5wE1D" - }, - { - "lamports": 20, - "address": "4fq3xJ6kfrh9RkJQsmVd5gNMvJbuSHfErywvEjNQDPxu" - }, - { - "lamports": 4, - "address": "AXJADheGVp9cruP8WYu46oNkRbeASngN5fPCMVGQqNHa" - }, - { - "lamports": 2, - "address": "8NT8yS6LiwNprgW4yM1jPPow7CwRUotddBVkrkWgYp24" - }, - { - "lamports": 1, - "address": "SysvarEpochSchedu1e111111111111111111111111" - }, - { - "lamports": 1, - "address": "11111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "Stake11111111111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "SysvarC1ock11111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "StakeConfig11111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "SysvarRent111111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "Config1111111111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "SysvarStakeHistory1111111111111111111111111" - }, - { - "lamports": 1, - "address": 
"SysvarRecentB1ockHashes11111111111111111111" - }, - { - "lamports": 1, - "address": "SysvarFees111111111111111111111111111111111" - }, - { - "lamports": 1, - "address": "Vote111111111111111111111111111111111111111" - } - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getLatestBlockhash.mdx b/docs/src/api/methods/_getLatestBlockhash.mdx deleted file mode 100644 index 85724a785c71ec..00000000000000 --- a/docs/src/api/methods/_getLatestBlockhash.mdx +++ /dev/null @@ -1,92 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getLatestBlockhash - -Returns the latest blockhash - -:::caution -NEW: This method is only available in solana-core v1.9 or newer. Please use -[getRecentBlockhash](#getrecentblockhash) for solana-core v1.8 -::: - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -`RpcResponse` - RpcResponse JSON object with `value` field set to a JSON object including: - -- `blockhash: ` - a Hash as base-58 encoded string -- `lastValidBlockHeight: ` - last [block height](../../terminology.md#block-height) at which the blockhash will be valid - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "id":1, - "jsonrpc":"2.0", - "method":"getLatestBlockhash", - "params":[ - { - "commitment":"processed" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 2792 - }, - "value": { - "blockhash": "EkSnNWid2cvwEVnVx9aBqawnmiCNiDgp3gUdkDPTKN1N", - "lastValidBlockHeight": 3090 - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getLeaderSchedule.mdx b/docs/src/api/methods/_getLeaderSchedule.mdx deleted file mode 100644 index ee6803cb0ed49f..00000000000000 --- a/docs/src/api/methods/_getLeaderSchedule.mdx +++ /dev/null @@ -1,96 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getLeaderSchedule - -Returns the leader schedule for an epoch - - - - -### Parameters: - - -Fetch the leader schedule for the epoch that corresponds to the provided slot. - -
- If unspecified, the leader schedule for the current epoch is fetched
    - - - -Configuration object containing the following fields: - - - - - Only return results for this validator identity (base-58 encoded) - - - - -### Result: - -Returns a result with one of the two following values: - -- `` - if requested epoch is not found, or -- `` - the result field will be a dictionary of validator identities, - as base-58 encoded strings, and their corresponding leader slot indices as values - (indices are relative to the first slot in the requested epoch) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getLeaderSchedule", - "params": [ - null, - { - "identity": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F": [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, 63 - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getMaxRetransmitSlot.mdx b/docs/src/api/methods/_getMaxRetransmitSlot.mdx deleted file mode 100644 index 17a104750e5cd6..00000000000000 --- a/docs/src/api/methods/_getMaxRetransmitSlot.mdx +++ /dev/null @@ -1,48 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getMaxRetransmitSlot - -Get the max slot seen from retransmit stage. - - - - -### Parameters: - -**None** - -### Result: - -`` - Slot number - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getMaxRetransmitSlot"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 1234, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_getMaxShredInsertSlot.mdx b/docs/src/api/methods/_getMaxShredInsertSlot.mdx deleted file mode 100644 index d776870ed9cc2c..00000000000000 --- a/docs/src/api/methods/_getMaxShredInsertSlot.mdx +++ /dev/null @@ -1,48 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getMaxShredInsertSlot - -Get the max slot seen from after shred insert. - - - - -### Parameters: - -**None** - -### Result: - -`` - Slot number - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getMaxShredInsertSlot"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 1234, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_getMinimumBalanceForRentExemption.mdx b/docs/src/api/methods/_getMinimumBalanceForRentExemption.mdx deleted file mode 100644 index 3f528284413647..00000000000000 --- a/docs/src/api/methods/_getMinimumBalanceForRentExemption.mdx +++ /dev/null @@ -1,67 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getMinimumBalanceForRentExemption - -Returns minimum balance required to make account rent exempt. 
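The same calculation is exposed by the Solana CLI, which can be quicker for one-off checks; a sketch, assuming the `solana` command-line tool is installed:

```bash
# Rent-exempt minimum for an account holding 50 bytes of data; should agree
# with the getMinimumBalanceForRentExemption sample below.
solana rent 50
```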
- - - - -### Parameters: - - - the Account's data length - - - - -Configuration object containing the following fields: - - - - - -### Result: - -`` - minimum lamports required in the Account to remain rent free - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getMinimumBalanceForRentExemption", - "params": [50] - } -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 500, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_getMultipleAccounts.mdx b/docs/src/api/methods/_getMultipleAccounts.mdx deleted file mode 100644 index b9c73a2a4d7090..00000000000000 --- a/docs/src/api/methods/_getMultipleAccounts.mdx +++ /dev/null @@ -1,148 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getMultipleAccounts - -Returns the account information for a list of Pubkeys. - - - - -### Parameters: - - - An array of Pubkeys to query, as base-58 encoded strings (up to a maximum of - 100) - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - Request a slice of the account's data. - - - `length: ` - number of bytes to return - - `offset: ` - byte offset from which to start reading - -:::info -Data slicing is only available for base58, base64, or base64+zstd encodings. -::: - - - - -encoding format for the returned Account data - - - -
    - -- `base58` is slow and limited to less than 129 bytes of Account data. -- `base64` will return base64 encoded data for Account data of any size. -- `base64+zstd` compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) - and base64-encodes the result. -- [`jsonParsed` encoding](/api/http#parsed-responses) attempts to use program-specific state parsers to - return more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls back to `base64` - encoding, detectable when the `data` field is type ``. - -
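Because `dataSlice` composes with the binary encodings listed above, large accounts can be fetched a fragment at a time. A sketch requesting only the first 32 bytes of each account, reusing the pubkeys from the sample below (the slice bounds are illustrative assumptions):

```bash
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "getMultipleAccounts",
    "params": [
      [
        "vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg",
        "4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA"
      ],
      {
        "encoding": "base64",
        "dataSlice": { "offset": 0, "length": 32 }
      }
    ]
  }
'
```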
    - -### Result: - -The result will be a JSON object with `value` equal to an array of: - -- `` - if the account at that Pubkey doesn't exist, or -- `` - a JSON object containing: - - `lamports: ` - number of lamports assigned to this account, as a u64 - - `owner: ` - base-58 encoded Pubkey of the program this account has been assigned to - - `data: <[string, encoding]|object>` - data associated with the account, either as encoded binary data or JSON format `{: }` - depending on encoding parameter - - `executable: ` - boolean indicating if the account contains a program \(and is strictly read-only\) - - `rentEpoch: ` - the epoch at which this account will next owe rent, as u64 - - `size: ` - the data size of the account - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getMultipleAccounts", - "params": [ - [ - "vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg", - "4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA" - ], - { - "encoding": "base58" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1 - }, - "value": [ - { - "data": ["", "base64"], - "executable": false, - "lamports": 1000000000, - "owner": "11111111111111111111111111111111", - "rentEpoch": 2, - "space": 16 - }, - { - "data": ["", "base64"], - "executable": false, - "lamports": 5000000000, - "owner": "11111111111111111111111111111111", - "rentEpoch": 2, - "space": 0 - } - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getProgramAccounts.mdx b/docs/src/api/methods/_getProgramAccounts.mdx deleted file mode 100644 index 0104a0cfd852e4..00000000000000 --- a/docs/src/api/methods/_getProgramAccounts.mdx +++ /dev/null @@ -1,164 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getProgramAccounts - -Returns all accounts owned by the provided program Pubkey - - - - -### Parameters: - - - Pubkey of program, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - wrap the result in an RpcResponse JSON object - - - - -encoding format for the returned Account data - - - -
    - -- `base58` is slow and limited to less than 129 bytes of Account data. -- `base64` will return base64 encoded data for Account data of any size. -- `base64+zstd` compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) and - base64-encodes the result. -- [`jsonParsed` encoding](/api/http#parsed-responses) attempts to use program-specific state - parsers to return more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls back - to `base64` encoding, detectable when the `data` field is type ``. - -
    - - - Request a slice of the account's data. - - - `length: ` - number of bytes to return - - `offset: ` - byte offset from which to start reading - -:::info -Data slicing is only available for base58, base64, or base64+zstd encodings. -::: - - - - -filter results using up to 4 filter objects - -:::info -The resultant account(s) must meet **ALL** filter criteria to be included in the returned results -::: - - - -
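Filters also compose with `dataSlice`; a zero-length slice returns the matching pubkeys without any account bytes, which keeps responses small when only addresses are needed. A sketch under those assumptions, using the program id from the sample below:

```bash
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "getProgramAccounts",
    "params": [
      "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
      {
        "encoding": "base64",
        "dataSlice": { "offset": 0, "length": 0 },
        "filters": [{ "dataSize": 17 }]
      }
    ]
  }
'
```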
    - -### Result: - -By default, the result field will be an array of JSON objects. - -:::info -If `withContext` flag is set the array will be wrapped in an RpcResponse JSON object. -::: - -The resultant response array will contain: - -- `pubkey: ` - the account Pubkey as base-58 encoded string -- `account: ` - a JSON object, with the following sub fields: - - `lamports: ` - number of lamports assigned to this account, as a u64 - - `owner: ` - base-58 encoded Pubkey of the program this account has been assigned to - - `data: <[string,encoding]|object>` - data associated with the account, either as encoded binary data or JSON format `{: }` - depending on encoding parameter - - `executable: ` - boolean indicating if the account contains a program \(and is strictly read-only\) - - `rentEpoch: ` - the epoch at which this account will next owe rent, as u64 - - `size: ` - the data size of the account - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getProgramAccounts", - "params": [ - "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", - { - "filters": [ - { - "dataSize": 17 - }, - { - "memcmp": { - "offset": 4, - "bytes": "3Mc6vR" - } - } - ] - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "account": { - "data": "2R9jLfiAQ9bgdcw6h8s44439", - "executable": false, - "lamports": 15298080, - "owner": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", - "rentEpoch": 28, - "space": 42 - }, - "pubkey": "CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY" - } - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getRecentPerformanceSamples.mdx b/docs/src/api/methods/_getRecentPerformanceSamples.mdx deleted file mode 100644 index 9b07a1322e3e12..00000000000000 --- a/docs/src/api/methods/_getRecentPerformanceSamples.mdx +++ /dev/null @@ -1,103 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getRecentPerformanceSamples - -Returns a list of recent performance samples, in reverse slot order. Performance samples are taken every 60 seconds and -include the number of transactions and slots that occur in a given time window. - - - - -### Parameters: - - - -number of samples to return (maximum 720) - - - -### Result: - -An array of `RpcPerfSample` with the following fields: - -- `slot: ` - Slot in which sample was taken at -- `numTransactions: ` - Number of transactions processed during the sample period -- `numSlots: ` - Number of slots completed during the sample period -- `samplePeriodSecs: ` - Number of seconds in a sample window -- `numNonVoteTransaction: ` - Number of non-vote transactions processed during the - sample period. - -:::info -`numNonVoteTransaction` is present starting with v1.15. - -To get a number of voting transactions compute:
    -`numTransactions - numNonVoteTransaction` -::: - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0", "id":1, - "method": "getRecentPerformanceSamples", - "params": [4]} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "numSlots": 126, - "numTransactions": 126, - "numNonVoteTransaction": 1, - "samplePeriodSecs": 60, - "slot": 348125 - }, - { - "numSlots": 126, - "numTransactions": 126, - "numNonVoteTransaction": 1, - "samplePeriodSecs": 60, - "slot": 347999 - }, - { - "numSlots": 125, - "numTransactions": 125, - "numNonVoteTransaction": 0, - "samplePeriodSecs": 60, - "slot": 347873 - }, - { - "numSlots": 125, - "numTransactions": 125, - "numNonVoteTransaction": 0, - "samplePeriodSecs": 60, - "slot": 347748 - } - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getRecentPrioritizationFees.mdx b/docs/src/api/methods/_getRecentPrioritizationFees.mdx deleted file mode 100644 index 2c88d512601b4e..00000000000000 --- a/docs/src/api/methods/_getRecentPrioritizationFees.mdx +++ /dev/null @@ -1,95 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getRecentPrioritizationFees - -Returns a list of prioritization fees from recent blocks. - -:::info -Currently, a node's prioritization-fee cache stores data from up to 150 blocks. -::: - - - - -### Parameters: - - - -An array of Account addresses (up to a maximum of 128 addresses), as base-58 encoded strings - -:::note -If this parameter is provided, the response will reflect a fee to land a transaction locking all of the provided accounts as writable. -::: - - - -### Result: - -An array of `RpcPrioritizationFee` with the following fields: - -- `slot: ` - slot in which the fee was observed -- `prioritizationFee: ` - the per-compute-unit fee paid by at least - one successfully landed transaction, specified in increments of micro-lamports (0.000001 lamports) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0", "id":1, - "method": "getRecentPrioritizationFees", - "params": [ - ["CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY"] - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "slot": 348125, - "prioritizationFee": 0 - }, - { - "slot": 348126, - "prioritizationFee": 1000 - }, - { - "slot": 348127, - "prioritizationFee": 500 - }, - { - "slot": 348128, - "prioritizationFee": 0 - }, - { - "slot": 348129, - "prioritizationFee": 1234 - } - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getSignatureStatuses.mdx b/docs/src/api/methods/_getSignatureStatuses.mdx deleted file mode 100644 index 9617d7d228fa12..00000000000000 --- a/docs/src/api/methods/_getSignatureStatuses.mdx +++ /dev/null @@ -1,114 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSignatureStatuses - -Returns the statuses of a list of signatures. Each signature must be a [txid](/terminology#transaction-id), the first signature of a transaction. - -:::info -Unless the `searchTransactionHistory` configuration parameter is included, -this method only searches the recent status cache of signatures, which -retains statuses for all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots. 
-::: - - - - -### Parameters: - - - An array of transaction signatures to confirm, as base-58 encoded strings (up - to a maximum of 256) - - - - -Configuration object containing the following fields: - - - -if `true` - a Solana node will search its ledger cache for any signatures not -found in the recent status cache - - - - - -### Result: - -An array of `RpcResponse` consisting of either: - -- `` - Unknown transaction, or -- `` - - `slot: ` - The slot the transaction was processed - - `confirmations: ` - Number of blocks since signature confirmation, null if rooted, as well as finalized by a supermajority of the cluster - - `err: ` - Error if transaction failed, null if transaction succeeded. - See [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - - `confirmationStatus: ` - The transaction's cluster confirmation status; - Either `processed`, `confirmed`, or `finalized`. See [Commitment](/api/http#configuring-state-commitment) for more on optimistic confirmation. - - DEPRECATED: `status: ` - Transaction status - - `"Ok": ` - Transaction was successful - - `"Err": ` - Transaction failed with TransactionError - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getSignatureStatuses", - "params": [ - [ - "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW" - ], - { - "searchTransactionHistory": true - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 82 - }, - "value": [ - { - "slot": 48, - "confirmations": null, - "err": null, - "status": { - "Ok": null - }, - "confirmationStatus": "finalized" - }, - null - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getSignaturesForAddress.mdx b/docs/src/api/methods/_getSignaturesForAddress.mdx deleted file mode 100644 index dc0517058d34c7..00000000000000 --- a/docs/src/api/methods/_getSignaturesForAddress.mdx +++ /dev/null @@ -1,117 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSignaturesForAddress - -Returns signatures for confirmed transactions that include the given address in -their `accountKeys` list. Returns signatures backwards in time from the -provided signature or most recent confirmed block - - - - -### Parameters: - - - Account address as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - maximum transaction signatures to return (between 1 and 1,000). - - - - start searching backwards from this transaction signature. If not provided the - search starts from the top of the highest max confirmed block. - - - - search until this transaction signature, if found before limit reached - - - - -### Result: - -An array of ``, ordered from **newest** to **oldest** transaction, containing transaction -signature information with the following fields: - -- `signature: ` - transaction signature as base-58 encoded string -- `slot: ` - The slot that contains the block with the transaction -- `err: ` - Error if transaction failed, null if transaction succeeded. 
- See [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - for more info. -- `memo: ` - Memo associated with the transaction, null if no memo is present -- `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch) - of when transaction was processed. null if not available. -- `confirmationStatus: ` - The transaction's cluster confirmation status; - Either `processed`, `confirmed`, or `finalized`. See [Commitment](/api/http#configuring-state-commitment) - for more on optimistic confirmation. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getSignaturesForAddress", - "params": [ - "Vote111111111111111111111111111111111111111", - { - "limit": 1 - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": [ - { - "err": null, - "memo": null, - "signature": "5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv", - "slot": 114, - "blockTime": null - } - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getSlot.mdx b/docs/src/api/methods/_getSlot.mdx deleted file mode 100644 index 8693f8f3bb038e..00000000000000 --- a/docs/src/api/methods/_getSlot.mdx +++ /dev/null @@ -1,63 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSlot - -Returns the slot that has reached the [given or default commitment level](/api/http#configuring-state-commitment) - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -`` - Current slot - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getSlot"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 1234, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_getSlotLeader.mdx b/docs/src/api/methods/_getSlotLeader.mdx deleted file mode 100644 index 7f8550ee1b8bab..00000000000000 --- a/docs/src/api/methods/_getSlotLeader.mdx +++ /dev/null @@ -1,67 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSlotLeader - -Returns the current slot leader - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -`` - Node identity Pubkey as base-58 encoded string - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getSlotLeader"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": "ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS", - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getSlotLeaders.mdx b/docs/src/api/methods/_getSlotLeaders.mdx deleted file mode 100644 index 386a74fb44de94..00000000000000 --- a/docs/src/api/methods/_getSlotLeaders.mdx +++ /dev/null @@ -1,77 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSlotLeaders - -Returns the slot leaders 
for a given slot range - - - - -### Parameters: - - - Start slot, as u64 integer - - - - Limit, as u64 integer (between 1 and 5,000) - - -### Result: - -`` - array of Node identity public keys as base-58 encoded strings - - - - - -### Code sample: - -If the current slot is `#99` - query the next `10` leaders with the following request: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0", "id": 1, - "method": "getSlotLeaders", - "params": [100, 10] - } -' -``` - -### Response: - -The first leader returned is the leader for slot `#100`: - -```json -{ - "jsonrpc": "2.0", - "result": [ - "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", - "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", - "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", - "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", - "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", - "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", - "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", - "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", - "DWvDTSh3qfn88UoQTEKRV2JnLt5jtJAVoiCo3ivtMwXP", - "DWvDTSh3qfn88UoQTEKRV2JnLt5jtJAVoiCo3ivtMwXP" - ], - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getStakeActivation.mdx b/docs/src/api/methods/_getStakeActivation.mdx deleted file mode 100644 index 28b2d8a81438b2..00000000000000 --- a/docs/src/api/methods/_getStakeActivation.mdx +++ /dev/null @@ -1,96 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getStakeActivation - -Returns epoch activation information for a stake account - - - - -### Parameters: - - - Pubkey of stake Account to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - epoch for which to calculate activation details. If parameter not provided, - defaults to current epoch. - **DEPRECATED**, inputs other than the current epoch return an error. - - - - -### Result: - -The result will be a JSON object with the following fields: - -- `state: ` - the stake account's activation state, - either: `active`, `inactive`, `activating`, or `deactivating` -- `active: ` - stake active during the epoch -- `inactive: ` - stake inactive during the epoch - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getStakeActivation", - "params": [ - "CYRJWqiSjLitBAcRxPvWpgX3s5TvmN2SuRY3eEYypFvT", - { - "epoch": 4 - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "active": 124429280, - "inactive": 73287840, - "state": "activating" - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getStakeMinimumDelegation.mdx b/docs/src/api/methods/_getStakeMinimumDelegation.mdx deleted file mode 100644 index 94e01ac87052e9..00000000000000 --- a/docs/src/api/methods/_getStakeMinimumDelegation.mdx +++ /dev/null @@ -1,73 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getStakeMinimumDelegation - -Returns the stake minimum delegation, in lamports. 
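Only the `value` field of the RpcResponse shown below is usually of interest; a sketch extracting it with `jq` (an external tool, not part of this API):

```bash
curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getStakeMinimumDelegation"}' \
  | jq '.result.value'   # minimum delegation, in lamports
```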
- - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to: - -- `` - The stake minimum delegation, in lamports - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc":"2.0", "id":1, - "method": "getStakeMinimumDelegation" - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 501 - }, - "value": 1000000000 - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getSupply.mdx b/docs/src/api/methods/_getSupply.mdx deleted file mode 100644 index a1d8915a1b87d1..00000000000000 --- a/docs/src/api/methods/_getSupply.mdx +++ /dev/null @@ -1,87 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getSupply - -Returns information about the current supply. - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - exclude non circulating accounts list from response - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to a JSON object containing: - -- `total: ` - Total supply in lamports -- `circulating: ` - Circulating supply in lamports -- `nonCirculating: ` - Non-circulating supply in lamports -- `nonCirculatingAccounts: ` - an array of account addresses of non-circulating accounts, as strings. If `excludeNonCirculatingAccountsList` is enabled, the returned array will be empty. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0", "id":1, "method":"getSupply"} -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1114 - }, - "value": { - "circulating": 16000, - "nonCirculating": 1000000, - "nonCirculatingAccounts": [ - "FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5", - "9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA", - "3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9", - "BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z" - ], - "total": 1016000 - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getTokenAccountBalance.mdx b/docs/src/api/methods/_getTokenAccountBalance.mdx deleted file mode 100644 index e0cb785f7d6038..00000000000000 --- a/docs/src/api/methods/_getTokenAccountBalance.mdx +++ /dev/null @@ -1,91 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTokenAccountBalance - -Returns the token balance of an SPL Token account. - - - - -### Parameters: - - - Pubkey of Token account to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to a JSON object containing: - -- `amount: ` - the raw balance without decimals, a string representation of u64 -- `decimals: ` - number of base 10 digits to the right of the decimal place -- `uiAmount: ` - the balance, using mint-prescribed decimals **DEPRECATED** -- `uiAmountString: ` - the balance as a string, using mint-prescribed decimals - -For more details on returned data, the [Token Balances Structure](#token-balances-structure) -response from [getBlock](#getblock) follows a similar structure. 
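The deprecated `uiAmount` can always be reconstructed from the stable fields as `amount / 10^decimals`; a sketch with `bc` (an assumption, not part of this API), using the values from the response below:

```bash
# uiAmountString = amount / 10^decimals; e.g. a raw amount of "9864" with 2 decimals:
echo "scale=2; 9864 / 10^2" | bc   # prints 98.64
```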
- - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getTokenAccountBalance", - "params": [ - "7fUAJdStEuGbc3sM84cKRL6yYaaSstyLSU4ve5oovLS7" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1114 - }, - "value": { - "amount": "9864", - "decimals": 2, - "uiAmount": 98.64, - "uiAmountString": "98.64" - }, - "id": 1 - } -} -``` - - - - diff --git a/docs/src/api/methods/_getTokenAccountsByDelegate.mdx b/docs/src/api/methods/_getTokenAccountsByDelegate.mdx deleted file mode 100644 index 7125724573339d..00000000000000 --- a/docs/src/api/methods/_getTokenAccountsByDelegate.mdx +++ /dev/null @@ -1,180 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTokenAccountsByDelegate - -Returns all SPL Token accounts by approved Delegate. - - - - -### Parameters: - - - Pubkey of account delegate to query, as base-58 encoded string - - - - -A JSON object with one of the following fields: - -- `mint: ` - Pubkey of the specific token Mint to limit accounts to, as base-58 encoded string; or -- `programId: ` - Pubkey of the Token program that owns the accounts, as base-58 encoded string - - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - Request a slice of the account's data. - - - `length: ` - number of bytes to return - - `offset: ` - byte offset from which to start reading - -:::info -Data slicing is only available for base58, base64, or base64+zstd encodings. -::: - - - - -Encoding format for Account data - - - -
    - -- `base58` is slow and limited to less than 129 bytes of Account data. -- `base64` will return base64 encoded data for Account data of any size. -- `base64+zstd` compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) - and base64-encodes the result. -- `jsonParsed` encoding attempts to use program-specific state parsers to return - more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls - back to `base64` encoding, detectable when the `data` field is type `string`. - -
    - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to an array of JSON objects, which will contain: - -- `pubkey: ` - the account Pubkey as base-58 encoded string -- `account: ` - a JSON object, with the following sub fields: - - `lamports: ` - number of lamports assigned to this account, as a u64 - - `owner: ` - base-58 encoded Pubkey of the program this account has been assigned to - - `data: ` - Token state data associated with the account, either as encoded binary data or in JSON format `{: }` - - `executable: ` - boolean indicating if the account contains a program (and is strictly read-only\) - - `rentEpoch: ` - the epoch at which this account will next owe rent, as u64 - - `size: ` - the data size of the account - -When the data is requested with the `jsonParsed` encoding a format similar to that of the -[Token Balances Structure](#token-balances-structure) can be expected inside the structure, -both for the `tokenAmount` and the `delegatedAmount` - with the latter being an optional object. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getTokenAccountsByDelegate", - "params": [ - "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", - { - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" - }, - { - "encoding": "jsonParsed" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1114 - }, - "value": [ - { - "account": { - "data": { - "program": "spl-token", - "parsed": { - "info": { - "tokenAmount": { - "amount": "1", - "decimals": 1, - "uiAmount": 0.1, - "uiAmountString": "0.1" - }, - "delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", - "delegatedAmount": { - "amount": "1", - "decimals": 1, - "uiAmount": 0.1, - "uiAmountString": "0.1" - }, - "state": "initialized", - "isNative": false, - "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E", - "owner": "CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD" - }, - "type": "account" - }, - "space": 165 - }, - "executable": false, - "lamports": 1726080, - "owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "rentEpoch": 4, - "space": 165 - }, - "pubkey": "28YTZEwqtMHWrhWcvv34se7pjS7wctgqzCPB3gReCFKp" - } - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getTokenAccountsByOwner.mdx b/docs/src/api/methods/_getTokenAccountsByOwner.mdx deleted file mode 100644 index 27aa971189b8f2..00000000000000 --- a/docs/src/api/methods/_getTokenAccountsByOwner.mdx +++ /dev/null @@ -1,179 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTokenAccountsByOwner - -Returns all SPL Token accounts by token owner. - - - - -### Parameters: - - - Pubkey of account delegate to query, as base-58 encoded string - - - - -A JSON object with one of the following fields: - -- `mint: ` - Pubkey of the specific token Mint to limit accounts to, as base-58 encoded string; or -- `programId: ` - Pubkey of the Token program that owns the accounts, as base-58 encoded string - - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - Request a slice of the account's data. 
- - - `length: ` - number of bytes to return - - `offset: ` - byte offset from which to start reading - -:::info -Data slicing is only available for base58, base64, or base64+zstd encodings. -::: - - - - -Encoding format for Account data - - - -
    - -- `base58` is slow and limited to less than 129 bytes of Account data. -- `base64` will return base64 encoded data for Account data of any size. -- `base64+zstd` compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) - and base64-encodes the result. -- `jsonParsed` encoding attempts to use program-specific state parsers to return - more human-readable and explicit account state data. -- If `jsonParsed` is requested but a parser cannot be found, the field falls - back to `base64` encoding, detectable when the `data` field is type `string`. - -
    - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to an array of JSON objects, which will contain: - -- `pubkey: ` - the account Pubkey as base-58 encoded string -- `account: ` - a JSON object, with the following sub fields: - - `lamports: ` - number of lamports assigned to this account, as a u64 - - `owner: ` - base-58 encoded Pubkey of the program this account has been assigned to - - `data: ` - Token state data associated with the account, either as encoded binary data or in JSON format `{: }` - - `executable: ` - boolean indicating if the account contains a program \(and is strictly read-only\) - - `rentEpoch: ` - the epoch at which this account will next owe rent, as u64 - - `size: ` - the data size of the account - -When the data is requested with the `jsonParsed` encoding a format similar to that of the [Token Balances Structure](/api/http#token-balances-structure) can be expected inside the structure, both for the `tokenAmount` and the `delegatedAmount` - with the latter being an optional object. - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getTokenAccountsByOwner", - "params": [ - "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F", - { - "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E" - }, - { - "encoding": "jsonParsed" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1114 - }, - "value": [ - { - "account": { - "data": { - "program": "spl-token", - "parsed": { - "accountType": "account", - "info": { - "tokenAmount": { - "amount": "1", - "decimals": 1, - "uiAmount": 0.1, - "uiAmountString": "0.1" - }, - "delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", - "delegatedAmount": { - "amount": "1", - "decimals": 1, - "uiAmount": 0.1, - "uiAmountString": "0.1" - }, - "state": "initialized", - "isNative": false, - "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E", - "owner": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F" - }, - "type": "account" - }, - "space": 165 - }, - "executable": false, - "lamports": 1726080, - "owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "rentEpoch": 4, - "space": 165 - }, - "pubkey": "C2gJg6tKpQs41PRS1nC8aw3ZKNZK3HQQZGVrDFDup5nx" - } - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getTokenLargestAccounts.mdx b/docs/src/api/methods/_getTokenLargestAccounts.mdx deleted file mode 100644 index 387f00ad2e1bd4..00000000000000 --- a/docs/src/api/methods/_getTokenLargestAccounts.mdx +++ /dev/null @@ -1,99 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTokenLargestAccounts - -Returns the 20 largest accounts of a particular SPL Token type. 
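A sketch reducing the response to a plain address/balance listing, assuming `jq` and the mint used in the sample below:

```bash
curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0", "id": 1,
    "method": "getTokenLargestAccounts",
    "params": ["3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E"]
  }
' | jq -r '.result.value[] | "\(.address)  \(.uiAmountString)"'
```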
- - - - -### Parameters: - - - Pubkey of the token Mint to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to an array of JSON objects containing: - -- `address: ` - the address of the token account -- `amount: ` - the raw token account balance without decimals, a string representation of u64 -- `decimals: ` - number of base 10 digits to the right of the decimal place -- `uiAmount: ` - the token account balance, using mint-prescribed decimals **DEPRECATED** -- `uiAmountString: ` - the token account balance as a string, using mint-prescribed decimals - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getTokenLargestAccounts", - "params": [ - "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1114 - }, - "value": [ - { - "address": "FYjHNoFtSQ5uijKrZFyYAxvEr87hsKXkXcxkcmkBAf4r", - "amount": "771", - "decimals": 2, - "uiAmount": 7.71, - "uiAmountString": "7.71" - }, - { - "address": "BnsywxTcaYeNUtzrPxQUvzAWxfzZe3ZLUJ4wMMuLESnu", - "amount": "229", - "decimals": 2, - "uiAmount": 2.29, - "uiAmountString": "2.29" - } - ] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getTokenSupply.mdx b/docs/src/api/methods/_getTokenSupply.mdx deleted file mode 100644 index af42feee0683ac..00000000000000 --- a/docs/src/api/methods/_getTokenSupply.mdx +++ /dev/null @@ -1,88 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTokenSupply - -Returns the total supply of an SPL Token type. 
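Combined with getTokenLargestAccounts above, the raw `amount` fields allow simple concentration checks, since both calls report amounts for the same mint in the same base units. A sketch computing the share of supply held by the single largest account, assuming `jq` and `bc`:

```bash
mint="3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E"
rpc() { curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" -d "$1"; }

top=$(rpc "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"getTokenLargestAccounts\",\"params\":[\"$mint\"]}" \
  | jq -r '.result.value[0].amount')
supply=$(rpc "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"getTokenSupply\",\"params\":[\"$mint\"]}" \
  | jq -r '.result.value.amount')

# Fraction of total supply sitting in the largest account.
echo "scale=4; $top / $supply" | bc
```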
- - - - -### Parameters: - - - Pubkey of the token Mint to query, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -### Result: - -The result will be an RpcResponse JSON object with `value` equal to a JSON object containing: - -- `amount: ` - the raw total token supply without decimals, a string representation of u64 -- `decimals: ` - number of base 10 digits to the right of the decimal place -- `uiAmount: ` - the total token supply, using mint-prescribed decimals **DEPRECATED** -- `uiAmountString: ` - the total token supply as a string, using mint-prescribed decimals - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "getTokenSupply", - "params": [ - "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 1114 - }, - "value": { - "amount": "100000", - "decimals": 2, - "uiAmount": 1000, - "uiAmountString": "1000" - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getTransaction.mdx b/docs/src/api/methods/_getTransaction.mdx deleted file mode 100644 index 8a70a3219b1414..00000000000000 --- a/docs/src/api/methods/_getTransaction.mdx +++ /dev/null @@ -1,172 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTransaction - -Returns transaction details for a confirmed transaction - - - - -### Parameters: - - - Transaction signature, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - Set the max transaction version to return in responses. If the requested - transaction is a higher version, an error will be returned. If this parameter - is omitted, only legacy transactions will be returned, and any versioned - transaction will prompt the error. - - - - -Encoding for the returned Transaction - - - -
    - -- `jsonParsed` encoding attempts to use program-specific state parsers to return - more human-readable and explicit data in the `transaction.message.instructions` list. -- If `jsonParsed` is requested but a parser cannot be found, the instruction - falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). - -
    - -
    - -
    - -### Result: - -- `` - if transaction is not found or not confirmed -- `` - if transaction is confirmed, an object with the following fields: - - `slot: ` - the slot this transaction was processed in - - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter - - `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch) of when the transaction was processed. null if not available - - `meta: ` - transaction status metadata object: - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/transaction/enum.TransactionError.html) - - `fee: ` - fee this transaction was charged, as u64 integer - - `preBalances: ` - array of u64 account balances from before the transaction was processed - - `postBalances: ` - array of u64 account balances after the transaction was processed - - `innerInstructions: ` - List of [inner instructions](#inner-instructions-structure) or `null` if inner instruction recording was not enabled during this transaction - - `preTokenBalances: ` - List of [token balances](#token-balances-structure) from before the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `postTokenBalances: ` - List of [token balances](#token-balances-structure) from after the transaction was processed or omitted if token balance recording was not yet enabled during this transaction - - `logMessages: ` - array of string log messages or `null` if log message recording was not enabled during this transaction - - DEPRECATED: `status: ` - Transaction status - - `"Ok": ` - Transaction was successful - - `"Err": ` - Transaction failed with TransactionError - - `rewards: ` - transaction-level rewards, populated if rewards are requested; an array of JSON objects containing: - - `pubkey: ` - The public key, as base-58 encoded string, of the account that received the reward - - `lamports: `- number of reward lamports credited or debited by the account, as a i64 - - `postBalance: ` - account balance in lamports after the reward was applied - - `rewardType: ` - type of reward: currently only "rent", other types may be added in the future - - `commission: ` - vote account commission when the reward was credited, only present for voting and staking rewards - - `loadedAddresses: ` - Transaction addresses loaded from address lookup tables. Undefined if `maxSupportedTransactionVersion` is not set in request params, or if `jsonParsed` encoding is set in request params. - - `writable: ` - Ordered list of base-58 encoded addresses for writable loaded accounts - - `readonly: ` - Ordered list of base-58 encoded addresses for readonly loaded accounts - - `returnData: ` - the most-recent return data generated by an instruction in the transaction, with the following fields: - - `programId: ` - the program that generated the return data, as base-58 encoded Pubkey - - `data: <[string, encoding]>` - the return data itself, as base-64 encoded binary data - - `computeUnitsConsumed: ` - number of [compute units](developing/programming-model/runtime.md#compute-budget) consumed by the transaction - - `version: <"legacy"|number|undefined>` - Transaction version. Undefined if `maxSupportedTransactionVersion` is not set in request params. 
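Since omitting `maxSupportedTransactionVersion` causes any versioned transaction to error, a caller that expects them has to opt in explicitly. A minimal sketch of such a request, reusing the signature from this method's code sample (the config values shown are illustrative):

```bash
# opt in to versioned transactions up to v0; jsonParsed additionally asks for
# parsed instruction data where a program-specific parser is available
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "getTransaction",
    "params": [
      "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv",
      { "encoding": "jsonParsed", "maxSupportedTransactionVersion": 0 }
    ]
  }
'
```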
- - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getTransaction", - "params": [ - "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv", - "json" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "meta": { - "err": null, - "fee": 5000, - "innerInstructions": [], - "postBalances": [499998932500, 26858640, 1, 1, 1], - "postTokenBalances": [], - "preBalances": [499998937500, 26858640, 1, 1, 1], - "preTokenBalances": [], - "rewards": [], - "status": { - "Ok": null - } - }, - "slot": 430, - "transaction": { - "message": { - "accountKeys": [ - "3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe", - "AjozzgE83A3x1sHNUR64hfH7zaEBWeMaFuAN9kQgujrc", - "SysvarS1otHashes111111111111111111111111111", - "SysvarC1ock11111111111111111111111111111111", - "Vote111111111111111111111111111111111111111" - ], - "header": { - "numReadonlySignedAccounts": 0, - "numReadonlyUnsignedAccounts": 3, - "numRequiredSignatures": 1 - }, - "instructions": [ - { - "accounts": [1, 2, 3, 0], - "data": "37u9WtQpcm6ULa3WRQHmj49EPs4if7o9f1jSRVZpm2dvihR9C8jY4NqEwXUbLwx15HBSNcP1", - "programIdIndex": 4 - } - ], - "recentBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B" - }, - "signatures": [ - "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv" - ] - } - }, - "blockTime": null, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_getTransactionCount.mdx b/docs/src/api/methods/_getTransactionCount.mdx deleted file mode 100644 index 3a966a1f66dbf2..00000000000000 --- a/docs/src/api/methods/_getTransactionCount.mdx +++ /dev/null @@ -1,63 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getTransactionCount - -Returns the current Transaction count from the ledger - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -`` - the current Transaction count from the ledger - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getTransactionCount"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 268, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_getVersion.mdx b/docs/src/api/methods/_getVersion.mdx deleted file mode 100644 index a5e9bc2f0e5ce0..00000000000000 --- a/docs/src/api/methods/_getVersion.mdx +++ /dev/null @@ -1,51 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getVersion - -Returns the current Solana version running on the node - - - - -### Parameters: - -**None** - -### Result: - -The result field will be a JSON object with the following fields: - -- `solana-core` - software version of solana-core as a `string` -- `feature-set` - unique identifier of the current software's feature set as a `u32` - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getVersion"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": { "feature-set": 2891131721, "solana-core": "1.16.7" }, "id": 1 } -``` - - - - diff --git 
a/docs/src/api/methods/_getVoteAccounts.mdx b/docs/src/api/methods/_getVoteAccounts.mdx deleted file mode 100644 index 04a57a3aa42207..00000000000000 --- a/docs/src/api/methods/_getVoteAccounts.mdx +++ /dev/null @@ -1,114 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## getVoteAccounts - -Returns the account info and associated stake for all the voting accounts in the current bank. - - - - -### Parameters: - - - -Configuration object containing the following fields: - - - - - Only return results for this validator vote address (base-58 encoded) - - - - Do not filter out delinquent validators with no stake - - - - Specify the number of slots behind the tip that a validator must fall to be - considered delinquent. **NOTE:** For the sake of consistency between ecosystem - products, _it is **not** recommended that this argument be specified._ - - - - -### Result: - -The result field will be a JSON object of `current` and `delinquent` accounts, -each containing an array of JSON objects with the following sub fields: - -- `votePubkey: ` - Vote account address, as base-58 encoded string -- `nodePubkey: ` - Validator identity, as base-58 encoded string -- `activatedStake: ` - the stake, in lamports, delegated to this vote account and active in this epoch -- `epochVoteAccount: ` - bool, whether the vote account is staked for this epoch -- `commission: ` - percentage (0-100) of rewards payout owed to the vote account -- `lastVote: ` - Most recent slot voted on by this vote account -- `epochCredits: ` - Latest history of earned credits for up to five epochs, as an array of arrays containing: `[epoch, credits, previousCredits]`. -- `rootSlot: ` - Current root slot for this vote account - - - - - -### Code sample: - -Restrict results to a single validator vote account: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "getVoteAccounts", - "params": [ - { - "votePubkey": "3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw" - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "current": [ - { - "commission": 0, - "epochVoteAccount": true, - "epochCredits": [ - [1, 64, 0], - [2, 192, 64] - ], - "nodePubkey": "B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD", - "lastVote": 147, - "activatedStake": 42, - "votePubkey": "3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw" - } - ], - "delinquent": [] - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_isBlockhashValid.mdx b/docs/src/api/methods/_isBlockhashValid.mdx deleted file mode 100644 index d8903dc0ee44eb..00000000000000 --- a/docs/src/api/methods/_isBlockhashValid.mdx +++ /dev/null @@ -1,89 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## isBlockhashValid - -Returns whether a blockhash is still valid or not - -:::caution -NEW: This method is only available in solana-core v1.9 or newer. 
Please use -[getFeeCalculatorForBlockhash](#getfeecalculatorforblockhash) for solana-core v1.8 -::: - - - - -### Parameters: - - - the blockhash of the block to evauluate, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - The minimum slot that the request can be evaluated at - - - - -### Result: - -`` - `true` if the blockhash is still valid - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "id":45, - "jsonrpc":"2.0", - "method":"isBlockhashValid", - "params":[ - "J7rBdM6AecPDEZp8aPq5iPSNKVkU5Q76F3oAV4eW5wsW", - {"commitment":"processed"} - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 2483 - }, - "value": false - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_minimumLedgerSlot.mdx b/docs/src/api/methods/_minimumLedgerSlot.mdx deleted file mode 100644 index 1ac63315d972ed..00000000000000 --- a/docs/src/api/methods/_minimumLedgerSlot.mdx +++ /dev/null @@ -1,52 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## minimumLedgerSlot - -Returns the lowest slot that the node has information about in its ledger. - -:::info -This value may increase over time if the node is configured to purge older ledger data -::: - - - - -### Parameters: - -**None** - -### Result: - -`u64` - Minimum ledger slot number - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"minimumLedgerSlot"} -' -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 1234, "id": 1 } -``` - - - - diff --git a/docs/src/api/methods/_requestAirdrop.mdx b/docs/src/api/methods/_requestAirdrop.mdx deleted file mode 100644 index 7a9cf3527f919d..00000000000000 --- a/docs/src/api/methods/_requestAirdrop.mdx +++ /dev/null @@ -1,78 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## requestAirdrop - -Requests an airdrop of lamports to a Pubkey - - - - -### Parameters: - - - Pubkey of account to receive lamports, as a base-58 encoded string - - - - lamports to airdrop, as a "u64" - - - - -Configuration object containing the following fields: - - - - - -### Result: - -`` - Transaction Signature of the airdrop, as a base-58 encoded string - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", "id": 1, - "method": "requestAirdrop", - "params": [ - "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", - 1000000000 - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_sendTransaction.mdx b/docs/src/api/methods/_sendTransaction.mdx deleted file mode 100644 index fc9978aaeea0c6..00000000000000 --- a/docs/src/api/methods/_sendTransaction.mdx +++ /dev/null @@ -1,124 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## sendTransaction - -Submits a signed transaction to the cluster for processing. 
- -This method does not alter the transaction in any way; it relays the -transaction created by clients to the node as-is. - -If the node's rpc service receives the transaction, this method immediately -succeeds, without waiting for any confirmations. A successful response from -this method does not guarantee the transaction is processed or confirmed by the -cluster. - -While the rpc service will reasonably retry to submit it, the transaction -could be rejected if transaction's `recent_blockhash` expires before it lands. - -Use [`getSignatureStatuses`](#getsignaturestatuses) to ensure a transaction is processed and confirmed. - -Before submitting, the following preflight checks are performed: - -1. The transaction signatures are verified -2. The transaction is simulated against the bank slot specified by the preflight - commitment. On failure an error will be returned. Preflight checks may be - disabled if desired. It is recommended to specify the same commitment and - preflight commitment to avoid confusing behavior. - -The returned signature is the first signature in the transaction, which -is used to identify the transaction ([transaction id](../../terminology.md#transaction-id)). -This identifier can be easily extracted from the transaction data before -submission. - - - - -### Parameters: - - - Fully-signed Transaction, as encoded string. - - - - -Configuration object containing the following optional fields: - - - -Encoding used for the transaction data. - -Values: `base58` (_slow_, **DEPRECATED**), or `base64`. - - - - - if "true", skip the preflight transaction checks - - - - Commitment level to use for preflight. - - - - Maximum number of times for the RPC node to retry sending the transaction to - the leader. If this parameter not provided, the RPC node will retry the - transaction until it is finalized or until the blockhash expires. - - - - set the minimum slot at which to perform preflight transaction checks - - - - -### Result: - -`` - First Transaction Signature embedded in the transaction, as base-58 encoded string ([transaction id](../../terminology.md#transaction-id)) - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "sendTransaction", - "params": [ - "4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT" - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": "2id3YC2jK9G5Wo2phDx4gJVAew8DcY5NAojnVuao8rkxwPYPe8cSwE5GzhEgJA2y8fVjDEo6iR6ykBvDxrTQrtpb", - "id": 1 -} -``` - - - - diff --git a/docs/src/api/methods/_simulateTransaction.mdx b/docs/src/api/methods/_simulateTransaction.mdx deleted file mode 100644 index 9a245846d1f915..00000000000000 --- a/docs/src/api/methods/_simulateTransaction.mdx +++ /dev/null @@ -1,174 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## simulateTransaction - -Simulate sending a transaction - - - - -### Parameters: - - - -Transaction, as an encoded string. - -:::note -The transaction must have a valid blockhash, but is not required to be signed. 
-::: - - - - - -Configuration object containing the following fields: - - - Commitment level to simulate the transaction at - - - - if `true` the transaction signatures will be verified (conflicts with - `replaceRecentBlockhash`) - - - - if `true` the transaction recent blockhash will be replaced with the most - recent blockhash. (conflicts with `sigVerify`) - - - - the minimum slot that the request can be evaluated at - - - - -Encoding used for the transaction data. - -Values: `base58` (_slow_, **DEPRECATED**), or `base64`. - - - - - -Accounts configuration object containing the following fields: - - - An `array` of accounts to return, as base-58 encoded strings - - - - -encoding for returned Account data - - - -
    - -- `jsonParsed` encoding attempts to use program-specific state - parsers to return more human-readable and explicit account state data. -- If `jsonParsed` is requested but a [parser cannot be found](https://github.com/solana-labs/solana/blob/cfd0a00ae2ba85a6d76757df8b4fa38ed242d185/account-decoder/src/parse_account_data.rs#L98-L100), the field falls - back to `base64` encoding, detectable when the returned `accounts.data` field is type `string`. - -
    - -
    - -
    - -
    - -### Result: - -The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields: - -- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) -- `logs: ` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure) -- `accounts: ` - array of accounts with the same length as the `accounts.addresses` array in the request - - `` - if the account doesn't exist or if `err` is not null - - `` - otherwise, a JSON object containing: - - `lamports: ` - number of lamports assigned to this account, as a u64 - - `owner: ` - base-58 encoded Pubkey of the program this account has been assigned to - - `data: <[string, encoding]|object>` - data associated with the account, either as encoded binary data or JSON format `{: }` - depending on encoding parameter - - `executable: ` - boolean indicating if the account contains a program \(and is strictly read-only\) - - `rentEpoch: ` - the epoch at which this account will next owe rent, as u64 -- `unitsConsumed: ` - The number of compute budget units consumed during the processing of this transaction -- `returnData: ` - the most-recent return data generated by an instruction in the transaction, with the following fields: - - `programId: ` - the program that generated the return data, as base-58 encoded Pubkey - - `data: <[string, encoding]>` - the return data itself, as base-64 encoded binary data - - - - - -### Code sample: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' - { - "jsonrpc": "2.0", - "id": 1, - "method": "simulateTransaction", - "params": [ - "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEDArczbMia1tLmq7zz4DinMNN0pJ1JtLdqIJPUw3YrGCzYAMHBsgN27lcgB6H2WQvFgyZuJYHa46puOQo9yQ8CVQbd9uHXZaGT2cvhRs7reawctIXtX1s3kTqM9YV+/wCp20C7Wj2aiuk5TReAXo+VTVg8QTHjs0UjNMMKCvpzZ+ABAgEBARU=", - { - "encoding":"base64", - } - ] - } -' -``` - -### Response: - -```json -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 218 - }, - "value": { - "err": null, - "accounts": null, - "logs": [ - "Program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri invoke [1]", - "Program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri consumed 2366 of 1400000 compute units", - "Program return: 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri KgAAAAAAAAA=", - "Program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri success" - ], - "returnData": { - "data": ["Kg==", "base64"], - "programId": "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri" - }, - "unitsConsumed": 2366 - } - }, - "id": 1 -} -``` - - - - diff --git a/docs/src/api/websocket.md b/docs/src/api/websocket.md deleted file mode 100644 index d409072f462901..00000000000000 --- a/docs/src/api/websocket.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: RPC Websocket API -displayed_sidebar: apiWebsocketMethodsSidebar -hide_table_of_contents: true ---- - -After connecting to the RPC PubSub websocket at `ws://
    /`: - -- Submit subscription requests to the websocket using the methods below -- Multiple subscriptions may be active at once -- Many subscriptions take the optional [`commitment` parameter](/api/http#configuring-state-commitment), defining how finalized a change should be to trigger a notification. For subscriptions, if commitment is unspecified, the default value is `finalized`. - -## RPC PubSub WebSocket Endpoint - -**Default port:** 8900 e.g. ws://localhost:8900, [http://192.168.1.88:8900](http://192.168.1.88:8900) - -## Methods - -The following methods are supported in the RPC Websocket API: - -import AccountSubscribe from "./websocket/\_accountSubscribe.mdx" - - - -import AccountUnsubscribe from "./websocket/\_accountUnsubscribe.mdx" - - - -import BlockSubscribe from "./websocket/\_blockSubscribe.mdx" - - - -import BlockUnsubscribe from "./websocket/\_blockUnsubscribe.mdx" - - - -import LogsSubscribe from "./websocket/\_logsSubscribe.mdx" - - - -import LogsUnsubscribe from "./websocket/\_logsUnsubscribe.mdx" - - - -import ProgramSubscribe from "./websocket/\_programSubscribe.mdx" - - - -import ProgramUnsubscribe from "./websocket/\_programUnsubscribe.mdx" - - - -import SignatureSubscribe from "./websocket/\_signatureSubscribe.mdx" - - - -import SignatureUnsubscribe from "./websocket/\_signatureUnsubscribe.mdx" - - - -import SlotSubscribe from "./websocket/\_slotSubscribe.mdx" - - - -import SlotUnsubscribe from "./websocket/\_slotUnsubscribe.mdx" - - - -import SlotsUpdatesSubscribe from "./websocket/\_slotsUpdatesSubscribe.mdx" - - - -import SlotsUpdatesUnsubscribe from "./websocket/\_slotsUpdatesUnsubscribe.mdx" - - - -import RootSubscribe from "./websocket/\_rootSubscribe.mdx" - - - -import RootUnsubscribe from "./websocket/\_rootUnsubscribe.mdx" - - - -import VoteSubscribe from "./websocket/\_voteSubscribe.mdx" - - - -import VoteUnsubscribe from "./websocket/\_voteUnsubscribe.mdx" - - diff --git a/docs/src/api/websocket/_accountSubscribe.mdx b/docs/src/api/websocket/_accountSubscribe.mdx deleted file mode 100644 index f86e214a64aa92..00000000000000 --- a/docs/src/api/websocket/_accountSubscribe.mdx +++ /dev/null @@ -1,160 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## accountSubscribe - -Subscribe to an account to receive notifications when the lamports or data for a given account public key changes - - - - -### Parameters: - - - Account Pubkey, as base-58 encoded string - - - - -Configuration object containing the following fields: - - - - - -Encoding format for Account data - - - -
- 
-- `base58` is slow.
-- `jsonParsed` encoding attempts to use program-specific state parsers to return more
-  human-readable and explicit account state data
-- If `jsonParsed` is requested but a parser cannot be found, the field falls back to
-  binary encoding, detectable when the `data` field is type `string`.
- 
    - -
    - -
    - -### Result: - -`` - Subscription id \(needed to unsubscribe\) - -
    - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "accountSubscribe", - "params": [ - "CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", - { - "encoding": "jsonParsed", - "commitment": "finalized" - } - ] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 23784, "id": 1 } -``` - - -
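As a rough way to exercise this subscription by hand (an assumption, not part of the original page), any generic websocket client can be pointed at the pubsub endpoint; with the `wscat` tool, for example:

```bash
# open the pubsub socket (default port 8900), then paste an accountSubscribe
# request as a single line; notifications stream back on the same connection
wscat -c ws://localhost:8900
```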
    - -#### Notification Format: - -The notification format is the same as seen in the [getAccountInfo](#getAccountInfo) RPC HTTP method. - -Base58 encoding: - -```json -{ - "jsonrpc": "2.0", - "method": "accountNotification", - "params": { - "result": { - "context": { - "slot": 5199307 - }, - "value": { - "data": [ - "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR", - "base58" - ], - "executable": false, - "lamports": 33594, - "owner": "11111111111111111111111111111111", - "rentEpoch": 635, - "space": 80 - } - }, - "subscription": 23784 - } -} -``` - -Parsed-JSON encoding: - -```json -{ - "jsonrpc": "2.0", - "method": "accountNotification", - "params": { - "result": { - "context": { - "slot": 5199307 - }, - "value": { - "data": { - "program": "nonce", - "parsed": { - "type": "initialized", - "info": { - "authority": "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX", - "blockhash": "LUaQTmM7WbMRiATdMMHaRGakPtCkc2GHtH57STKXs6k", - "feeCalculator": { - "lamportsPerSignature": 5000 - } - } - } - }, - "executable": false, - "lamports": 33594, - "owner": "11111111111111111111111111111111", - "rentEpoch": 635, - "space": 80 - } - }, - "subscription": 23784 - } -} -``` - -
    diff --git a/docs/src/api/websocket/_accountUnsubscribe.mdx b/docs/src/api/websocket/_accountUnsubscribe.mdx deleted file mode 100644 index d3a90de9ab9132..00000000000000 --- a/docs/src/api/websocket/_accountUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## accountUnsubscribe - -Unsubscribe from account change notifications - - - - -### Parameters: - - - id of the account Subscription to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "accountUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_blockSubscribe.mdx b/docs/src/api/websocket/_blockSubscribe.mdx deleted file mode 100644 index b86543798e5bc3..00000000000000 --- a/docs/src/api/websocket/_blockSubscribe.mdx +++ /dev/null @@ -1,378 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## blockSubscribe - -Subscribe to receive notification anytime a new block is Confirmed or Finalized. - -:::caution -This subscription is **unstable** and only available if the validator was started -with the `--rpc-pubsub-enable-block-subscription` flag. - -**NOTE: The format of this subscription may change in the future** -::: - - - - -### Parameters: - - - -filter criteria for the logs to receive results by account type; currently supported: - - - all - include all transactions in block - - - - -A JSON object with the following field: - -- `mentionsAccountOrProgram: ` - return only transactions that mention the provided public key (as base-58 encoded string). If no mentions in a given block, then no notification will be sent. - - - - - - - -Configuration object containing the following fields: - - -
-  - the default is `finalized`
- 
-  - `processed` is not supported.
- 
    - - - -encoding format for each returned Transaction - - - -
    - -- `jsonParsed` attempts to use program-specific instruction parsers to return - more human-readable and explicit data in the `transaction.message.instructions` list. -- If `jsonParsed` is requested but a parser cannot be found, the instruction - falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). - -
    - -
    - - - -level of transaction detail to return - - - -
    - -- If `accounts` are requested, transaction details only include signatures and - an annotated list of accounts in each transaction. -- Transaction metadata is limited to only: fee, err, pre_balances, - post_balances, pre_token_balances, and post_token_balances. - -
    - -
    - - - -the max transaction version to return in responses. - -
    - -- If the requested block contains a transaction with a higher version, an - error will be returned. -- If this parameter is omitted, only legacy transactions will be returned, and - a block containing any versioned transaction will prompt the error. - -
    - -
- 
- 
-  whether to populate the `rewards` array. If the parameter is not provided, the
-  default includes rewards.
- 
- 
    - -### Result: - -`integer` - subscription id \(needed to unsubscribe\) - -
    - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": "1", - "method": "blockSubscribe", - "params": ["all"] -} -``` - -```json -{ - "jsonrpc": "2.0", - "id": "1", - "method": "blockSubscribe", - "params": [ - { - "mentionsAccountOrProgram": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op" - }, - { - "commitment": "confirmed", - "encoding": "base64", - "showRewards": true, - "transactionDetails": "full" - } - ] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 0, "id": 1 } -``` - - -
    - -#### Notification Format: - -The notification will be an object with the following fields: - -- `slot: ` - The corresponding slot. -- `err: ` - Error if something went wrong publishing the notification otherwise null. -- `block: ` - A block object as seen in the [getBlock](/api/http#getblock) RPC HTTP method. - -```json -{ - "jsonrpc": "2.0", - "method": "blockNotification", - "params": { - "result": { - "context": { - "slot": 112301554 - }, - "value": { - "slot": 112301554, - "block": { - "previousBlockhash": "GJp125YAN4ufCSUvZJVdCyWQJ7RPWMmwxoyUQySydZA", - "blockhash": "6ojMHjctdqfB55JDpEpqfHnP96fiaHEcvzEQ2NNcxzHP", - "parentSlot": 112301553, - "transactions": [ - { - "transaction": [ - "OpltwoUvWxYi1P2U8vbIdE/aPntjYo5Aa0VQ2JJyeJE2g9Vvxk8dDGgFMruYfDu8/IfUWb0REppTe7IpAuuLRgIBAAkWnj4KHRpEWWW7gvO1c0BHy06wZi2g7/DLqpEtkRsThAXIdBbhXCLvltw50ZnjDx2hzw74NVn49kmpYj2VZHQJoeJoYJqaKcvuxCi/2i4yywedcVNDWkM84Iuw+cEn9/ROCrXY4qBFI9dveEERQ1c4kdU46xjxj9Vi+QXkb2Kx45QFVkG4Y7HHsoS6WNUiw2m4ffnMNnOVdF9tJht7oeuEfDMuUEaO7l9JeUxppCvrGk3CP45saO51gkwVYEgKzhpKjCx3rgsYxNR81fY4hnUQXSbbc2Y55FkwgRBpVvQK7/+clR4Gjhd3L4y+OtPl7QF93Akg1LaU9wRMs5nvfDFlggqI9PqJl+IvVWrNRdBbPS8LIIhcwbRTkSbqlJQWxYg3Bo2CTVbw7rt1ZubuHWWp0mD/UJpLXGm2JprWTePNULzHu67sfqaWF99LwmwjTyYEkqkRt1T0Je5VzHgJs0N5jY4iIU9K3lMqvrKOIn/2zEMZ+ol2gdgjshx+sphIyhw65F3J/Dbzk04LLkK+CULmN571Y+hFlXF2ke0BIuUG6AUF+4214Cu7FXnqo3rkxEHDZAk0lRrAJ8X/Z+iwuwI5cgbd9uHXZaGT2cvhRs7reawctIXtX1s3kTqM9YV+/wCpDLAp8axcEkaQkLDKRoWxqp8XLNZSKial7Rk+ELAVVKWoWLRXRZ+OIggu0OzMExvVLE5VHqy71FNHq4gGitkiKYNFWSLIE4qGfdFLZXy/6hwS+wq9ewjikCpd//C9BcCL7Wl0iQdUslxNVCBZHnCoPYih9JXvGefOb9WWnjGy14sG9j70+RSVx6BlkFELWwFvIlWR/tHn3EhHAuL0inS2pwX7ZQTAU6gDVaoqbR2EiJ47cKoPycBNvHLoKxoY9AZaBjPl6q8SKQJSFyFd9n44opAgI6zMTjYF/8Ok4VpXEESp3QaoUyTI9sOJ6oFP6f4dwnvQelgXS+AEfAsHsKXxGAIUDQENAgMEBQAGBwgIDg8IBJCER3QXl1AVDBADCQoOAAQLERITDAjb7ugh3gOuTy==", - "base64" - ], - "meta": { - "err": null, - "status": { - "Ok": null - }, - "fee": 5000, - "preBalances": [ - 1758510880, 2067120, 1566000, 1461600, 2039280, 2039280, - 1900080, 1865280, 0, 3680844220, 2039280 - ], - "postBalances": [ - 1758505880, 2067120, 1566000, 1461600, 2039280, 2039280, - 1900080, 1865280, 0, 3680844220, 2039280 - ], - "innerInstructions": [ - { - "index": 0, - "instructions": [ - { - "programIdIndex": 13, - "accounts": [1, 15, 3, 4, 2, 14], - "data": "21TeLgZXNbtHXVBzCaiRmH" - }, - { - "programIdIndex": 14, - "accounts": [3, 4, 1], - "data": "6qfC8ic7Aq99" - }, - { - "programIdIndex": 13, - "accounts": [1, 15, 3, 5, 2, 14], - "data": "21TeLgZXNbsn4QEpaSEr3q" - }, - { - "programIdIndex": 14, - "accounts": [3, 5, 1], - "data": "6LC7BYyxhFRh" - } - ] - }, - { - "index": 1, - "instructions": [ - { - "programIdIndex": 14, - "accounts": [4, 3, 0], - "data": "7aUiLHFjSVdZ" - }, - { - "programIdIndex": 19, - "accounts": [17, 18, 16, 9, 11, 12, 14], - "data": "8kvZyjATKQWYxaKR1qD53V" - }, - { - "programIdIndex": 14, - "accounts": [9, 11, 18], - "data": "6qfC8ic7Aq99" - } - ] - } - ], - "logMessages": [ - "Program QMNeHCGYnLVDn1icRAfQZpjPLBNkfGbSKRB83G5d8KB invoke [1]", - "Program QMWoBmAyJLAsA1Lh9ugMTw2gciTihncciphzdNzdZYV invoke [2]" - ], - "preTokenBalances": [ - { - "accountIndex": 4, - "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", - "uiTokenAmount": { - "uiAmount": null, - "decimals": 6, - "amount": "0", - "uiAmountString": "0" - }, - "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" - }, - { - "accountIndex": 5, - "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", - 
"uiTokenAmount": { - "uiAmount": 11513.0679, - "decimals": 6, - "amount": "11513067900", - "uiAmountString": "11513.0679" - }, - "owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" - }, - { - "accountIndex": 10, - "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", - "uiTokenAmount": { - "uiAmount": null, - "decimals": 6, - "amount": "0", - "uiAmountString": "0" - }, - "owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU", - "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" - }, - { - "accountIndex": 11, - "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", - "uiTokenAmount": { - "uiAmount": 15138.514093, - "decimals": 6, - "amount": "15138514093", - "uiAmountString": "15138.514093" - }, - "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op", - "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" - } - ], - "postTokenBalances": [ - { - "accountIndex": 4, - "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", - "uiTokenAmount": { - "uiAmount": null, - "decimals": 6, - "amount": "0", - "uiAmountString": "0" - }, - "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" - }, - { - "accountIndex": 5, - "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", - "uiTokenAmount": { - "uiAmount": 11513.103028, - "decimals": 6, - "amount": "11513103028", - "uiAmountString": "11513.103028" - }, - "owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" - }, - { - "accountIndex": 10, - "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", - "uiTokenAmount": { - "uiAmount": null, - "decimals": 6, - "amount": "0", - "uiAmountString": "0" - }, - "owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU", - "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" - }, - { - "accountIndex": 11, - "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", - "uiTokenAmount": { - "uiAmount": 15489.767829, - "decimals": 6, - "amount": "15489767829", - "uiAmountString": "15489.767829" - }, - "owner": "BeiHVPRE8XeX3Y2xVNrSsTpAScH94nYySBVQ4HqgN9at", - "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" - } - ], - "rewards": [] - } - } - ], - "blockTime": 1639926816, - "blockHeight": 101210751 - }, - "err": null - } - }, - "subscription": 14 - } -} -``` - -
    diff --git a/docs/src/api/websocket/_blockUnsubscribe.mdx b/docs/src/api/websocket/_blockUnsubscribe.mdx deleted file mode 100644 index a16b73ca639aa4..00000000000000 --- a/docs/src/api/websocket/_blockUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## blockUnsubscribe - -Unsubscribe from block notifications - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "blockUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_logsSubscribe.mdx b/docs/src/api/websocket/_logsSubscribe.mdx deleted file mode 100644 index 6955004489cda9..00000000000000 --- a/docs/src/api/websocket/_logsSubscribe.mdx +++ /dev/null @@ -1,146 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## logsSubscribe - -Subscribe to transaction logging - - - - -### Parameters: - - - filter criteria for the logs to receive results by account type. The following filters types are currently supported: - - - -A string with one of the following values: - -- `all` - subscribe to all transactions except for simple vote transactions -- `allWithVotes` - subscribe to all transactions, including simple vote - transactions - - - - - -An object with the following field: - -- `mentions: [ ]` - array containing a single Pubkey (as base-58 - encoded string); if present, subscribe to only transactions mentioning this address - -:::caution - -Currently, the `mentions` field -[only supports one](https://github.com/solana-labs/solana/blob/master/rpc/src/rpc_pubsub.rs#L481) -Pubkey string per method call. Listing additional addresses will result in an -error. - -::: - - - - - - - -Configuration object containing the following fields: - - - - - -### Result: - -`` - Subscription id \(needed to unsubscribe\) - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "logsSubscribe", - "params": [ - { - "mentions": [ "11111111111111111111111111111111" ] - }, - { - "commitment": "finalized" - } - ] -} -{ - "jsonrpc": "2.0", - "id": 1, - "method": "logsSubscribe", - "params": [ "all" ] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 24040, "id": 1 } -``` - - - - -#### Notification Format: - -The notification will be an RpcResponse JSON object with value equal to: - -- `signature: ` - The transaction signature base58 encoded. -- `err: ` - Error if transaction failed, null if transaction - succeeded. 
- [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) -- `logs: ` - Array of log messages the transaction instructions - output during execution, null if simulation failed before the transaction was - able to execute (for example due to an invalid blockhash or signature - verification failure) - -Example: - -```json -{ - "jsonrpc": "2.0", - "method": "logsNotification", - "params": { - "result": { - "context": { - "slot": 5208469 - }, - "value": { - "signature": "5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv", - "err": null, - "logs": [ - "SBF program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri success" - ] - } - }, - "subscription": 24040 - } -} -``` - - diff --git a/docs/src/api/websocket/_logsUnsubscribe.mdx b/docs/src/api/websocket/_logsUnsubscribe.mdx deleted file mode 100644 index 6a75606eb02b06..00000000000000 --- a/docs/src/api/websocket/_logsUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## logsUnsubscribe - -Unsubscribe from transaction logging - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "logsUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_programSubscribe.mdx b/docs/src/api/websocket/_programSubscribe.mdx deleted file mode 100644 index bea83bac6c0fad..00000000000000 --- a/docs/src/api/websocket/_programSubscribe.mdx +++ /dev/null @@ -1,205 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## programSubscribe - -Subscribe to a program to receive notifications when the lamports or data for an account owned by the given program changes - - - - -### Parameters: - - - -Pubkey of the `program_id`, as base-58 encoded string - - - - - -Configuration object containing the following fields: - - - - - -filter results using various [filter objects](/api/http#filter-criteria) - -:::info -The resultant account must meet **ALL** filter criteria to be included in the returned results -::: - - - - - -Encoding format for Account data - - - -
- 
-- `base58` is slow.
-- [`jsonParsed`](/api/http#parsed-responses) encoding attempts to use program-specific
-  state parsers to return more human-readable and explicit account state data.
-- If `jsonParsed` is requested but a parser cannot be found, the field falls
-  back to `base64` encoding, detectable when the `data` field is type `string`.
- 
    - -
    - -
    - -### Result: - -`` - Subscription id \(needed to unsubscribe\) - -
    - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "programSubscribe", - "params": [ - "11111111111111111111111111111111", - { - "encoding": "base64", - "commitment": "finalized" - } - ] -} -{ - "jsonrpc": "2.0", - "id": 1, - "method": "programSubscribe", - "params": [ - "11111111111111111111111111111111", - { - "encoding": "jsonParsed" - } - ] -} -{ - "jsonrpc": "2.0", - "id": 1, - "method": "programSubscribe", - "params": [ - "11111111111111111111111111111111", - { - "encoding": "base64", - "filters": [ - { - "dataSize": 80 - } - ] - } - ] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 24040, "id": 1 } -``` - - -
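The filter objects follow the same [filter criteria](/api/http#filter-criteria) as the HTTP API, so `dataSize` can be combined with a `memcmp` filter. A hypothetical illustration — the `offset` and `bytes` values below are placeholders, not from the original page:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "programSubscribe",
  "params": [
    "11111111111111111111111111111111",
    {
      "encoding": "base64",
      "filters": [
        { "dataSize": 80 },
        { "memcmp": { "offset": 4, "bytes": "3Mc6vR" } }
      ]
    }
  ]
}
```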
    - -#### Notification format - -The notification format is a single program account object as seen in the [getProgramAccounts](/api/http#getprogramaccounts) RPC HTTP method. - -Base58 encoding: - -```json -{ - "jsonrpc": "2.0", - "method": "programNotification", - "params": { - "result": { - "context": { - "slot": 5208469 - }, - "value": { - "pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq", - "account": { - "data": [ - "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR", - "base58" - ], - "executable": false, - "lamports": 33594, - "owner": "11111111111111111111111111111111", - "rentEpoch": 636, - "space": 80 - } - } - }, - "subscription": 24040 - } -} -``` - -Parsed-JSON encoding: - -```json -{ - "jsonrpc": "2.0", - "method": "programNotification", - "params": { - "result": { - "context": { - "slot": 5208469 - }, - "value": { - "pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq", - "account": { - "data": { - "program": "nonce", - "parsed": { - "type": "initialized", - "info": { - "authority": "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX", - "blockhash": "LUaQTmM7WbMRiATdMMHaRGakPtCkc2GHtH57STKXs6k", - "feeCalculator": { - "lamportsPerSignature": 5000 - } - } - } - }, - "executable": false, - "lamports": 33594, - "owner": "11111111111111111111111111111111", - "rentEpoch": 636, - "space": 80 - } - } - }, - "subscription": 24040 - } -} -``` - -
    diff --git a/docs/src/api/websocket/_programUnsubscribe.mdx b/docs/src/api/websocket/_programUnsubscribe.mdx deleted file mode 100644 index b3decdcb9a50fe..00000000000000 --- a/docs/src/api/websocket/_programUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## programUnsubscribe - -Unsubscribe from program-owned account change notifications - - - - -### Parameters: - - - id of account Subscription to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "programUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_rootSubscribe.mdx b/docs/src/api/websocket/_rootSubscribe.mdx deleted file mode 100644 index 98fd59407f023b..00000000000000 --- a/docs/src/api/websocket/_rootSubscribe.mdx +++ /dev/null @@ -1,62 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## rootSubscribe - -Subscribe to receive notification anytime a new root is set by the validator. - - - - -### Parameters: - -**None** - -### Result: - -`integer` - subscription id \(needed to unsubscribe\) - - - - - -### Code sample: - -```json -{ "jsonrpc": "2.0", "id": 1, "method": "rootSubscribe" } -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 0, "id": 1 } -``` - - - - -#### Notification Format: - -The result is the latest root slot number. - -```json -{ - "jsonrpc": "2.0", - "method": "rootNotification", - "params": { - "result": 42, - "subscription": 0 - } -} -``` - - diff --git a/docs/src/api/websocket/_rootUnsubscribe.mdx b/docs/src/api/websocket/_rootUnsubscribe.mdx deleted file mode 100644 index 8d4085f183678f..00000000000000 --- a/docs/src/api/websocket/_rootUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## rootUnsubscribe - -Unsubscribe from root notifications - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "rootUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_signatureSubscribe.mdx b/docs/src/api/websocket/_signatureSubscribe.mdx deleted file mode 100644 index 1c0d8f58fc36b7..00000000000000 --- a/docs/src/api/websocket/_signatureSubscribe.mdx +++ /dev/null @@ -1,160 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## signatureSubscribe - -Subscribe to receive a notification when the transaction with the given -signature reaches the specified commitment level. - -:::caution - -This is a subscription to a single notification. It is automatically cancelled -by the server once the notification, `signatureNotification`, is sent by the -RPC. 
- -::: - - - - -### Parameters: - - - -transaction signature, as base-58 encoded string - -:::info - -The transaction signature must be the first signature from the transaction (see -[transaction id](/terminology#transaction-id) for more details). - -::: - - - - - -Configuration object containing the following fields: - - - - - -Whether or not to subscribe for notifications when signatures are received by -the RPC, in addition to when they are processed. - - - - - -### Result: - -`` - subscription id (needed to unsubscribe) - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "signatureSubscribe", - "params": [ - "2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", - { - "commitment": "finalized", - "enableReceivedNotification": false - } - ] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 0, "id": 1 } -``` - - - - -#### Notification Format: - -The notification will be an RpcResponse JSON object with value containing an -object with: - -- `slot: ` - The corresponding slot. -- `value: ` - a notification value of - [`RpcSignatureResult`](https://github.com/solana-labs/solana/blob/6d28fd455b07e3557fc6c0c3ddf3ba03e3fe8482/rpc-client-api/src/response.rs#L265-L268), - resulting in either: - - when `enableReceivedNotification` is `true` and the signature is received: the - literal string - [`"receivedSignature"`](https://github.com/solana-labs/solana/blob/6d28fd455b07e3557fc6c0c3ddf3ba03e3fe8482/rpc-client-api/src/response.rs#L286-L288), - or - - when the signature is processed: `err: `: - - `null` if the transaction succeeded in being processed at the specified - commitment level, or - - a - [`TransactionError`](https://github.com/solana-labs/solana/blob/6d28fd455b07e3557fc6c0c3ddf3ba03e3fe8482/sdk/src/transaction/error.rs#L15-L164), - if the transaction failed - -#### Example responses: - -The following is an example response of a notification from a successfully -**processed** transactions: - -```json -{ - "jsonrpc": "2.0", - "method": "signatureNotification", - "params": { - "result": { - "context": { - "slot": 5207624 - }, - "value": { - "err": null - } - }, - "subscription": 24006 - } -} -``` - -The following is an example response of a notification from a successfully -**recieved** transaction signature: - -```json -{ - "jsonrpc": "2.0", - "method": "signatureNotification", - "params": { - "result": { - "context": { - "slot": 5207624 - }, - "value": "receivedSignature" - }, - "subscription": 24006 - } -} -``` - - diff --git a/docs/src/api/websocket/_signatureUnsubscribe.mdx b/docs/src/api/websocket/_signatureUnsubscribe.mdx deleted file mode 100644 index 880efed7c49fe7..00000000000000 --- a/docs/src/api/websocket/_signatureUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## signatureUnsubscribe - -Unsubscribe from signature confirmation notification - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "signatureUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_slotSubscribe.mdx b/docs/src/api/websocket/_slotSubscribe.mdx deleted file mode 100644 index c746ff060e5b22..00000000000000 --- 
a/docs/src/api/websocket/_slotSubscribe.mdx +++ /dev/null @@ -1,72 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## slotSubscribe - -Subscribe to receive notification anytime a slot is processed by the validator - - - - -### Parameters: - -**None** - -### Result: - -`` - Subscription id \(needed to unsubscribe\) - - - - - -### Code sample: - -```json -{ "jsonrpc": "2.0", "id": 1, "method": "slotSubscribe" } -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 0, "id": 1 } -``` - - - - -#### Notification Format: - -The notification will be an object with the following fields: - -- `parent: ` - The parent slot -- `root: ` - The current root slot -- `slot: ` - The newly set slot value - -Example: - -```json -{ - "jsonrpc": "2.0", - "method": "slotNotification", - "params": { - "result": { - "parent": 75, - "root": 44, - "slot": 76 - }, - "subscription": 0 - } -} -``` - - diff --git a/docs/src/api/websocket/_slotUnsubscribe.mdx b/docs/src/api/websocket/_slotUnsubscribe.mdx deleted file mode 100644 index 0ce506163c87ca..00000000000000 --- a/docs/src/api/websocket/_slotUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## slotUnsubscribe - -Unsubscribe from slot notifications - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "slotUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx b/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx deleted file mode 100644 index cbd6a6aec13303..00000000000000 --- a/docs/src/api/websocket/_slotsUpdatesSubscribe.mdx +++ /dev/null @@ -1,92 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## slotsUpdatesSubscribe - -Subscribe to receive a notification from the validator on a variety of updates -on every slot - -:::caution -This subscription is unstable - -**NOTE: the format of this subscription may change in the future and it may not always be supported** -::: - - - - -### Parameters: - -**None** - -### Result: - -`` - Subscription id (needed to unsubscribe) - - - - - -### Code sample: - -```json -{ "jsonrpc": "2.0", "id": 1, "method": "slotsUpdatesSubscribe" } -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 0, "id": 1 } -``` - - - - -#### Notification Format: - -The notification will be an object with the following fields: - -- `err: ` - The error message. Only present if the update is of type "dead". -- `parent: ` - The parent slot. Only present if the update is of type "createdBank". -- `slot: ` - The newly updated slot -- `stats: ` - The error message. Only present if the update is of type "frozen". 
An object with the following fields: - - `maxTransactionsPerEntry: `, - - `numFailedTransactions: `, - - `numSuccessfulTransactions: `, - - `numTransactionEntries: `, -- `timestamp: ` - The Unix timestamp of the update -- `type: ` - The update type, one of: - - "firstShredReceived" - - "completed" - - "createdBank" - - "frozen" - - "dead" - - "optimisticConfirmation" - - "root" - -```bash -{ - "jsonrpc": "2.0", - "method": "slotsUpdatesNotification", - "params": { - "result": { - "parent": 75, - "slot": 76, - "timestamp": 1625081266243, - "type": "optimisticConfirmation" - }, - "subscription": 0 - } -} -``` - - diff --git a/docs/src/api/websocket/_slotsUpdatesUnsubscribe.mdx b/docs/src/api/websocket/_slotsUpdatesUnsubscribe.mdx deleted file mode 100644 index 8169e52118781b..00000000000000 --- a/docs/src/api/websocket/_slotsUpdatesUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## slotsUpdatesUnsubscribe - -Unsubscribe from slot-update notifications - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "slotsUpdatesUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/api/websocket/_voteSubscribe.mdx b/docs/src/api/websocket/_voteSubscribe.mdx deleted file mode 100644 index d100035d93658e..00000000000000 --- a/docs/src/api/websocket/_voteSubscribe.mdx +++ /dev/null @@ -1,80 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## voteSubscribe - -Subscribe to receive notification anytime a new vote is observed in gossip. -These votes are pre-consensus therefore there is no guarantee these votes will -enter the ledger. - -:::caution -This subscription is unstable and only available if the validator was started -with the `--rpc-pubsub-enable-vote-subscription` flag. 
The format of this -subscription may change in the future -::: - - - - -### Parameters: - -**None** - -### Result: - -`` - subscription id (needed to unsubscribe) - - - - - -### Code sample: - -```json -{ "jsonrpc": "2.0", "id": 1, "method": "voteSubscribe" } -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": 0, "id": 1 } -``` - - - - -#### Notification Format: - -The notification will be an object with the following fields: - -- `hash: ` - The vote hash -- `slots: ` - The slots covered by the vote, as an array of u64 integers -- `timestamp: ` - The timestamp of the vote -- `signature: ` - The signature of the transaction that contained this vote -- `votePubkey: ` - The public key of the vote account, as base-58 encoded string - -```json -{ - "jsonrpc": "2.0", - "method": "voteNotification", - "params": { - "result": { - "hash": "8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM", - "slots": [1, 2], - "timestamp": null - }, - "subscription": 0 - } -} -``` - - diff --git a/docs/src/api/websocket/_voteUnsubscribe.mdx b/docs/src/api/websocket/_voteUnsubscribe.mdx deleted file mode 100644 index db37b4205479a1..00000000000000 --- a/docs/src/api/websocket/_voteUnsubscribe.mdx +++ /dev/null @@ -1,53 +0,0 @@ -import { - DocBlock, - DocSideBySide, - CodeParams, - Parameter, - Field, - Values, - CodeSnippets, -} from "../../../components/CodeDocBlock"; - - - -## voteUnsubscribe - -Unsubscribe from vote notifications - - - - -### Parameters: - - - subscription id to cancel - - -### Result: - -`` - unsubscribe success message - - - - - -### Code sample: - -```json -{ - "jsonrpc": "2.0", - "id": 1, - "method": "voteUnsubscribe", - "params": [0] -} -``` - -### Response: - -```json -{ "jsonrpc": "2.0", "result": true, "id": 1 } -``` - - - - diff --git a/docs/src/architecture.md b/docs/src/architecture.md new file mode 100644 index 00000000000000..3ba64f2d80e822 --- /dev/null +++ b/docs/src/architecture.md @@ -0,0 +1,7 @@ +--- +title: Architecture +sidebar_label: Overview +sidebar_position: 0 +--- + +In this section, we will describe the architecture. 
diff --git a/docs/src/developing/backwards-compatibility.md b/docs/src/backwards-compatibility.md
similarity index 100%
rename from docs/src/developing/backwards-compatibility.md
rename to docs/src/backwards-compatibility.md
diff --git a/docs/src/cli/.usage.md.header b/docs/src/cli/.usage.md.header
index 4c532f9ebf36cc..9ee41c132f2537 100644
--- a/docs/src/cli/.usage.md.header
+++ b/docs/src/cli/.usage.md.header
@@ -1,5 +1,7 @@
---
-title: CLI Usage Reference
+title: Solana CLI Reference and Usage
+sidebar_label: Reference & Usage
+sidebar_position: 3
---

The [solana-cli crate](https://crates.io/crates/solana-cli) provides a command-line interface tool for Solana
diff --git a/docs/src/cli/examples/_category_.json b/docs/src/cli/examples/_category_.json
new file mode 100644
index 00000000000000..9a1a43e307208a
--- /dev/null
+++ b/docs/src/cli/examples/_category_.json
@@ -0,0 +1,7 @@
+{
+  "position": 4.5,
+  "label": "Command Examples",
+  "collapsible": true,
+  "collapsed": false,
+  "link": null
+}
diff --git a/docs/src/cli/choose-a-cluster.md b/docs/src/cli/examples/choose-a-cluster.md
similarity index 91%
rename from docs/src/cli/choose-a-cluster.md
rename to docs/src/cli/examples/choose-a-cluster.md
index cf10c1f1907cb4..b7c51ffc7320b2 100644
--- a/docs/src/cli/choose-a-cluster.md
+++ b/docs/src/cli/examples/choose-a-cluster.md
@@ -2,7 +2,7 @@
title: Connecting to a Cluster
---

-See [Solana Clusters](../clusters.md) for general information about the
+See [Solana Clusters](../../clusters/available.md) for general information about the
available clusters.

## Configure the command-line tool
diff --git a/docs/src/cli/delegate-stake.md b/docs/src/cli/examples/delegate-stake.md
similarity index 77%
rename from docs/src/cli/delegate-stake.md
rename to docs/src/cli/examples/delegate-stake.md
index 093c3e2ccf1c91..03d0465eef6b46 100644
--- a/docs/src/cli/delegate-stake.md
+++ b/docs/src/cli/examples/delegate-stake.md
@@ -1,24 +1,26 @@
---
title: Staking
---
+
For an overview of staking, read first the
[Staking and Inflation FAQ](https://solana.com/staking).

-------
+---

-After you have [received SOL](transfer-tokens.md), you might consider putting
-it to use by delegating _stake_ to a validator. Stake is what we call tokens
-in a _stake account_. Solana weights validator votes by the amount of stake
-delegated to them, which gives those validators more influence in determining
-then next valid block of transactions in the blockchain. Solana then generates
-new SOL periodically to reward stakers and validators. You earn more rewards
-the more stake you delegate.
+After you have [received SOL](./transfer-tokens.md), you might consider putting it
+to use by delegating _stake_ to a validator. Stake is what we call tokens in a
+_stake account_. Solana weights validator votes by the amount of stake delegated
+to them, which gives those validators more influence in determining the next
+valid block of transactions in the blockchain. Solana then generates new SOL
+periodically to reward stakers and validators. You earn more rewards the more
+stake you delegate.

## Create a Stake Account

To delegate stake, you will need to transfer some tokens into a stake account.
To create an account, you will need a keypair. Its public key will be used as
-the [stake account address](../staking/stake-accounts.md#account-address).
+the
+[stake account address](https://solana.com/docs/economics/staking/stake-accounts#account-address).
No need for a password or encryption here; this keypair will be discarded right after creating the stake account. @@ -67,11 +69,10 @@ Withdraw Authority: EXU95vqs93yPeCeAU7mPPu6HbRUmTFPEiGug9oCdvQ5F ### Set Stake and Withdraw Authorities -[Stake and withdraw authorities](../staking/stake-accounts.md#understanding-account-authorities) -can be set when creating an account via the -`--stake-authority` and `--withdraw-authority` options, or afterward with the -`solana stake-authorize` command. For example, to set a new stake authority, -run: +[Stake and withdraw authorities](https://solana.com/docs/economics/staking/stake-accounts#understanding-account-authorities) +can be set when creating an account via the `--stake-authority` and +`--withdraw-authority` options, or afterward with the `solana stake-authorize` +command. For example, to set a new stake authority, run: ```bash solana stake-authorize \ @@ -95,11 +96,12 @@ solana create-stake-account --from --seed --withdraw-authority --fee-payer ``` -`` is an arbitrary string up to 32 bytes, but will typically be a -number corresponding to which derived account this is. The first account might -be "0", then "1", and so on. The public key of `` acts -as the base address. The command derives a new address from the base address -and seed string. To see what stake address the command will derive, use `solana create-address-with-seed`: +`` is an arbitrary string up to 32 bytes, but will typically be a number +corresponding to which derived account this is. The first account might be "0", +then "1", and so on. The public key of `` acts as the +base address. The command derives a new address from the base address and seed +string. To see what stake address the command will derive, use +`solana create-address-with-seed`: ```bash solana create-address-with-seed --from STAKE @@ -121,9 +123,9 @@ accounts with the `solana validators` command: solana validators ``` -The first column of each row contains the validator's identity and the second -is the vote account address. Choose a validator and use its vote account -address in `solana delegate-stake`: +The first column of each row contains the validator's identity and the second is +the vote account address. Choose a validator and use its vote account address in +`solana delegate-stake`: ```bash solana delegate-stake --stake-authority \ @@ -134,8 +136,8 @@ The stake authority `` authorizes the operation on the account with address ``. The stake is delegated to the vote account with address ``. -After delegating stake, use `solana stake-account` to observe the changes -to the stake account: +After delegating stake, use `solana stake-account` to observe the changes to the +stake account: ```bash solana stake-account @@ -164,8 +166,8 @@ solana deactivate-stake --stake-authority \ --fee-payer ``` -The stake authority `` authorizes the operation on the account -with address ``. +The stake authority `` authorizes the operation on the account with +address ``. Note that stake takes several epochs to "cool down". Attempts to delegate stake in the cool down period will fail. @@ -180,8 +182,8 @@ solana withdraw-stake --withdraw-authority ` is the existing stake account, the stake authority -`` is the withdraw authority, and `` is the number of tokens -to transfer to ``. +`` is the withdraw authority, and `` is the number of tokens to +transfer to ``. 
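+
+As a concrete sketch, a full invocation might look like the following, where
+every file name and the amount are hypothetical placeholders for your own
+accounts and values:
+
+```bash
+solana withdraw-stake --withdraw-authority withdraw-authority.json \
+    stake-account.json recipient.json 0.5 \
+    --fee-payer fee-payer.json
+```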
## Split Stake @@ -196,11 +198,11 @@ solana split-stake --stake-authority ` is the existing stake account, the stake authority -`` is the stake authority, `` is the -keypair for the new account, and `` is the number of tokens to transfer -to the new account. +`` is the stake authority, `` is the keypair +for the new account, and `` is the number of tokens to transfer to the +new account. To split a stake account into a derived account address, use the `--seed` option. See -[Derive Stake Account Addresses](#advanced-derive-stake-account-addresses) -for details. +[Derive Stake Account Addresses](#advanced-derive-stake-account-addresses) for +details. diff --git a/docs/src/cli/deploy-a-program.md b/docs/src/cli/examples/deploy-a-program.md similarity index 54% rename from docs/src/cli/deploy-a-program.md rename to docs/src/cli/examples/deploy-a-program.md index 6c195f18b05742..b7a62837f74210 100644 --- a/docs/src/cli/deploy-a-program.md +++ b/docs/src/cli/examples/deploy-a-program.md @@ -2,19 +2,22 @@ title: Deploy a Program --- -Developers can deploy on-chain [programs](terminology.md#program) (often called -smart contracts elsewhere) with the Solana tools. +Developers can deploy on-chain +[programs](https://solana.com/docs/terminology#program) (often called smart +contracts elsewhere) with the Solana tools. To learn about developing and executing programs on Solana, start with the -[intro to Solana programs](developing/intro/programs.md) and then dig into the -details of [on-chain programs](developing/on-chain-programs/overview.md). +[intro to Solana programs](https://solana.com/docs/core/programs) and then dig +into the details of +[developing on-chain programs](https://solana.com/docs/programs). To deploy a program, use the Solana tools to interact with the on-chain loader to: - Initialize a program account -- Upload the program's shared object to the program account's data buffer -- Verify the uploaded program +- Upload the program's shared object (the program binary `.so`) to the program + account's data buffer +- (optional) Verify the uploaded program - Finalize the program by marking the program account executable. Once deployed, anyone can execute the program by sending transactions that @@ -25,7 +28,7 @@ reference it to the cluster. ### Deploy a program To deploy a program, you will need the location of the program's shared object -(the program binary .so) +(the program binary `.so`): ```bash solana program deploy @@ -89,8 +92,9 @@ Data Length: 5216 (0x1460) bytes ### Redeploy a program A program can be redeployed to the same address to facilitate rapid development, -bug fixes, or upgrades. Matching keypair files are generated once so that -redeployments will be to the same program address. +bug fixes, or upgrades. If a program id is not provided, the program will be +deployed to the default address at `-keypair.json`. This default +keypair is generated during the first program compilation. The command looks the same as the deployment command: @@ -100,26 +104,21 @@ solana program deploy By default, programs are deployed to accounts that are twice the size of the original deployment. Doing so leaves room for program growth in future -redeployments. But, if the initially deployed program is very small and then -later grows substantially, the redeployment may fail. To avoid this, specify -a `max_len` that is at least the size (in bytes) that the program is expected -to become (plus some wiggle room). +redeployments. 
But, if the initially deployed program is very small and then
+later grows substantially, the redeployment may fail. To avoid this, specify a
+`max_len` that is at least the size (in bytes) that the program is expected to
+become (plus some wiggle room).

```bash
solana program deploy --max-len 200000
```

-Note that program accounts are required to be
-[rent-exempt](developing/programming-model/accounts.md#rent-exemption), and the
-`max-len` is fixed after initial deployment, so any SOL in the program accounts
-is locked up permanently.
-
### Resuming a failed deploy

If program deployment fails, there will be a hanging intermediate buffer account
-that contains a non-zero balance. In order to recoup that balance you may
-resume a failed deployment by providing the same intermediate buffer to a new
-call to `deploy`.
+that contains a non-zero balance. In order to recoup that balance, you may resume
+a failed deployment by providing the same intermediate buffer to a new call to
+`deploy`.

Deployment failures will print an error message specifying the seed phrase
needed to recover the generated intermediate buffer's keypair:

@@ -157,9 +156,9 @@ solana program deploy --buffer

Both program and buffer accounts can be closed and their lamport balances
transferred to a recipient's account.

-If deployment fails there will be a left over buffer account that holds
-lamports. The buffer account can either be used to [resume a
-deploy](#resuming-a-failed-deploy) or closed.
+If deployment fails, there will be a left-over buffer account that holds
+lamports. The buffer account can either be used to
+[resume a deploy](#resuming-a-failed-deploy) or closed.

The program or buffer account's authority must be present to close an account,
to list all the open program or buffer accounts that match the default
@@ -173,8 +172,8 @@ solana program show --buffers

To specify a different authority:

```bash
-solana program show --programs --buffer-authority
-solana program show --buffers --buffer-authority
+solana program show --programs --buffer-authority
+solana program show --buffers --buffer-authority
```

To close a single account:

@@ -209,7 +208,7 @@ solana program show --buffers --all

### Set a program's upgrade authority

-The program's upgrade authority must to be present to deploy a program. If no
+The program's upgrade authority must be present to deploy a program. If no
authority is specified during program deployment, the default keypair is used
as the authority. This is why redeploying a program in the steps above didn't
require an authority to be explicitly specified.

@@ -232,6 +231,12 @@ Or after deployment and specifying the current authority:

solana program set-upgrade-authority --upgrade-authority --new-upgrade-authority
```

+By default, `set-upgrade-authority` requires a signature from the new authority.
+This behavior prevents a developer from giving upgrade authority to a key that
+they do not have access to. The `--skip-new-upgrade-authority-signer-check`
+option relaxes the signer check. This can be useful for situations where the new
+upgrade authority is an offline signer or a multisig.
+
### Immutable programs

A program can be marked immutable, which prevents all further redeployments, by
@@ -256,12 +261,12 @@ solana program dump
```

The dumped file will be in the same as what was deployed, so in the case of a
-shared object, the dumped file will be a fully functional shared object.
Note
-that the `dump` command dumps the entire data space, which means the output file
-will have trailing zeros after the shared object's data up to `max_len`.
-Sometimes it is useful to dump and compare a program to ensure it matches a
-known program binary. The original program file can be zero-extended, hashed,
-and compared to the hash of the dumped file.
+shared object (the program binary `.so`), the dumped file will be a fully
+functional shared object. Note that the `dump` command dumps the entire data
+space, which means the output file will have trailing zeros after the shared
+object's data up to `max_len`. Sometimes it is useful to dump and compare a
+program to ensure it matches a known program binary. The dumped file can be
+zero-truncated, hashed, and compared to the hash of the original program file.

```bash
$ solana dump dump.so
@@ -273,22 +278,33 @@ $ sha256sum extended.so dump.so

### Using an intermediary Buffer account

Instead of deploying directly to the program account, the program can be written
-to an intermediary buffer account. Intermediary accounts can be useful for things
-like multi-entity governed programs where the governing members fist verify the
-intermediary buffer contents and then vote to allow an upgrade using it.
+to an intermediary buffer account. Intermediary accounts can be useful for
+things like multi-entity governed programs where the governing members first
+verify the intermediary buffer contents and then vote to allow an upgrade using
+it.

```bash
solana program write-buffer
```

-Buffer accounts support authorities like program accounts:
+Buffer accounts are managed by an authority. To create a buffer and specify a different
+authority than the default:
+
+```bash
+solana program write-buffer --buffer-authority
+```
+
+Only the buffer authority may write to the buffer, so the `--buffer-authority` above must be a
+**signer**, and not an address. This requirement limits usage with offline signers.
+To use an offline address as a buffer authority, the buffer account must be initialized and
+written with an online keypair, and then the buffer authority must be assigned using
+`solana program set-buffer-authority`:

```bash
solana program set-buffer-authority --new-buffer-authority
```

-One exception is that buffer accounts cannot be marked immutable like program
-accounts can, so they don't support `--final`.
+Unlike program accounts, buffer accounts cannot be marked immutable, so they don't support the `--final` option.

The buffer account, once entirely written, can be passed to `deploy` to deploy
the program:

@@ -297,6 +313,58 @@ the program:

solana program deploy --program-id --buffer
```

-Note, the buffer's authority must match the program's upgrade authority.
+Note that the buffer's authority must match the program's upgrade authority. During
+deployment, the buffer account's contents are copied into the program-data
+account and the buffer account is set to zero. The lamports from the buffer
+account are refunded to a spill account.

Buffers also support `show` and `dump` just like programs do.
+
+### Upgrading program using offline signer as authority
+
+Some security models require separating the signing process from the transaction broadcast, such that the signing keys can be completely disconnected from any network, also known as [offline signing](offline-signing.md).
+
+This section describes how a program developer can use offline signing to upgrade their program, unlike the [previous section](deploy-a-program.md#redeploy-a-program), which assumes the machine is connected to the internet (i.e., online signing).
+
+Note that only the `upgrade` command can be performed in offline mode. The initial program deployment **must** be performed from an online machine, and only subsequent program upgrades can leverage offline signing.
+
+Assuming your program has been deployed and its upgrade authority has been changed to an
+offline signer, a typical setup would consist of two different signers:
+- online signer (fee payer for uploading program buffer and upgrading program)
+- offline signer (program upgrade authority)
+
+The general process is as follows:
+1. (online) create buffer and write new program to it
+2. (online) set buffer authority to offline signer
+3. (optional, online) verify the buffer's on-chain contents
+4. (offline) sign a transaction to upgrade the program
+5. (online) use this signature to broadcast the upgrade transaction
+
+```bash
+# (1) (use online machine) create buffer
+solana program write-buffer
+
+# (2) (use online machine) set buffer authority to offline signer
+solana program set-buffer-authority --new-buffer-authority
+```
+
+(3) (optional) You may verify that the uploaded program matches the built binary. See
+[dumping a program to a file](deploy-a-program.md#dumping-a-program-to-a-file) for more details.
+
+```bash
+# (4) (use offline machine) get a signature for your intent to upgrade program
+solana program upgrade --sign-only --fee-payer --upgrade-authority --blockhash
+
+# (5) (use online machine) use this signature to build and broadcast the upgrade transaction on-chain
+solana program upgrade --fee-payer --upgrade-authority --blockhash --signer :
+```
+Note:
+- typically, the output of the previous command(s) will contain some values useful in subsequent commands, e.g.
+  `--program-id`, `--buffer`, `--signer`
+- you need to specify matching (or corresponding) values for params with the same names (`--fee-payer`, `--program-id`,
+  `--upgrade-authority`, `--buffer`, `--blockhash`) in offline/online modes
+- you should pre-fill every value except for `blockhash` ahead of time, and once you are ready to act, you'll need to
+  look up a recent `blockhash` and paste it in to generate the offline transaction signature. The `blockhash` expires
+  after ~60 seconds. If you didn't make it in time, just get another fresh hash and repeat until you succeed, or
+  consider using [durable transaction nonces](durable-nonce.md).
diff --git a/docs/src/offline-signing/durable-nonce.md b/docs/src/cli/examples/durable-nonce.md
similarity index 79%
rename from docs/src/offline-signing/durable-nonce.md
rename to docs/src/cli/examples/durable-nonce.md
index be412b965e1aba..7f0199b8d44fc5 100644
--- a/docs/src/offline-signing/durable-nonce.md
+++ b/docs/src/cli/examples/durable-nonce.md
@@ -2,15 +2,16 @@
title: Durable Transaction Nonces
---

-Durable transaction nonces are a mechanism for getting around the typical
-short lifetime of a transaction's [`recent_blockhash`](developing/programming-model/transactions.md#recent-blockhash).
+Durable transaction nonces are a mechanism for getting around the typical short
+lifetime of a transaction's
+[`recent_blockhash`](https://solana.com/docs/core/transactions#recent-blockhash).
They are implemented as a Solana Program, the mechanics of which can be read
-about in the [proposal](../implemented-proposals/durable-tx-nonces.md).
+about in the [proposal](../../implemented-proposals/durable-tx-nonces.md).

## Usage Examples

Full usage details for durable nonce CLI commands can be found in the
-[CLI reference](../cli/usage.md).
+[CLI reference](../usage.md).

### Nonce Authority

@@ -18,9 +19,9 @@
Authority over a nonce account can optionally be assigned to another account.
In doing so the new authority inherits full control over the nonce account
from the previous authority, including the account creator. This feature enables
the creation of more complex account ownership arrangements and derived account
-addresses not associated with a keypair. The `--nonce-authority `
-argument is used to specify this account and is supported by the following
-commands
+addresses not associated with a keypair. The
+`--nonce-authority ` argument is used to specify this account
+and is supported by the following commands

- `create-nonce-account`
- `new-nonce`
@@ -30,10 +31,12 @@ commands

### Nonce Account Creation

The durable transaction nonce feature uses an account to store the next nonce
-value. Durable nonce accounts must be [rent-exempt](../implemented-proposals/rent.md#two-tiered-rent-regime),
-so need to carry the minimum balance to achieve this.
+value. Durable nonce accounts must be
+[rent-exempt](../../implemented-proposals/rent.md#two-tiered-rent-regime), so they need
+to carry the minimum balance to achieve this.

-A nonce account is created by first generating a new keypair, then create the account on chain
+A nonce account is created by first generating a new keypair, then creating the
+account on chain

- Command

@@ -48,9 +51,11 @@ solana create-nonce-account nonce-keypair.json 1

2SymGjGV4ksPdpbaqWFiDoBz8okvtiik4KE9cnMQgRHrRLySSdZ6jrEcpPifW4xUpp4z66XM9d9wM48sA7peG2XL
```

-> To keep the keypair entirely offline, use the [Paper Wallet](wallet-guide/paper-wallet.md) keypair generation [instructions](wallet-guide/paper-wallet.md#seed-phrase-generation) instead
+> To keep the keypair entirely offline, use the
+> [Paper Wallet](../wallets/paper.md) keypair generation
+> [instructions](../wallets/paper.md#seed-phrase-generation) instead

-> [Full usage documentation](../cli/usage.md#solana-create-nonce-account)
+> [Full usage documentation](../usage.md#solana-create-nonce-account)

### Querying the Stored Nonce Value

@@ -70,7 +75,7 @@ solana nonce nonce-keypair.json

8GRipryfxcsxN8mAGjy8zbFo9ezaUsh47TsPzmZbuytU
```

-> [Full usage documentation](../cli/usage.md#solana-get-nonce)
+> [Full usage documentation](../usage.md#solana-get-nonce)

### Advancing the Stored Nonce Value

@@ -89,7 +94,7 @@ solana new-nonce nonce-keypair.json

44jYe1yPKrjuYDmoFTdgPjg8LFpYyh1PFKJqm5SC1PiSyAL8iw1bhadcAX1SL7KDmREEkmHpYvreKoNv6fZgfvUK
```

-> [Full usage documentation](../cli/usage.md#solana-new-nonce)
+> [Full usage documentation](../usage.md#solana-new-nonce)

### Display Nonce Account

@@ -109,7 +114,7 @@ minimum balance required: 0.00136416 SOL
nonce: DZar6t2EaCFQTbUP4DHKwZ1wT8gCPW2aRfkVWhydkBvS
```

-> [Full usage documentation](../cli/usage.md#solana-nonce-account)
+> [Full usage documentation](../usage.md#solana-nonce-account)

### Withdraw Funds from a Nonce Account

@@ -129,7 +134,7 @@ solana withdraw-from-nonce-account nonce-keypair.json ~/.config/solana/id.json 0

> Close a nonce account by withdrawing the full balance

-> [Full usage documentation](../cli/usage.md#solana-withdraw-from-nonce-account)
+> [Full
usage documentation](../usage.md#solana-withdraw-from-nonce-account) ### Assign a New Authority to a Nonce Account @@ -147,7 +152,7 @@ solana authorize-nonce-account nonce-keypair.json nonce-authority.json 3F9cg4zN9wHxLGx4c3cUKmqpej4oa67QbALmChsJbfxTgTffRiL3iUehVhR9wQmWgPua66jPuAYeL1K2pYYjbNoT ``` -> [Full usage documentation](../cli/usage.md#solana-authorize-nonce-account) +> [Full usage documentation](../usage.md#solana-authorize-nonce-account) ## Other Commands Supporting Durable Nonces @@ -159,9 +164,9 @@ supported. The following subcommands have received this treatment so far -- [`pay`](../cli/usage.md#solana-pay) -- [`delegate-stake`](../cli/usage.md#solana-delegate-stake) -- [`deactivate-stake`](../cli/usage.md#solana-deactivate-stake) +- [`pay`](../usage.md#solana-pay) +- [`delegate-stake`](../usage.md#solana-delegate-stake) +- [`deactivate-stake`](../usage.md#solana-deactivate-stake) ### Example Pay Using Durable Nonce @@ -192,7 +197,8 @@ $ solana airdrop -k alice.json 1 Now Alice needs a nonce account. Create one -> Here, no separate [nonce authority](#nonce-authority) is employed, so `alice.json` has full authority over the nonce account +> Here, no separate [nonce authority](#nonce-authority) is employed, so +> `alice.json` has full authority over the nonce account ```bash $ solana create-nonce-account -k alice.json nonce.json 0.1 @@ -215,7 +221,8 @@ Error: Io(Custom { kind: Other, error: "Transaction \"33gQQaoPc9jWePMvDAeyJpcnSP Alice retries the transaction, this time specifying her nonce account and the blockhash stored there -> Remember, `alice.json` is the [nonce authority](#nonce-authority) in this example +> Remember, `alice.json` is the [nonce authority](#nonce-authority) in this +> example ```bash $ solana nonce-account nonce.json diff --git a/docs/src/offline-signing.md b/docs/src/cli/examples/offline-signing.md similarity index 77% rename from docs/src/offline-signing.md rename to docs/src/cli/examples/offline-signing.md index 5062f0b6a041d6..8b9312853a9a11 100644 --- a/docs/src/offline-signing.md +++ b/docs/src/cli/examples/offline-signing.md @@ -8,7 +8,7 @@ include: - Collecting signatures from geographically disparate signers in a [multi-signature scheme](https://spl.solana.com/token#multisig-usage) -- Signing transactions using an [airgapped]() +- Signing transactions using an [air-gapped]() signing device This document describes using Solana's CLI to separately sign and submit a @@ -18,26 +18,26 @@ transaction. 
At present, the following commands support offline signing: -- [`create-stake-account`](cli/usage.md#solana-create-stake-account) -- [`create-stake-account-checked`](cli/usage.md#solana-create-stake-account-checked) -- [`deactivate-stake`](cli/usage.md#solana-deactivate-stake) -- [`delegate-stake`](cli/usage.md#solana-delegate-stake) -- [`split-stake`](cli/usage.md#solana-split-stake) -- [`stake-authorize`](cli/usage.md#solana-stake-authorize) -- [`stake-authorize-checked`](cli/usage.md#solana-stake-authorize-checked) -- [`stake-set-lockup`](cli/usage.md#solana-stake-set-lockup) -- [`stake-set-lockup-checked`](cli/usage.md#solana-stake-set-lockup-checked) -- [`transfer`](cli/usage.md#solana-transfer) -- [`withdraw-stake`](cli/usage.md#solana-withdraw-stake) - -- [`create-vote-account`](cli/usage.md#solana-create-vote-account) -- [`vote-authorize-voter`](cli/usage.md#solana-vote-authorize-voter) -- [`vote-authorize-voter-checked`](cli/usage.md#solana-vote-authorize-voter-checked) -- [`vote-authorize-withdrawer`](cli/usage.md#solana-vote-authorize-withdrawer) -- [`vote-authorize-withdrawer-checked`](cli/usage.md#solana-vote-authorize-withdrawer-checked) -- [`vote-update-commission`](cli/usage.md#solana-vote-update-commission) -- [`vote-update-validator`](cli/usage.md#solana-vote-update-validator) -- [`withdraw-from-vote-account`](cli/usage.md#solana-withdraw-from-vote-account) +- [`create-stake-account`](../usage.md#solana-create-stake-account) +- [`create-stake-account-checked`](../usage.md#solana-create-stake-account-checked) +- [`deactivate-stake`](../usage.md#solana-deactivate-stake) +- [`delegate-stake`](../usage.md#solana-delegate-stake) +- [`split-stake`](../usage.md#solana-split-stake) +- [`stake-authorize`](../usage.md#solana-stake-authorize) +- [`stake-authorize-checked`](../usage.md#solana-stake-authorize-checked) +- [`stake-set-lockup`](../usage.md#solana-stake-set-lockup) +- [`stake-set-lockup-checked`](../usage.md#solana-stake-set-lockup-checked) +- [`transfer`](../usage.md#solana-transfer) +- [`withdraw-stake`](../usage.md#solana-withdraw-stake) + +- [`create-vote-account`](../usage.md#solana-create-vote-account) +- [`vote-authorize-voter`](../usage.md#solana-vote-authorize-voter) +- [`vote-authorize-voter-checked`](../usage.md#solana-vote-authorize-voter-checked) +- [`vote-authorize-withdrawer`](../usage.md#solana-vote-authorize-withdrawer) +- [`vote-authorize-withdrawer-checked`](../usage.md#solana-vote-authorize-withdrawer-checked) +- [`vote-update-commission`](../usage.md#solana-vote-update-commission) +- [`vote-update-validator`](../usage.md#solana-vote-update-validator) +- [`withdraw-from-vote-account`](../usage.md#solana-withdraw-from-vote-account) ## Signing Transactions Offline @@ -170,5 +170,5 @@ ohGKvpRC46jAduwU9NW8tP91JkCT5r8Mo67Ysnid4zc76tiiV1Ho6jv3BKFSbBcr2NcPPCarmfTLSkTH Typically a Solana transaction must be signed and accepted by the network within a number of slots from the blockhash in its `recent_blockhash` field (~1min at the time of this writing). If your signing procedure takes longer than this, a -[Durable Transaction Nonce](offline-signing/durable-nonce.md) can give you the extra time you +[Durable Transaction Nonce](./durable-nonce.md) can give you the extra time you need. 
diff --git a/docs/src/cli/sign-offchain-message.md b/docs/src/cli/examples/sign-offchain-message.md similarity index 94% rename from docs/src/cli/sign-offchain-message.md rename to docs/src/cli/examples/sign-offchain-message.md index cc256281722647..ae14119f7b91b9 100644 --- a/docs/src/cli/sign-offchain-message.md +++ b/docs/src/cli/examples/sign-offchain-message.md @@ -57,7 +57,7 @@ solana sign-offchain-message -k usb://ledger ``` For more information on how to setup and work with the ledger device see this -[link](../wallet-guide/hardware-wallets/ledger.md). +[link](../wallets/hardware/ledger.md). Please note that UTF-8 encoded messages require `Allow blind sign` option enabled in Solana Ledger App. Also, due to the lack of UTF-8 support in Ledger @@ -95,4 +95,4 @@ with a fixed prefix: `\xffsolana offchain`, where first byte is chosen such that it is implicitly illegal as the first byte in a transaction `MessageHeader` today. More details about the payload format and other considerations are available in the -[proposal](https://github.com/solana-labs/solana/blob/e80f67dd58b7fa3901168055211f346164efa43a/docs/src/proposals/off-chain-message-signing.md). +[proposal](https://github.com/solana-labs/solana/blob/master/docs/src/proposals/off-chain-message-signing.md). diff --git a/docs/src/developing/test-validator.md b/docs/src/cli/examples/test-validator.md similarity index 85% rename from docs/src/developing/test-validator.md rename to docs/src/cli/examples/test-validator.md index 5d6c3b0727b6ec..70f050c77f5663 100644 --- a/docs/src/developing/test-validator.md +++ b/docs/src/cli/examples/test-validator.md @@ -1,5 +1,6 @@ --- title: Solana Test Validator +sidebar_label: Test Validator --- During early stage development, it is often convenient to target a cluster with @@ -11,7 +12,7 @@ starts a full-featured, single-node cluster on the developer's workstation. - No RPC rate-limits - No airdrop limits -- Direct [on-chain program](on-chain-programs/overview) deployment +- Direct [on-chain program](https://solana.com/docs/programs) deployment (`--bpf-program ...`) - Clone accounts from a public cluster, including programs (`--clone ...`) - Load accounts from files @@ -22,7 +23,7 @@ starts a full-featured, single-node cluster on the developer's workstation. ## Installation The `solana-test-validator` binary ships with the Solana CLI Tool Suite. -[Install](/cli/install-solana-cli-tools) before continuing. +[Install](../install.md) before continuing. 
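+
+As a quick sketch, the feature flags listed above can be combined in a single
+invocation; the addresses and file name below are placeholders, not real
+values:
+
+```bash
+# start a local validator, cloning one program from devnet and loading
+# another directly from a local .so file
+solana-test-validator \
+  --url devnet \
+  --clone CLONED_PROGRAM_ADDRESS \
+  --bpf-program LOCAL_PROGRAM_ADDRESS my_program.so
+```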
## Running @@ -125,7 +126,7 @@ Log: test-ledger/validator.log Identity: EPhgPANa5Rh2wa4V2jxt7YbtWa3Uyw4sTeZ13cQjDDB8 ``` -- The validator's identity in the [gossip network](/validator/gossip#gossip-overview) +- The validator's identity in the [gossip network](../../validator/gossip.md#gossip-overview) ``` Version: 1.6.7 @@ -139,8 +140,8 @@ TPU Address: 127.0.0.1:1027 JSON RPC URL: http://127.0.0.1:8899 ``` -- The network address of the [Gossip](/validator/gossip#gossip-overview), - [Transaction Processing Unit](/validator/tpu) and [JSON RPC](../api/http#json-rpc-api-reference) +- The network address of the [Gossip](../../validator/gossip.md#gossip-overview), + [Transaction Processing Unit](../../validator/tpu.md) and [JSON RPC](https://solana.com/docs/rpc) service, respectively ``` @@ -148,15 +149,15 @@ JSON RPC URL: http://127.0.0.1:8899 ``` - Session running time, current slot of the three block - [commitment levels](../api/http#configuring-state-commitment), + [commitment levels](https://solana.com/docs/rpc#configuring-state-commitment), slot height of the last snapshot, transaction count, - [voting authority](/running-validator/vote-accounts#vote-authority) balance + [voting authority](../../operations/guides/vote-accounts.md#vote-authority) balance ## Appendix II: Runtime Features -By default, the test validator runs with all [runtime features](programming-model/runtime#features) activated. +By default, the test validator runs with all [runtime features](https://solana.com/docs/core/runtime#features) activated. -You can verify this using the [Solana command-line tools](cli/install-solana-cli-tools.md): +You can verify this using the [Solana command-line tools](../install.md): ```bash solana feature status -ul diff --git a/docs/src/cli/transfer-tokens.md b/docs/src/cli/examples/transfer-tokens.md similarity index 92% rename from docs/src/cli/transfer-tokens.md rename to docs/src/cli/examples/transfer-tokens.md index 187486ab88673b..89374ebf43864a 100644 --- a/docs/src/cli/transfer-tokens.md +++ b/docs/src/cli/examples/transfer-tokens.md @@ -3,12 +3,12 @@ title: Send and Receive Tokens --- This page describes how to receive and send SOL tokens using the command line -tools with a command line wallet such as a [paper wallet](../wallet-guide/paper-wallet.md), -a [file system wallet](../wallet-guide/file-system-wallet.md), or a -[hardware wallet](../wallet-guide/hardware-wallets.md). Before you begin, make sure +tools with a command line wallet such as a [paper wallet](../wallets/paper.md), +a [file system wallet](../wallets/file-system.md), or a +[hardware wallet](../wallets/hardware/index.md). Before you begin, make sure you have created a wallet and have access to its address (pubkey) and the signing keypair. Check out our -[conventions for entering keypairs for different wallet types](../cli/conventions.md#keypair-conventions). +[conventions for entering keypairs for different wallet types](../intro.md#keypair-conventions). ## Testing your Wallet @@ -67,9 +67,9 @@ pubkey: GKvqsuNcnwWqPzzuhLmGi4rzzh55FhJtGizkhHaEJqiV ``` You can also create a second (or more) wallet of any type: -[paper](../wallet-guide/paper-wallet#creating-multiple-paper-wallet-addresses), -[file system](../wallet-guide/file-system-wallet.md#creating-multiple-file-system-wallet-addresses), -or [hardware](../wallet-guide/hardware-wallets.md#multiple-addresses-on-a-single-hardware-wallet). 
+[paper](../wallets/paper.md#creating-multiple-paper-wallet-addresses), +[file system](../wallets/file-system.md#creating-multiple-file-system-wallet-addresses), +or [hardware](../wallets/hardware/index.md#multiple-addresses-on-a-single-hardware-wallet). #### Transfer tokens from your first wallet to the second address @@ -143,7 +143,7 @@ To receive tokens, you will need an address for others to send tokens to. In Solana, the wallet address is the public key of a keypair. There are a variety of techniques for generating keypairs. The method you choose will depend on how you choose to store keypairs. Keypairs are stored in wallets. Before receiving -tokens, you will need to [create a wallet](../wallet-guide/cli.md). +tokens, you will need to [create a wallet](../wallets/index.md). Once completed, you should have a public key for each keypair you generated. The public key is a long string of base58 characters. Its length varies from 32 to 44 characters. diff --git a/docs/src/cli.md b/docs/src/cli/index.md similarity index 65% rename from docs/src/cli.md rename to docs/src/cli/index.md index a4461c6d76e36a..cdf1ed10f83d76 100644 --- a/docs/src/cli.md +++ b/docs/src/cli/index.md @@ -1,10 +1,12 @@ --- -title: Command-line Guide +title: Solana CLI Tool Suite +sidebar_label: Overview +sidebar_position: 0 --- In this section, we will describe how to use the Solana command-line tools to -create a _wallet_, to send and receive SOL tokens, and to participate in -the cluster by delegating stake. +create a _wallet_, to send and receive SOL tokens, and to participate in the +cluster by delegating stake. To interact with a Solana cluster, we will use its command-line interface, also known as the CLI. We use the command-line because it is the first place the @@ -16,7 +18,7 @@ secure access to your Solana accounts. 
To get started using the Solana Command Line (CLI) tools: -- [Install the Solana Tools](cli/install-solana-cli-tools.md) -- [Choose a Cluster](cli/choose-a-cluster.md) -- [Create a Wallet](wallet-guide/cli.md) -- [Check out our CLI conventions](cli/conventions.md) +- [Install the Solana CLI Tool Suite](./install.md) +- [Choose a Cluster](./examples/choose-a-cluster.md) +- [Create a Wallet](./wallets/index.md) +- [Introduction to our CLI conventions](./intro.md) diff --git a/docs/src/cli/install-solana-cli-tools.md b/docs/src/cli/install.md similarity index 59% rename from docs/src/cli/install-solana-cli-tools.md rename to docs/src/cli/install.md index 26a4ede5af62b7..7773631dda59d3 100644 --- a/docs/src/cli/install-solana-cli-tools.md +++ b/docs/src/cli/install.md @@ -1,9 +1,11 @@ --- -title: Install the Solana Tool Suite +title: Install the Solana CLI +sidebar_label: Installation +sidebar_position: 1 --- -There are multiple ways to install the Solana tools on your computer -depending on your preferred workflow: +There are multiple ways to install the Solana tools on your computer depending +on your preferred workflow: - [Use Solana's Install Tool (Simplest option)](#use-solanas-install-tool) - [Download Prebuilt Binaries](#download-prebuilt-binaries) @@ -17,8 +19,8 @@ depending on your preferred workflow: - Open your favorite Terminal application - Install the Solana release - [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your - machine by running: + [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) + on your machine by running: ```bash sh -c "$(curl -sSfL https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/install)" @@ -39,15 +41,14 @@ Active release directory: /home/solana/.local/share/solana/install/active_releas Update successful ``` -- Depending on your system, the end of the installer messaging may prompt you - to +- Depending on your system, the end of the installer messaging may prompt you to ```bash Please update your PATH environment variable to include the solana programs: ``` -- If you get the above message, copy and paste the recommended command below - it to update `PATH` +- If you get the above message, copy and paste the recommended command below it + to update `PATH` - Confirm you have the desired version of `solana` installed by running: ```bash @@ -63,10 +64,10 @@ solana --version - Open a Command Prompt (`cmd.exe`) as an Administrator - - Search for Command Prompt in the Windows search bar. When the Command - Prompt app appears, right-click and select “Open as Administrator”. - If you are prompted by a pop-up window asking “Do you want to allow this app to - make changes to your device?”, click Yes. + - Search for Command Prompt in the Windows search bar. When the Command Prompt + app appears, right-click and select “Open as Administrator”. If you are + prompted by a pop-up window asking “Do you want to allow this app to make + changes to your device?”, click Yes. 
- Copy and paste the following command, then press Enter to download the Solana
  installer into a temporary directory:

@@ -147,8 +148,100 @@ set PATH=%cd%/bin;%PATH%

## Build From Source

If you are unable to use the prebuilt binaries or prefer to build it yourself
-from source, navigate to
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
+from source, follow these steps, ensuring you have the necessary prerequisites
+installed on your system.
+
+### Prerequisites
+
+Before building from source, make sure to install the following prerequisites:
+
+#### For Debian and Other Linux Distributions:
+
+Rust Programming Language: Check "Install Rust" at
+[https://www.rust-lang.org/tools/install](https://www.rust-lang.org/tools/install),
+which recommends the following command:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+Install build dependencies:
+
+- Build essential
+- Package config
+- Udev & LLVM & libclang
+- Protocol buffers
+
+```bash
+apt-get install \
+  build-essential \
+  pkg-config \
+  libudev-dev llvm libclang-dev \
+  protobuf-compiler
+```
+
+#### For Other Linux Distributions:
+
+Replace `apt` with your distribution's package manager (e.g., `yum`, `dnf`,
+`pacman`) and adjust package names as needed.
+
+#### For macOS:
+
+Install Homebrew (if not already installed); check "Install Homebrew" at
+[https://brew.sh/](https://brew.sh/), which recommends the following command:
+
+```bash
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+```
+
+Install the necessary tools and libraries using Homebrew:
+
+```bash
+brew install rust pkg-config libudev protobuf llvm coreutils
+```
+
+Follow the instructions given at the end of the brew install command about
+`PATH` configurations.
+
+#### For Windows:
+
+Rust Programming Language: Check "Install Rust" at
+[https://www.rust-lang.org/tools/install](https://www.rust-lang.org/tools/install),
+which recommends the following command:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+- Download and install the Build Tools for Visual Studio (2019 or later) from
+  the
+  [Visual Studio downloads page](https://visualstudio.microsoft.com/downloads/).
+  Make sure to include the C++ build tools in the installation.
+- Install LLVM: Download and install LLVM from the
+  [official LLVM download page](https://releases.llvm.org/download.html).
+- Install Protocol Buffers Compiler (protoc): Download `protoc` from the
+  [GitHub releases page of Protocol Buffers](https://github.com/protocolbuffers/protobuf/releases),
+  and add it to your `PATH`.
+
+:::info
+
+Users on Windows 10 or 11 may need to install
+[Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/install)
+(WSL) in order to be able to build from source. WSL provides a Linux environment
+that runs inside your existing Windows installation. You can then run regular
+Linux software, including the Linux versions of the Solana CLI.
+
+Once installed, run `wsl` from your Windows terminal, then continue through the
+[Debian and Other Linux Distributions](#for-debian-and-other-linux-distributions)
+steps above.
+
+:::
+
+### Building from Source
+
+After installing the prerequisites, proceed with building Solana from source:
+navigate to
+[Solana's GitHub releases page](https://github.com/solana-labs/solana/releases/latest),
and download the **Source Code** archive.
Extract the code and build the binaries with:

@@ -166,15 +259,16 @@ solana-install init

## Use Homebrew

-This option requires you to have [Homebrew](https://brew.sh/) package manager on your MacOS or Linux machine.
+This option requires you to have the [Homebrew](https://brew.sh/) package manager
+on your MacOS or Linux machine.

### MacOS & Linux

- Follow instructions at: https://formulae.brew.sh/formula/solana

[Homebrew formulae](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/solana.rb)
-is updated after each `solana` release, however it is possible that
-the Homebrew version is outdated.
+is updated after each `solana` release; however, it is possible that the Homebrew
+version is outdated.

- Confirm you have the desired version of `solana` installed by entering:

diff --git a/docs/src/cli/conventions.md b/docs/src/cli/intro.md
similarity index 91%
rename from docs/src/cli/conventions.md
rename to docs/src/cli/intro.md
index 7f97132ba57670..1701450173e39a 100644
--- a/docs/src/cli/conventions.md
+++ b/docs/src/cli/intro.md
@@ -1,5 +1,7 @@
---
-title: Using Solana CLI
+title: Introduction to the Solana CLI
+sidebar_label: Introduction
+sidebar_position: 2
---

Before running any Solana CLI commands, let's go over some conventions that
@@ -31,7 +33,7 @@ the base58 encoding of your public key, such as

Many commands using the CLI tools require a value for a ``. The
value you should use for the keypair depends on what type of
-[command line wallet you created](../wallet-guide/cli.md).
+[command line wallet you created](./wallets/index.md).

For example, the CLI help shows that the way to display any wallet's address
(also known as the keypair's pubkey), is:

@@ -72,7 +74,7 @@ solana-keygen pubkey /home/solana/my_wallet.json

#### Hardware Wallet

If you chose a hardware wallet, use your
-[keypair URL](../wallet-guide/hardware-wallets.md#specify-a-hardware-wallet-key),
+[keypair URL](./wallets/hardware/index.md#specify-a-hardware-wallet-key),
such as `usb://ledger?key=0`.

```bash
diff --git a/docs/src/cli/wallets/_category_.json b/docs/src/cli/wallets/_category_.json
new file mode 100644
index 00000000000000..e420a33bb4a11f
--- /dev/null
+++ b/docs/src/cli/wallets/_category_.json
@@ -0,0 +1,7 @@
+{
+  "position": 3.5,
+  "label": "Command-line Wallets",
+  "collapsible": true,
+  "collapsed": false,
+  "link": null
+}
diff --git a/docs/src/wallet-guide/file-system-wallet.md b/docs/src/cli/wallets/file-system.md
similarity index 95%
rename from docs/src/wallet-guide/file-system-wallet.md
rename to docs/src/cli/wallets/file-system.md
index 90fb5c29269f1d..0041c51876b490 100644
--- a/docs/src/wallet-guide/file-system-wallet.md
+++ b/docs/src/cli/wallets/file-system.md
@@ -1,5 +1,6 @@
---
-title: File System Wallet
+title: File System Wallets
+sidebar_position: 2
---

This document describes how to create and use a file system wallet with the
@@ -11,7 +12,7 @@ on your computer system's filesystem.
## Before you Begin Make sure you have -[installed the Solana Command Line Tools](../cli/install-solana-cli-tools.md) +[installed the Solana Command Line Tools](../install.md) ## Generate a File System Wallet Keypair diff --git a/docs/src/cli/wallets/hardware/_category_.json b/docs/src/cli/wallets/hardware/_category_.json new file mode 100644 index 00000000000000..d000fb2312b1f0 --- /dev/null +++ b/docs/src/cli/wallets/hardware/_category_.json @@ -0,0 +1,7 @@ +{ + "position": 5, + "label": "Hardware Wallets", + "collapsible": false, + "collapsed": false, + "link": null +} diff --git a/docs/src/wallet-guide/hardware-wallets.md b/docs/src/cli/wallets/hardware/index.md similarity index 92% rename from docs/src/wallet-guide/hardware-wallets.md rename to docs/src/cli/wallets/hardware/index.md index 01d98b0318f8e0..9c8642cf34c6a1 100644 --- a/docs/src/wallet-guide/hardware-wallets.md +++ b/docs/src/cli/wallets/hardware/index.md @@ -1,5 +1,7 @@ --- -title: Using Hardware Wallets on the Solana CLI +title: Using Hardware Wallets in the Solana CLI +sidebar_label: Using in the Solana CLI +sidebar_position: 0 --- Signing a transaction requires a private key, but storing a private @@ -19,7 +21,7 @@ hardware wallet. The Solana CLI supports the following hardware wallets: -- [Ledger Nano S and Ledger Nano X](hardware-wallets/ledger.md) +- [Ledger Nano S and Ledger Nano X](./ledger.md) ## Specify a Keypair URL diff --git a/docs/src/wallet-guide/hardware-wallets/ledger.md b/docs/src/cli/wallets/hardware/ledger.md similarity index 61% rename from docs/src/wallet-guide/hardware-wallets/ledger.md rename to docs/src/cli/wallets/hardware/ledger.md index bb3f0ebfbe92de..e0060aba803eb2 100644 --- a/docs/src/wallet-guide/hardware-wallets/ledger.md +++ b/docs/src/cli/wallets/hardware/ledger.md @@ -1,14 +1,15 @@ --- -title: Ledger Nano +title: Using Ledger Nano Hardware Wallets in the Solana CLI +sidebar_label: Ledger Nano --- -This page describes how to use a Ledger Nano S, Nano S Plus, or Nano X to interact with Solana -using the command line tools. +This page describes how to use a Ledger Nano S, Nano S Plus, or Nano X to +interact with Solana using the command line tools. ## Before You Begin - [Set up a Nano with the Solana App](https://support.ledger.com/hc/en-us/articles/360016265659-Solana-SOL-?docs=true) -- [Install the Solana command-line tools](../../cli/install-solana-cli-tools.md) +- [Install the Solana command-line tools](../../install.md) ## Use Ledger Nano with Solana CLI @@ -27,18 +28,18 @@ solana-keygen pubkey usb://ledger This confirms your Ledger device is connected properly and in the correct state to interact with the Solana CLI. The command returns your Ledger's unique -_wallet ID_. When you have multiple Nano devices connected to the same -computer, you can use your wallet ID to specify which Ledger hardware wallet -you want to use. If you only plan to use a single Nano on your computer -at a time, you don't need to include the wallet ID. For information on -using the wallet ID to use a specific Ledger, see +_wallet ID_. When you have multiple Nano devices connected to the same computer, +you can use your wallet ID to specify which Ledger hardware wallet you want to +use. If you only plan to use a single Nano on your computer at a time, you don't +need to include the wallet ID. For information on using the wallet ID to use a +specific Ledger, see [Manage Multiple Hardware Wallets](#manage-multiple-hardware-wallets). 
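+
+As an illustration, a keypair URL that pins both a specific device (by wallet
+ID) and a specific key on it can be passed anywhere a keypair URL is accepted;
+the wallet ID below is an example value, and the quotes keep **zsh** from
+interpreting the `?`:
+
+```bash
+solana-keygen pubkey "usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0"
+```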
### View your Wallet Addresses -Your Nano supports an arbitrary number of valid wallet addresses and signers. -To view any address, use the `solana-keygen pubkey` command, as shown below, -followed by a valid [keypair URL](../hardware-wallets.md#specify-a-keypair-url). +Your Nano supports an arbitrary number of valid wallet addresses and signers. To +view any address, use the `solana-keygen pubkey` command, as shown below, +followed by a valid [keypair URL](./index.md#specify-a-keypair-url). Multiple wallet addresses can be useful if you want to transfer tokens between your own accounts for different purposes, or use different keypairs on the @@ -57,12 +58,11 @@ solana-keygen pubkey usb://ledger?key=2 - NOTE: keypair url parameters are ignored in **zsh**  [see troubleshooting for more info](#troubleshooting) -You can use other values for the number after `key=` as well. -Any of the addresses displayed by these commands are valid Solana wallet -addresses. The private portion associated with each address is stored securely -on the Nano, and is used to sign transactions from this address. -Just make a note of which keypair URL you used to derive any address you will be -using to receive tokens. +You can use other values for the number after `key=` as well. Any of the +addresses displayed by these commands are valid Solana wallet addresses. The +private portion associated with each address is stored securely on the Nano, and +is used to sign transactions from this address. Just make a note of which +keypair URL you used to derive any address you will be using to receive tokens. If you are only planning to use a single address/keypair on your device, a good easy-to-remember path might be to use the address at `key=0`. View this address @@ -93,26 +93,26 @@ solana balance 7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri ``` You can also view the balance of any account address on the Accounts tab in the -[Explorer](https://explorer.solana.com/accounts) -and paste the address in the box to view the balance in you web browser. +[Explorer](https://explorer.solana.com/accounts) and paste the address in the +box to view the balance in your web browser. Note: Any address with a balance of 0 SOL, such as a newly created one on your -Ledger, will show as "Not Found" in the explorer. Empty accounts and non-existent -accounts are treated the same in Solana. This will change when your account -address has some SOL in it. +Ledger, will show as "Not Found" in the explorer. Empty accounts and +non-existent accounts are treated the same in Solana. This will change when your +account address has some SOL in it. ### Send SOL from a Nano -To send some tokens from an address controlled by your Nano, you will -need to use the device to sign a transaction, using the same keypair URL you -used to derive the address. To do this, make sure your Nano is plugged in, -unlocked with the PIN, Ledger Live is not running, and the Solana App is open -on the device, showing "Application is Ready". +To send some tokens from an address controlled by your Nano, you will need to +use the device to sign a transaction, using the same keypair URL you used to +derive the address. To do this, make sure your Nano is plugged in, unlocked with +the PIN, Ledger Live is not running, and the Solana App is open on the device, +showing "Application is Ready". 
-The `solana transfer` command is used to specify to which address to send tokens,
-how many tokens to send, and uses the `--keypair` argument to specify which
-keypair is sending the tokens, which will sign the transaction, and the balance
-from the associated address will decrease.
+The `solana transfer` command specifies the address to send tokens to and how
+many tokens to send. The `--keypair` argument specifies which keypair sends the
+tokens; that keypair signs the transaction, and the balance of its associated
+address will decrease.

```bash
solana transfer RECIPIENT_ADDRESS AMOUNT --keypair KEYPAIR_URL_OF_SENDER
```

@@ -120,12 +120,12 @@ solana transfer RECIPIENT_ADDRESS AMOUNT --keypair KEYPAIR_URL_OF_SENDER

Below is a full example. First, an address is viewed at a certain keypair URL.
Second, the balance of that address is checked. Lastly, a transfer transaction
-is entered to send `1` SOL to the recipient address `7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri`.
-When you hit Enter for a transfer command, you will be prompted to approve the
-transaction details on your Ledger device. On the device, use the right and
-left buttons to review the transaction details. If they look correct, click
-both buttons on the "Approve" screen, otherwise push both buttons on the "Reject"
-screen.
+is entered to send `1` SOL to the recipient address
+`7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri`. When you hit Enter for a
+transfer command, you will be prompted to approve the transaction details on
+your Ledger device. On the device, use the right and left buttons to review the
+transaction details. If they look correct, click both buttons on the "Approve"
+screen; otherwise, push both buttons on the "Reject" screen.

```bash
~$ solana-keygen pubkey usb://ledger?key=42
@@ -146,8 +146,8 @@ transaction signature, and wait for the maximum number of confirmations (32)
before returning. This only takes a few seconds, and then the transaction is
finalized on the Solana network. You can view details of this or any other
transaction by going to the Transaction tab in the
-[Explorer](https://explorer.solana.com/transactions)
-and paste in the transaction signature.
+[Explorer](https://explorer.solana.com/transactions) and paste in the
+transaction signature.

## Advanced Operations

### Multiple Hardware Wallets

It is sometimes useful to sign a transaction with keys from multiple hardware
wallets. Signing with multiple wallets requires _fully qualified keypair URLs_.
-When the URL is not fully qualified, the Solana CLI will prompt you with
-the fully qualified URLs of all connected hardware wallets, and ask you to
-choose which wallet to use for each signature.
+When the URL is not fully qualified, the Solana CLI will prompt you with the
+fully qualified URLs of all connected hardware wallets, and ask you to choose
+which wallet to use for each signature.

-Instead of using the interactive prompts, you can generate fully qualified
-URLs using the Solana CLI `resolve-signer` command. For example, try
-connecting a Nano to USB, unlock it with your pin, and running the
-following command:
+Instead of using the interactive prompts, you can generate fully qualified URLs
+using the Solana CLI `resolve-signer` command.
For example, try connecting a Nano to USB, unlocking it with your PIN, and
+running the following command:

```text
solana resolve-signer usb://ledger?key=0/0
```

@@ -176,11 +175,11 @@ usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/0

but where `BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK` is your `WALLET_ID`.

-With your fully qualified URL, you can connect multiple hardware wallets to
-the same computer and uniquely identify a keypair from any of them.
-Use the output from the `resolve-signer` command anywhere a `solana` command
-expects a `<KEYPAIR>` entry to use that resolved path as the signer for that
-part of the given transaction.
+With your fully qualified URL, you can connect multiple hardware wallets to the
+same computer and uniquely identify a keypair from any of them. Use the output
+from the `resolve-signer` command anywhere a `solana` command expects a
+`<KEYPAIR>` entry to use that resolved path as the signer for that part of the
+given transaction.

## Troubleshooting

@@ -210,9 +209,9 @@ solana-keygen pubkey usb://ledger\?key=0

## Support

-Check out our [Wallet Support Page](../support.md)
-for ways to get help.
+You can find additional support and get help on the
+[Solana StackExchange](https://solana.stackexchange.com).

-Read more about [sending and receiving tokens](../../cli/transfer-tokens.md) and
-[delegating stake](../../cli/delegate-stake.md). You can use your Ledger keypair URL
-anywhere you see an option or argument that accepts a `<KEYPAIR>`.
+Read more about [sending and receiving tokens](../../examples/transfer-tokens.md) and
+[delegating stake](../../examples/delegate-stake.md). You can use your Ledger keypair
+URL anywhere you see an option or argument that accepts a `<KEYPAIR>`.
diff --git a/docs/src/wallet-guide/cli.md b/docs/src/cli/wallets/index.md
similarity index 86%
rename from docs/src/wallet-guide/cli.md
rename to docs/src/cli/wallets/index.md
index 841dc7264d47a7..fcd907629c8d85 100644
--- a/docs/src/wallet-guide/cli.md
+++ b/docs/src/cli/wallets/index.md
@@ -1,11 +1,13 @@
---
title: Command Line Wallets
+sidebar_label: Overview
+sidebar_position: 0
---

Solana supports several different types of wallets that can be used to interface
directly with the Solana command-line tools.

-To use a Command Line Wallet, you must first [install the Solana CLI tools](../cli/install-solana-cli-tools.md)
+To use a Command Line Wallet, you must first [install the Solana CLI tools](../install.md).

## File System Wallet

@@ -26,7 +28,7 @@ keypairs are stored on your computer as files, a skilled hacker with physical
access to your computer may be able to access it. Using an encrypted hard
drive, such as FileVault on MacOS, minimizes that risk.

-[File System Wallet](file-system-wallet.md)
+See [File System Wallets](./file-system.md) for more details.

## Paper Wallet

@@ -39,9 +41,9 @@ regenerate a keypair on demand.

In terms of convenience versus security, a paper wallet sits at the opposite
side of the spectrum from an FS wallet. It is terribly inconvenient to use, but
offers excellent security. That high security is further amplified when paper
-wallets are used in conjunction with [offline signing](../offline-signing.md).
+wallets are used in conjunction with [offline signing](../examples/offline-signing.md).

-[Paper Wallets](paper-wallet.md)
+See [Paper Wallets](./paper.md) for more details.

## Hardware Wallet

@@ -56,4 +58,4 @@ security and convenience for cryptocurrencies. It effectively automates the
process of offline signing while retaining nearly all the convenience of a file
system wallet.
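+
+All three wallet types plug into the same CLI signer interface. As an
+illustrative sketch (your file paths and key derivations will differ), the same
+command works with any of them:
+
+```bash
+# file system wallet: a keypair file on disk
+solana balance ~/.config/solana/id.json
+# paper wallet: recover the keypair from a seed phrase at an interactive prompt
+solana balance prompt://
+# hardware wallet: a keypair held on a Ledger device
+solana balance usb://ledger?key=0
+```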
-[Hardware Wallets](hardware-wallets.md)
+See [Hardware Wallets](./hardware/index.md) for more details.
diff --git a/docs/src/wallet-guide/paper-wallet.md b/docs/src/cli/wallets/paper.md
similarity index 82%
rename from docs/src/wallet-guide/paper-wallet.md
rename to docs/src/cli/wallets/paper.md
index bb6336507cfd3e..85c76779b852ed 100644
--- a/docs/src/wallet-guide/paper-wallet.md
+++ b/docs/src/cli/wallets/paper.md
@@ -1,11 +1,13 @@
---
-title: Paper Wallet
+title: Paper Wallets
+sidebar_position: 1
---

This document describes how to create and use a paper wallet with the Solana CLI
tools.

-> We do not intend to advise on how to _securely_ create or manage paper wallets. Please research the security concerns carefully.
+> We do not intend to advise on how to _securely_ create or manage paper
+> wallets. Please research the security concerns carefully.

## Overview

@@ -20,11 +22,13 @@ Solana commands can be run without ever saving a keypair to disk on a machine.
If avoiding writing a private key to disk is a security concern of yours,
you've come to the right place.

-> Even using this secure input method, it's still possible that a private key gets written to disk by unencrypted memory swaps. It is the user's responsibility to protect against this scenario.
+> Even using this secure input method, it's still possible that a private key
+> gets written to disk by unencrypted memory swaps. It is the user's
+> responsibility to protect against this scenario.

## Before You Begin

-- [Install the Solana command-line tools](../cli/install-solana-cli-tools.md)
+- [Install the Solana command-line tools](../install.md)

### Check your installation

@@ -42,7 +46,8 @@ The seed phrase and passphrase can be used together as a paper wallet. As long
as you keep your seed phrase and passphrase stored safely, you can use them to
access your account.

-> For more information about how seed phrases work, review this [Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase).
+> For more information about how seed phrases work, review this
+> [Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase).

### Seed Phrase Generation

@@ -59,7 +64,9 @@ have not made any errors.

solana-keygen new --no-outfile
```

-> If the `--no-outfile` flag is **omitted**, the default behavior is to write the keypair to `~/.config/solana/id.json`, resulting in a [file system wallet](file-system-wallet.md).
+> If the `--no-outfile` flag is **omitted**, the default behavior is to write
+> the keypair to `~/.config/solana/id.json`, resulting in a
+> [file system wallet](./file-system.md).

The output of this command will display a line like this:

@@ -69,10 +76,11 @@ pubkey: 9ZNTfG4NyQgxy2SWjSiQoUyBPEvXT2xo7fKc5hPYYJ7b

The value shown after `pubkey:` is your _wallet address_.

-**Note:** In working with paper wallets and file system wallets, the terms "pubkey"
-and "wallet address" are sometimes used interchangeably.
+**Note:** In working with paper wallets and file system wallets, the terms
+"pubkey" and "wallet address" are sometimes used interchangeably.

-> For added security, increase the seed phrase word count using the `--word-count` argument
+> For added security, increase the seed phrase word count using the
+> `--word-count` argument.

For full usage details, run:

@@ -80,7 +88,6 @@ For full usage details, run:
solana-keygen new --help
```

-
### Public Key Derivation

Public keys can be derived from a seed phrase and a passphrase if you choose to
@@ -93,7 +100,8 @@ with the solana command-line tools using the `prompt` URI scheme.
solana-keygen pubkey prompt://
```

-> Note that you could potentially use different passphrases for the same seed phrase. Each unique passphrase will yield a different keypair.
+> Note that you could potentially use different passphrases for the same seed
+> phrase. Each unique passphrase will yield a different keypair.

The `solana-keygen` tool uses the same BIP39 standard English word list as it
does to generate seed phrases. If your seed phrase was generated with another
@@ -105,19 +113,22 @@ validation.

solana-keygen pubkey prompt:// --skip-seed-phrase-validation
```

-After entering your seed phrase with `solana-keygen pubkey prompt://` the console
-will display a string of base-58 characters. This is the [derived](#hierarchical-derivation) solana BIP44 _wallet address_
-associated with your seed phrase.
+After entering your seed phrase with `solana-keygen pubkey prompt://`, the
+console will display a string of base-58 characters. This is the
+[derived](#hierarchical-derivation) solana BIP44 _wallet address_ associated
+with your seed phrase.

> Copy the derived address to a USB stick for easy usage on networked computers

-If needed, you can access the legacy, raw keypair's pubkey by instead passing the `ASK` keyword:
+If needed, you can access the legacy, raw keypair's pubkey by instead passing
+the `ASK` keyword:

```bash
solana-keygen pubkey ASK
```

-> A common next step is to [check the balance](#checking-account-balance) of the account associated with a public key
+> A common next step is to [check the balance](#checking-account-balance) of the
+> account associated with a public key.

For full usage details, run:

@@ -140,7 +151,8 @@ derive a child key, supply the `?key=<ACCOUNT>/<CHANGE>` query string.

solana-keygen pubkey prompt://?key=0/1
```

-To use a derivation path other than solana's standard BIP44, you can supply `?full-path=m/<PURPOSE>/<COIN_TYPE>/<ACCOUNT>/<CHANGE>`.
+To use a derivation path other than solana's standard BIP44, you can supply
+`?full-path=m/<PURPOSE>/<COIN_TYPE>/<ACCOUNT>/<CHANGE>`.

```bash
solana-keygen pubkey prompt://?full-path=m/44/2017/0/1
@@ -178,7 +190,7 @@ Public keys can then be typed manually or transferred via a USB stick to a
networked machine.

Next, configure the `solana` CLI tool to
-[connect to a particular cluster](../cli/choose-a-cluster.md):
+[connect to a particular cluster](../examples/choose-a-cluster.md):

```bash
solana config set --url <CLUSTER_URL> # (i.e. https://api.mainnet-beta.solana.com)
@@ -192,12 +204,13 @@ solana balance <PUBKEY>

## Creating Multiple Paper Wallet Addresses

-You can create as many wallet addresses as you like. Simply re-run the
-steps in [Seed Phrase Generation](#seed-phrase-generation) or
+You can create as many wallet addresses as you like. Simply re-run the steps in
+[Seed Phrase Generation](#seed-phrase-generation) or
[Public Key Derivation](#public-key-derivation) to create a new address.
Multiple wallet addresses can be useful if you want to transfer tokens between
your own accounts for different purposes.

## Support

-Check out our [Wallet Support Page](support.md) for ways to get help.
+You can find additional support and get help on the
+[Solana StackExchange](https://solana.stackexchange.com).
diff --git a/docs/src/cluster/fork-generation.md b/docs/src/cluster/fork-generation.md
deleted file mode 100644
index 2ab6e1d70cae82..00000000000000
--- a/docs/src/cluster/fork-generation.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: Fork Generation
-description: "A fork is created when validators do not agree on a newly produced block. Using a consensus algorithm validators vote on which will be finalized."
---- - -The Solana protocol doesn’t wait for all validators to agree on a newly produced block before the next block is produced. Because of that, it’s quite common for two different blocks to be chained to the same parent block. In those situations, we call each conflicting chain a [“fork.”](./fork-generation.md) - -Solana validators need to vote on one of these forks and reach agreement on which one to use through a consensus algorithm (that is beyond the scope of this article). The main point you need to remember is that when there are competing forks, only one fork will be finalized by the cluster and the abandoned blocks in competing forks are all discarded. - -This section describes how forks naturally occur as a consequence of [leader rotation](./leader-rotation.md). - -## Overview - -Nodes take turns being [leader](./../terminology.md#leader) and generating the PoH that encodes state changes. The cluster can tolerate loss of connection to any leader by synthesizing what the leader _**would**_ have generated had it been connected but not ingesting any state changes. - -The possible number of forks is thereby limited to a "there/not-there" skip list of forks that may arise on leader rotation slot boundaries. At any given slot, only a single leader's transactions will be accepted. - -### Forking example - -The table below illustrates what competing forks could look like. Time progresses from left to right and each slot is assigned to a validator that temporarily becomes the cluster “leader” and may produce a block for that slot. - -In this example, the leader for slot 3 chose to chain its “Block 3” directly to “Block 1” and in doing so skipped “Block 2”. Similarly, the leader for slot 5 chose to chain “Block 5” directly to “Block 3” and skipped “Block 4”. - -> Note that across different forks, the block produced for a given slot is _always_ the same because producing two different blocks for the same slot is a slashable offense. So the conflicting forks above can be distinguished from each other by which slots they have _skipped_. - -| | Slot 1 | Slot 2 | Slot 3 | Slot 4 | Slot 5 | -| ------ | ------- | ------- | ------- | ------- | ------- | -| Fork 1 | Block 1 | | Block 3 | | Block 5 | -| Fork 2 | Block 1 | | Block 3 | Block 4 | | -| Fork 3 | Block 1 | Block 2 | | | | - -## Message Flow - -1. Transactions are ingested by the current leader. -2. Leader filters valid transactions. -3. Leader executes valid transactions updating its state. -4. Leader packages transactions into entries based off its current PoH slot. -5. Leader transmits the entries to validator nodes \(in signed shreds\) - 1. The PoH stream includes ticks; empty entries that indicate liveness of the leader and the passage of time on the cluster. - 2. A leader's stream begins with the tick entries necessary to complete PoH back to the leader's most recently observed prior leader slot. -6. Validators retransmit entries to peers in their set and to further downstream nodes. -7. Validators validate the transactions and execute them on their state. -8. Validators compute the hash of the state. -9. At specific times, i.e. specific PoH tick counts, validators transmit votes to the leader. - 1. Votes are signatures of the hash of the computed state at that PoH tick count. - 2. Votes are also propagated via gossip. -10. Leader executes the votes, the same as any other transaction, and broadcasts them to the cluster. -11. Validators observe their votes and all the votes from the cluster. 
- -## Partitions, Forks - -Forks can arise at PoH tick counts that correspond to a vote. The next leader may not have observed the last vote slot and may start their slot with generated virtual PoH entries. These empty ticks are generated by all nodes in the cluster at a cluster-configured rate for hashes/per/tick `Z`. - -There are only two possible versions of the PoH during a voting slot: PoH with `T` ticks and entries generated by the current leader, or PoH with just ticks. The "just ticks" version of the PoH can be thought of as a virtual ledger, one that all nodes in the cluster can derive from the last tick in the previous slot. - -Validators can ignore forks at other points \(e.g. from the wrong leader\), or slash the leader responsible for the fork. - -Validators vote based on a greedy choice to maximize their reward described in [Tower BFT](../implemented-proposals/tower-bft.md). - -### Validator's View - -#### Time Progression - -The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader slots, and `E`s represent entries from that leader during that leader's slot. The `x`s represent ticks only, and time flows downwards in the diagram. - -![Fork generation](/img/fork-generation.svg) - -Note that an `E` appearing on 2 forks at the same slot is a slashable condition, so a validator observing `E3` and `E3'` can slash L3 and safely choose `x` for that slot. Once a validator commits to a fork, other forks can be discarded below that tick count. For any slot, validators need only consider a single "has entries" chain or a "ticks only" chain to be proposed by a leader. But multiple virtual entries may overlap as they link back to the a previous slot. - -#### Time Division - -It's useful to consider leader rotation over PoH tick count as time division of the job of encoding state for the cluster. The following table presents the above tree of forks as a time-divided ledger. - -| leader slot | L1 | L2 | L3 | L4 | L5 | -| :--------------- | :-- | :-- | :-- | :-- | :-- | -| data | E1 | E2 | E3 | E4 | E5 | -| ticks since prev | | | | x | xx | - -Note that only data from leader L3 will be accepted during leader slot L3. Data from L3 may include "catchup" ticks back to a slot other than L2 if L3 did not observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH entries. - -This arrangement of the network data streams permits nodes to save exactly this to the ledger for replay, restart, and checkpoints. - -### Leader's View - -When a new leader begins a slot, it must first transmit any PoH \(ticks\) required to link the new slot with the most recently observed and voted slot. The fork the leader proposes would link the current slot to a previous fork that the leader has voted on with virtual ticks. diff --git a/docs/src/cluster/rpc-endpoints.md b/docs/src/cluster/rpc-endpoints.md deleted file mode 100644 index 50173fa9087334..00000000000000 --- a/docs/src/cluster/rpc-endpoints.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Solana Cluster RPC Endpoints ---- - -Solana maintains dedicated api nodes to fulfill [JSON-RPC](/api) -requests for each public cluster, and third parties may as well. 
Here are the -public RPC endpoints currently available and recommended for each public cluster: - -## Devnet - -#### Endpoint - -- `https://api.devnet.solana.com` - single Solana-hosted api node; rate-limited - -#### Rate Limits - -- Maximum number of requests per 10 seconds per IP: 100 -- Maximum number of requests per 10 seconds per IP for a single RPC: 40 -- Maximum concurrent connections per IP: 40 -- Maximum connection rate per 10 seconds per IP: 40 -- Maximum amount of data per 30 second: 100 MB - -## Testnet - -#### Endpoint - -- `https://api.testnet.solana.com` - single Solana-hosted api node; rate-limited - -#### Rate Limits - -- Maximum number of requests per 10 seconds per IP: 100 -- Maximum number of requests per 10 seconds per IP for a single RPC: 40 -- Maximum concurrent connections per IP: 40 -- Maximum connection rate per 10 seconds per IP: 40 -- Maximum amount of data per 30 second: 100 MB - -## Mainnet Beta - -#### Endpoints\* - -- `https://api.mainnet-beta.solana.com` - Solana-hosted api node cluster, backed by a load balancer; rate-limited - -#### Rate Limits - -- Maximum number of requests per 10 seconds per IP: 100 -- Maximum number of requests per 10 seconds per IP for a single RPC: 40 -- Maximum concurrent connections per IP: 40 -- Maximum connection rate per 10 seconds per IP: 40 -- Maximum amount of data per 30 second: 100 MB - -\*The public RPC endpoints are not intended for production applications. Please -use dedicated/private RPC servers when you launch your application, drop NFTs, -etc. The public services are subject to abuse and rate limits may change -without prior notice. Likewise, high-traffic websites may be blocked without -prior notice. - -## Common HTTP Error Codes - -- 403 -- Your IP address or website has been blocked. It is time to run your own RPC server(s) or find a private service. -- 429 -- Your IP address is exceeding the rate limits. Slow down! Use the - [Retry-After](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After) - HTTP response header to determine how long to wait before making another - request. diff --git a/docs/src/cluster/stake-delegation-and-rewards.md b/docs/src/cluster/stake-delegation-and-rewards.md deleted file mode 100644 index e147e0db3fbb87..00000000000000 --- a/docs/src/cluster/stake-delegation-and-rewards.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: Stake Delegation and Rewards ---- - -Stakers are rewarded for helping to validate the ledger. They do this by delegating their stake to validator nodes. Those validators do the legwork of replaying the ledger and sending votes to a per-node vote account to which stakers can delegate their stakes. The rest of the cluster uses those stake-weighted votes to select a block when forks arise. Both the validator and staker need some economic incentive to play their part. The validator needs to be compensated for its hardware and the staker needs to be compensated for the risk of getting its stake slashed. The economics are covered in [staking rewards](../implemented-proposals/staking-rewards.md). This section, on the other hand, describes the underlying mechanics of its implementation. - -## Basic Design - -The general idea is that the validator owns a Vote account. The Vote account tracks validator votes, counts validator generated credits, and provides any additional validator specific state. The Vote account is not aware of any stakes delegated to it and has no staking weight. 
- -A separate Stake account \(created by a staker\) names a Vote account to which the stake is delegated. Rewards generated are proportional to the amount of lamports staked. The Stake account is owned by the staker only. Some portion of the lamports stored in this account are the stake. - -## Passive Delegation - -Any number of Stake accounts can delegate to a single Vote account without an interactive action from the identity controlling the Vote account or submitting votes to the account. - -The total stake allocated to a Vote account can be calculated by the sum of all the Stake accounts that have the Vote account pubkey as the `StakeStateV2::Stake::voter_pubkey`. - -## Vote and Stake accounts - -The rewards process is split into two on-chain programs. The Vote program solves the problem of making stakes slashable. The Stake program acts as custodian of the rewards pool and provides for passive delegation. The Stake program is responsible for paying rewards to staker and voter when shown that a staker's delegate has participated in validating the ledger. - -### VoteState - -VoteState is the current state of all the votes the validator has submitted to the network. VoteState contains the following state information: - -- `votes` - The submitted votes data structure. -- `credits` - The total number of rewards this Vote program has generated over its lifetime. -- `root_slot` - The last slot to reach the full lockout commitment necessary for rewards. -- `commission` - The commission taken by this VoteState for any rewards claimed by staker's Stake accounts. This is the percentage ceiling of the reward. -- Account::lamports - The accumulated lamports from the commission. These do not count as stakes. -- `authorized_voter` - Only this identity is authorized to submit votes. This field can only modified by this identity. -- `node_pubkey` - The Solana node that votes in this account. -- `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's address and the authorized vote signer. - -### VoteInstruction::Initialize\(VoteInit\) - -- `account[0]` - RW - The VoteState. - - `VoteInit` carries the new vote account's `node_pubkey`, `authorized_voter`, `authorized_withdrawer`, and `commission`. - - other VoteState members defaulted. - -### VoteInstruction::Authorize\(Pubkey, VoteAuthorize\) - -Updates the account with a new authorized voter or withdrawer, according to the VoteAuthorize parameter \(`Voter` or `Withdrawer`\). The transaction must be signed by the Vote account's current `authorized_voter` or `authorized_withdrawer`. - -- `account[0]` - RW - The VoteState. - `VoteState::authorized_voter` or `authorized_withdrawer` is set to `Pubkey`. - -### VoteInstruction::AuthorizeWithSeed\(VoteAuthorizeWithSeedArgs\) - -Updates the account with a new authorized voter or withdrawer, according to the VoteAuthorize parameter \(`Voter` or `Withdrawer`\). Unlike `VoteInstruction::Authorize` this instruction is for use when the Vote account's current `authorized_voter` or `authorized_withdrawer` is a derived key. The transaction must be signed by someone who can sign for the base key of that derived key. - -- `account[0]` - RW - The VoteState. - `VoteState::authorized_voter` or `authorized_withdrawer` is set to `Pubkey`. - -### VoteInstruction::Vote\(Vote\) - -- `account[0]` - RW - The VoteState. 
- `VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules see [Tower BFT](../implemented-proposals/tower-bft.md). -- `account[1]` - RO - `sysvar::slot_hashes` A list of some N most recent slots and their hashes for the vote to be verified against. -- `account[2]` - RO - `sysvar::clock` The current network time, expressed in slots, epochs. - -### StakeStateV2 - -A StakeStateV2 takes one of four forms, StakeStateV2::Uninitialized, StakeStateV2::Initialized, StakeStateV2::Stake, and StakeStateV2::RewardsPool. Only the first three forms are used in staking, but only StakeStateV2::Stake is interesting. All RewardsPools are created at genesis. - -### StakeStateV2::Stake - -StakeStateV2::Stake is the current delegation preference of the **staker** and contains the following state information: - -- Account::lamports - The lamports available for staking. -- `stake` - the staked amount \(subject to warmup and cooldown\) for generating rewards, always less than or equal to Account::lamports. -- `voter_pubkey` - The pubkey of the VoteState instance the lamports are delegated to. -- `credits_observed` - The total credits claimed over the lifetime of the program. -- `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warmup. -- `deactivated` - the epoch at which this stake was de-activated, some cooldown epochs are required before the account is fully deactivated, and the stake available for withdrawal. -- `authorized_staker` - the pubkey of the entity that must sign delegation, activation, and deactivation transactions. -- `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's address, and the authorized staker. - -### StakeStateV2::RewardsPool - -To avoid a single network-wide lock or contention in redemption, 256 RewardsPools are part of genesis under pre-determined keys, each with std::u64::MAX credits to be able to satisfy redemptions according to point value. - -The Stakes and the RewardsPool are accounts that are owned by the same `Stake` program. - -### StakeInstruction::DelegateStake - -The Stake account is moved from Initialized to StakeStateV2::Stake form, or from a deactivated (i.e. fully cooled-down) StakeStateV2::Stake to activated StakeStateV2::Stake. This is how stakers choose the vote account and validator node to which their stake account lamports are delegated. The transaction must be signed by the stake's `authorized_staker`. - -- `account[0]` - RW - The StakeStateV2::Stake instance. `StakeStateV2::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeStateV2::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeStateV2::Stake::stake` is initialized to the account's balance in lamports, `StakeStateV2::Stake::activated` is initialized to the current Bank epoch, and `StakeStateV2::Stake::deactivated` is initialized to std::u64::MAX -- `account[1]` - R - The VoteState instance. -- `account[2]` - R - sysvar::clock account, carries information about current Bank epoch. -- `account[3]` - R - sysvar::stakehistory account, carries information about stake history. -- `account[4]` - R - stake::Config account, carries warmup, cooldown, and slashing configuration. - -### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\) - -Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). 
The transaction must be by signed by the Stakee account's current `authorized_staker` or `authorized_withdrawer`. Any stake lock-up must have expired, or the lock-up custodian must also sign the transaction. - -- `account[0]` - RW - The StakeStateV2. - - `StakeStateV2::authorized_staker` or `authorized_withdrawer` is set to to `Pubkey`. - -### StakeInstruction::Deactivate - -A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cooldown. -The transaction must be signed by the stake's `authorized_staker`. - -- `account[0]` - RW - The StakeStateV2::Stake instance that is deactivating. -- `account[1]` - R - sysvar::clock account from the Bank that carries current epoch. - -StakeStateV2::Stake::deactivated is set to the current epoch + cooldown. The account's stake will ramp down to zero by that epoch, and Account::lamports will be available for withdrawal. - -### StakeInstruction::Withdraw\(u64\) - -Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn. The transaction must be signed by the stake's `authorized_withdrawer`. - -- `account[0]` - RW - The StakeStateV2::Stake from which to withdraw. -- `account[1]` - RW - Account that should be credited with the withdrawn lamports. -- `account[2]` - R - sysvar::clock account from the Bank that carries current epoch, to calculate stake. -- `account[3]` - R - sysvar::stake_history account from the Bank that carries stake warmup/cooldown history. - -## Benefits of the design - -- Single vote for all the stakers. -- Clearing of the credit variable is not necessary for claiming rewards. -- Each delegated stake can claim its rewards independently. -- Commission for the work is deposited when a reward is claimed by the delegated stake. - -## Example Callflow - -![Passive Staking Callflow](/img/passive-staking-callflow.png) - -## Staking Rewards - -The specific mechanics and rules of the validator rewards regime is outlined here. Rewards are earned by delegating stake to a validator that is voting correctly. Voting incorrectly exposes that validator's stakes to [slashing](../proposals/slashing.md). - -### Basics - -The network pays rewards from a portion of network [inflation](../terminology.md#inflation). The number of lamports available to pay rewards for an epoch is fixed and must be evenly divided among all staked nodes according to their relative stake weight and participation. The weighting unit is called a [point](../terminology.md#point). - -Rewards for an epoch are not available until the end of that epoch. - -At the end of each epoch, the total number of points earned during the epoch is summed and used to divide the rewards portion of epoch inflation to arrive at a point value. This value is recorded in the bank in a [sysvar](../terminology.md#sysvar) that maps epochs to point values. - -During redemption, the stake program counts the points earned by the stake for each epoch, multiplies that by the epoch's point value, and transfers lamports in that amount from a rewards account into the stake and vote accounts according to the vote account's commission setting. - -### Economics - -Point value for an epoch depends on aggregate network participation. If participation in an epoch drops off, point values are higher for those that do participate. - -### Earning credits - -Validators earn one vote credit for every correct vote that exceeds maximum lockout, i.e. 
every time the validator's vote account retires a slot from its lockout list, making that vote a root for the node. - -Stakers who have delegated to that validator earn points in proportion to their stake. Points earned is the product of vote credits and stake. - -### Stake warmup, cooldown, withdrawal - -Stakes, once delegated, do not become effective immediately. They must first pass through a warmup period. During this period some portion of the stake is considered "effective", the rest is considered "activating". Changes occur on epoch boundaries. - -The stake program limits the rate of change to total network stake, reflected in the stake program's `config::warmup_rate` \(set to 25% per epoch in the current implementation\). - -The amount of stake that can be warmed up each epoch is a function of the previous epoch's total effective stake, total activating stake, and the stake program's configured warmup rate. - -Cooldown works the same way. Once a stake is deactivated, some part of it is considered "effective", and also "deactivating". As the stake cools down, it continues to earn rewards and be exposed to slashing, but it also becomes available for withdrawal. - -Bootstrap stakes are not subject to warmup. - -Rewards are paid against the "effective" portion of the stake for that epoch. - -#### Warmup example - -Consider the situation of a single stake of 1,000 activated at epoch N, with network warmup rate of 20%, and a quiescent total network stake at epoch N of 2,000. - -At epoch N+1, the amount available to be activated for the network is 400 \(20% of 2000\), and at epoch N, this example stake is the only stake activating, and so is entitled to all of the warmup room available. - -| epoch | effective | activating | total effective | total activating | -| :---- | --------: | ---------: | --------------: | ---------------: | -| N-1 | | | 2,000 | 0 | -| N | 0 | 1,000 | 2,000 | 1,000 | -| N+1 | 400 | 600 | 2,400 | 600 | -| N+2 | 880 | 120 | 2,880 | 120 | -| N+3 | 1000 | 0 | 3,000 | 0 | - -Were 2 stakes \(X and Y\) to activate at epoch N, they would be awarded a portion of the 20% in proportion to their stakes. At each epoch effective and activating for each stake is a function of the previous epoch's state. - -| epoch | X eff | X act | Y eff | Y act | total effective | total activating | -| :---- | ----: | ----: | ----: | ----: | --------------: | ---------------: | -| N-1 | | | | | 2,000 | 0 | -| N | 0 | 1,000 | 0 | 200 | 2,000 | 1,200 | -| N+1 | 333 | 667 | 67 | 133 | 2,400 | 800 | -| N+2 | 733 | 267 | 146 | 54 | 2,880 | 321 | -| N+3 | 1000 | 0 | 200 | 0 | 3,200 | 0 | - -### Withdrawal - -Only lamports in excess of effective+activating stake may be withdrawn at any time. This means that during warmup, effectively no stake can be withdrawn. During cooldown, any tokens in excess of effective stake may be withdrawn \(activating == 0\). Because earned rewards are automatically added to stake, withdrawal is generally only possible after deactivation. - -### Lock-up - -Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state. 
Changing the authorized staker or withdrawer is also subject to lock-up, as such an operation is effectively a transfer. diff --git a/docs/src/clusters.md b/docs/src/clusters/available.md similarity index 94% rename from docs/src/clusters.md rename to docs/src/clusters/available.md index 5d59e7ea6cc40e..7abfb06880e858 100644 --- a/docs/src/clusters.md +++ b/docs/src/clusters/available.md @@ -5,7 +5,7 @@ title: Solana Clusters Solana maintains several different clusters with different purposes. Before you begin make sure you have first -[installed the Solana command line tools](cli/install-solana-cli-tools.md) +[installed the Solana command line tools](../cli/install.md) Explorers: @@ -63,7 +63,7 @@ $ solana-validator \ --limit-ledger-size ``` -The [`--known-validator`s](running-validator/validator-start.md#known-validators) +The [`--known-validator`s](../operations/guides/validator-start.md#known-validators) are operated by Solana Labs ## Testnet @@ -115,7 +115,7 @@ $ solana-validator \ ``` The identities of the -[`--known-validator`s](running-validator/validator-start.md#known-validators) are: +[`--known-validator`s](../operations/guides/validator-start.md#known-validators) are: - `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - Solana Labs - `dDzy5SR3AXdYWVqbDEkVFdvSPCtS9ihF5kJkHCtXoFs` - MonkeDAO @@ -168,5 +168,7 @@ $ solana-validator \ --limit-ledger-size ``` -All four [`--known-validator`s](running-validator/validator-start.md#known-validators) -are operated by Solana Labs +:::info +The above four [`--known-validator`s](../operations/guides/validator-start.md#known-validators) +are operated by Solana Labs. +::: \ No newline at end of file diff --git a/docs/src/cluster/bench-tps.md b/docs/src/clusters/benchmark.md similarity index 100% rename from docs/src/cluster/bench-tps.md rename to docs/src/clusters/benchmark.md diff --git a/docs/src/cluster/overview.md b/docs/src/clusters/index.md similarity index 98% rename from docs/src/cluster/overview.md rename to docs/src/clusters/index.md index 0fbdd53859fad6..8ac1dee11d6e68 100644 --- a/docs/src/cluster/overview.md +++ b/docs/src/clusters/index.md @@ -1,5 +1,7 @@ --- title: A Solana Cluster +sidebar_label: Overview +sidebar_position: 0 --- A Solana cluster is a set of validators working together to serve client transactions and maintain the integrity of the ledger. Many clusters may coexist. When two clusters share a common genesis block, they attempt to converge. Otherwise, they simply ignore the existence of the other. Transactions sent to the wrong one are quietly rejected. In this section, we'll discuss how a cluster is created, how nodes join the cluster, how they share the ledger, how they ensure the ledger is replicated, and how they cope with buggy and malicious nodes. @@ -36,4 +38,4 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions. 
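+
+To make the arithmetic above concrete, here is a toy sketch (purely
+illustrative, not validator code) of the leader's batching step:
+
+```bash
+# toy fanout: 60 "transactions" split into 6 batches of 10, one batch per peer
+txs=($(seq 1 60)); peers=6; per=$(( ${#txs[@]} / peers ))
+for ((p = 0; p < peers; p++)); do
+  batch=("${txs[@]:p*per:per}")
+  echo "peer $p receives transactions ${batch[0]} through ${batch[per-1]}"
+done
+```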
-A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing (December, 2021), the approach is scaling well up to about 1,250 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propagation_](turbine-block-propagation.md). +A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing (December, 2021), the approach is scaling well up to about 1,250 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propagation_](../consensus/turbine-block-propagation.md). diff --git a/docs/src/cluster/performance-metrics.md b/docs/src/clusters/metrics.md similarity index 100% rename from docs/src/cluster/performance-metrics.md rename to docs/src/clusters/metrics.md diff --git a/docs/src/cluster/commitments.md b/docs/src/consensus/commitments.md similarity index 66% rename from docs/src/cluster/commitments.md rename to docs/src/consensus/commitments.md index 6b7edd1c214525..0bfb55e9237a34 100644 --- a/docs/src/cluster/commitments.md +++ b/docs/src/consensus/commitments.md @@ -1,6 +1,8 @@ --- title: Commitment Status -description: "Processed, confirmed, and finalized. Learn the differences between the different commitment statuses on the Solana blockchain." +description: + "Processed, confirmed, and finalized. Learn the differences between the + different commitment statuses on the Solana blockchain." keywords: - processed - confirmed @@ -10,7 +12,9 @@ keywords: - blockhash --- -The [commitment](./../terminology.md#commitment) metric gives clients a standard measure of the network confirmation for the block. Clients can then use this information to derive their own measures of commitment. +The [commitment](https://solana.com/docs/terminology#commitment) metric gives +clients a standard measure of the network confirmation for the block. Clients +can then use this information to derive their own measures of commitment. There are three specific commitment statuses: diff --git a/docs/src/consensus/fork-generation.md b/docs/src/consensus/fork-generation.md new file mode 100644 index 00000000000000..6008c6c07a8206 --- /dev/null +++ b/docs/src/consensus/fork-generation.md @@ -0,0 +1,139 @@ +--- +title: Fork Generation +description: + "A fork is created when validators do not agree on a newly produced block. + Using a consensus algorithm validators vote on which will be finalized." +--- + +The Solana protocol doesn’t wait for all validators to agree on a newly produced +block before the next block is produced. Because of that, it’s quite common for +two different blocks to be chained to the same parent block. In those +situations, we call each conflicting chain a [“fork.”](./fork-generation.md) + +Solana validators need to vote on one of these forks and reach agreement on +which one to use through a consensus algorithm (that is beyond the scope of this +article). The main point you need to remember is that when there are competing +forks, only one fork will be finalized by the cluster and the abandoned blocks +in competing forks are all discarded. 
+ +This section describes how forks naturally occur as a consequence of +[leader rotation](./leader-rotation.md). + +## Overview + +Nodes take turns being [leader](https://solana.com/docs/terminology#leader) and +generating the PoH that encodes state changes. The cluster can tolerate loss of +connection to any leader by synthesizing what the leader _**would**_ have +generated had it been connected but not ingesting any state changes. + +The possible number of forks is thereby limited to a "there/not-there" skip list +of forks that may arise on leader rotation slot boundaries. At any given slot, +only a single leader's transactions will be accepted. + +### Forking example + +The table below illustrates what competing forks could look like. Time +progresses from left to right and each slot is assigned to a validator that +temporarily becomes the cluster “leader” and may produce a block for that slot. + +In this example, the leader for slot 3 chose to chain its “Block 3” directly to +“Block 1” and in doing so skipped “Block 2”. Similarly, the leader for slot 5 +chose to chain “Block 5” directly to “Block 3” and skipped “Block 4”. + +> Note that across different forks, the block produced for a given slot is +> _always_ the same because producing two different blocks for the same slot is +> a slashable offense. So the conflicting forks above can be distinguished from +> each other by which slots they have _skipped_. + +| | Slot 1 | Slot 2 | Slot 3 | Slot 4 | Slot 5 | +| ------ | ------- | ------- | ------- | ------- | ------- | +| Fork 1 | Block 1 | | Block 3 | | Block 5 | +| Fork 2 | Block 1 | | Block 3 | Block 4 | | +| Fork 3 | Block 1 | Block 2 | | | | + +## Message Flow + +1. Transactions are ingested by the current leader. +2. Leader filters valid transactions. +3. Leader executes valid transactions updating its state. +4. Leader packages transactions into entries based off its current PoH slot. +5. Leader transmits the entries to validator nodes \(in signed shreds\) + 1. The PoH stream includes ticks; empty entries that indicate liveness of the + leader and the passage of time on the cluster. + 2. A leader's stream begins with the tick entries necessary to complete PoH + back to the leader's most recently observed prior leader slot. +6. Validators retransmit entries to peers in their set and to further downstream + nodes. +7. Validators validate the transactions and execute them on their state. +8. Validators compute the hash of the state. +9. At specific times, i.e. specific PoH tick counts, validators transmit votes + to the leader. + 1. Votes are signatures of the hash of the computed state at that PoH tick + count. + 2. Votes are also propagated via gossip. +10. Leader executes the votes, the same as any other transaction, and broadcasts + them to the cluster. +11. Validators observe their votes and all the votes from the cluster. + +## Partitions, Forks + +Forks can arise at PoH tick counts that correspond to a vote. The next leader +may not have observed the last vote slot and may start their slot with generated +virtual PoH entries. These empty ticks are generated by all nodes in the cluster +at a cluster-configured rate for hashes/per/tick `Z`. + +There are only two possible versions of the PoH during a voting slot: PoH with +`T` ticks and entries generated by the current leader, or PoH with just ticks. +The "just ticks" version of the PoH can be thought of as a virtual ledger, one +that all nodes in the cluster can derive from the last tick in the previous +slot. 
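+
+As a toy illustration (not part of the protocol docs) of why every node can
+derive the "just ticks" ledger on its own: a tick chain is simply repeated
+hashing from a known starting hash, so any node that holds the previous slot's
+final tick can reproduce the same empty ticks locally:
+
+```bash
+# toy tick chain, purely illustrative: each tick hashes the previous tick
+h=$(printf 'final tick of previous slot' | sha256sum | awk '{print $1}')
+for tick in 1 2 3; do
+  h=$(printf '%s' "$h" | sha256sum | awk '{print $1}')
+  echo "virtual tick $tick: $h"
+done
+```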
+
+Validators can ignore forks at other points \(e.g. from the wrong leader\), or
+slash the leader responsible for the fork.
+
+Validators vote based on a greedy choice to maximize their reward described in
+[Tower BFT](../implemented-proposals/tower-bft.md).
+
+### Validator's View
+
+#### Time Progression
+
+The diagram below represents a validator's view of the PoH stream with possible
+forks over time. L1, L2, etc. are leader slots, and `E`s represent entries from
+that leader during that leader's slot. The `x`s represent ticks only, and time
+flows downwards in the diagram.
+
+![Fork generation](/img/fork-generation.svg)
+
+Note that an `E` appearing on 2 forks at the same slot is a slashable condition,
+so a validator observing `E3` and `E3'` can slash L3 and safely choose `x` for
+that slot. Once a validator commits to a fork, other forks can be discarded
+below that tick count. For any slot, validators need only consider a single "has
+entries" chain or a "ticks only" chain to be proposed by a leader. But multiple
+virtual entries may overlap as they link back to a previous slot.
+
+#### Time Division
+
+It's useful to consider leader rotation over PoH tick count as time division of
+the job of encoding state for the cluster. The following table presents the
+above tree of forks as a time-divided ledger.
+
+| leader slot      | L1  | L2  | L3  | L4  | L5  |
+| :--------------- | :-- | :-- | :-- | :-- | :-- |
+| data             | E1  | E2  | E3  | E4  | E5  |
+| ticks since prev |     |     |     | x   | xx  |
+
+Note that only data from leader L3 will be accepted during leader slot L3. Data
+from L3 may include "catchup" ticks back to a slot other than L2 if L3 did not
+observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH
+entries.
+
+This arrangement of the network data streams permits nodes to save exactly this
+to the ledger for replay, restart, and checkpoints.
+
+### Leader's View
+
+When a new leader begins a slot, it must first transmit any PoH \(ticks\)
+required to link the new slot with the most recently observed and voted slot.
+The fork the leader proposes would link the current slot to a previous fork that
+the leader has voted on with virtual ticks.
diff --git a/docs/src/cluster/leader-rotation.md b/docs/src/consensus/leader-rotation.md
similarity index 100%
rename from docs/src/cluster/leader-rotation.md
rename to docs/src/consensus/leader-rotation.md
diff --git a/docs/src/cluster/managing-forks.md b/docs/src/consensus/managing-forks.md
similarity index 100%
rename from docs/src/cluster/managing-forks.md
rename to docs/src/consensus/managing-forks.md
diff --git a/docs/src/consensus/stake-delegation-and-rewards.md b/docs/src/consensus/stake-delegation-and-rewards.md
new file mode 100644
index 00000000000000..72f29d5c0c4f6b
--- /dev/null
+++ b/docs/src/consensus/stake-delegation-and-rewards.md
@@ -0,0 +1,339 @@
+---
+title: Stake Delegation and Rewards
+---
+
+Stakers are rewarded for helping to validate the ledger. They do this by
+delegating their stake to validator nodes. Those validators do the legwork of
+replaying the ledger and sending votes to a per-node vote account to which
+stakers can delegate their stakes. The rest of the cluster uses those
+stake-weighted votes to select a block when forks arise. Both the validator and
+staker need some economic incentive to play their part. The validator needs to
+be compensated for its hardware and the staker needs to be compensated for the
+risk of getting its stake slashed.
The economics are covered in
+[staking rewards](../implemented-proposals/staking-rewards.md). This section, on
+the other hand, describes the underlying mechanics of its implementation.
+
+## Basic Design
+
+The general idea is that the validator owns a Vote account. The Vote account
+tracks validator votes, counts validator generated credits, and provides any
+additional validator specific state. The Vote account is not aware of any stakes
+delegated to it and has no staking weight.
+
+A separate Stake account \(created by a staker\) names a Vote account to which
+the stake is delegated. Rewards generated are proportional to the amount of
+lamports staked. The Stake account is owned by the staker only. Some portion of
+the lamports stored in this account are the stake.
+
+## Passive Delegation
+
+Any number of Stake accounts can delegate to a single Vote account without an
+interactive action from the identity controlling the Vote account or submitting
+votes to the account.
+
+The total stake allocated to a Vote account can be calculated by the sum of all
+the Stake accounts that have the Vote account pubkey as the
+`StakeStateV2::Stake::voter_pubkey`.
+
+## Vote and Stake accounts
+
+The rewards process is split into two on-chain programs. The Vote program solves
+the problem of making stakes slashable. The Stake program acts as custodian of
+the rewards pool and provides for passive delegation. The Stake program is
+responsible for paying rewards to staker and voter when shown that a staker's
+delegate has participated in validating the ledger.
+
+### VoteState
+
+VoteState is the current state of all the votes the validator has submitted to
+the network. VoteState contains the following state information:
+
+- `votes` - The submitted votes data structure.
+- `credits` - The total number of rewards this Vote program has generated over
+  its lifetime.
+- `root_slot` - The last slot to reach the full lockout commitment necessary for
+  rewards.
+- `commission` - The commission taken by this VoteState for any rewards claimed
+  by staker's Stake accounts. This is the percentage ceiling of the reward.
+- Account::lamports - The accumulated lamports from the commission. These do not
+  count as stakes.
+- `authorized_voter` - Only this identity is authorized to submit votes. This
+  field can only be modified by this identity.
+- `node_pubkey` - The Solana node that votes in this account.
+- `authorized_withdrawer` - the identity of the entity in charge of the lamports
+  of this account, separate from the account's address and the authorized vote
+  signer.
+
+### VoteInstruction::Initialize\(VoteInit\)
+
+- `account[0]` - RW - The VoteState.
+
+  `VoteInit` carries the new vote account's `node_pubkey`, `authorized_voter`,
+  `authorized_withdrawer`, and `commission`.
+
+  other VoteState members defaulted.
+
+### VoteInstruction::Authorize\(Pubkey, VoteAuthorize\)
+
+Updates the account with a new authorized voter or withdrawer, according to the
+VoteAuthorize parameter \(`Voter` or `Withdrawer`\). The transaction must be
+signed by the Vote account's current `authorized_voter` or
+`authorized_withdrawer`.
+
+- `account[0]` - RW - The VoteState. `VoteState::authorized_voter` or
+  `authorized_withdrawer` is set to `Pubkey`.
+
+### VoteInstruction::AuthorizeWithSeed\(VoteAuthorizeWithSeedArgs\)
+
+Updates the account with a new authorized voter or withdrawer, according to the
+VoteAuthorize parameter \(`Voter` or `Withdrawer`\).
Unlike
+`VoteInstruction::Authorize`, this instruction is for use when the Vote account's
+current `authorized_voter` or `authorized_withdrawer` is a derived key. The
+transaction must be signed by someone who can sign for the base key of that
+derived key.
+
+- `account[0]` - RW - The VoteState. `VoteState::authorized_voter` or
+  `authorized_withdrawer` is set to `Pubkey`.
+
+### VoteInstruction::Vote\(Vote\)
+
+- `account[0]` - RW - The VoteState. `VoteState::lockouts` and
+  `VoteState::credits` are updated according to voting lockout rules; see
+  [Tower BFT](../implemented-proposals/tower-bft.md).
+- `account[1]` - RO - `sysvar::slot_hashes` A list of some N most recent slots
+  and their hashes for the vote to be verified against.
+- `account[2]` - RO - `sysvar::clock` The current network time, expressed in
+  slots, epochs.
+
+### StakeStateV2
+
+A StakeStateV2 takes one of four forms, StakeStateV2::Uninitialized,
+StakeStateV2::Initialized, StakeStateV2::Stake, and StakeStateV2::RewardsPool.
+Only the first three forms are used in staking, but only StakeStateV2::Stake is
+interesting. All RewardsPools are created at genesis.
+
+### StakeStateV2::Stake
+
+StakeStateV2::Stake is the current delegation preference of the **staker** and
+contains the following state information:
+
+- Account::lamports - The lamports available for staking.
+- `stake` - the staked amount \(subject to warmup and cooldown\) for generating
+  rewards, always less than or equal to Account::lamports.
+- `voter_pubkey` - The pubkey of the VoteState instance the lamports are
+  delegated to.
+- `credits_observed` - The total credits claimed over the lifetime of the
+  program.
+- `activated` - the epoch at which this stake was activated/delegated. The full
+  stake will be counted after warmup.
+- `deactivated` - the epoch at which this stake was de-activated, some cooldown
+  epochs are required before the account is fully deactivated, and the stake
+  available for withdrawal.
+- `authorized_staker` - the pubkey of the entity that must sign delegation,
+  activation, and deactivation transactions.
+- `authorized_withdrawer` - the identity of the entity in charge of the lamports
+  of this account, separate from the account's address, and the authorized
+  staker.
+
+### StakeStateV2::RewardsPool
+
+To avoid a single network-wide lock or contention in redemption, 256
+RewardsPools are part of genesis under pre-determined keys, each with
+std::u64::MAX credits to be able to satisfy redemptions according to point
+value.
+
+The Stakes and the RewardsPool are accounts that are owned by the same `Stake`
+program.
+
+### StakeInstruction::DelegateStake
+
+The Stake account is moved from Initialized to StakeStateV2::Stake form, or from
+a deactivated (i.e. fully cooled-down) StakeStateV2::Stake to activated
+StakeStateV2::Stake. This is how stakers choose the vote account and validator
+node to which their stake account lamports are delegated. The transaction must
+be signed by the stake's `authorized_staker`.
+
+- `account[0]` - RW - The StakeStateV2::Stake instance.
+  `StakeStateV2::Stake::credits_observed` is initialized to
+  `VoteState::credits`, `StakeStateV2::Stake::voter_pubkey` is initialized to
+  `account[1]`.
If this is the initial delegation of stake,
+  `StakeStateV2::Stake::stake` is initialized to the account's balance in
+  lamports, `StakeStateV2::Stake::activated` is initialized to the current Bank
+  epoch, and `StakeStateV2::Stake::deactivated` is initialized to std::u64::MAX
+- `account[1]` - R - The VoteState instance.
+- `account[2]` - R - sysvar::clock account, carries information about current
+  Bank epoch.
+- `account[3]` - R - sysvar::stakehistory account, carries information about
+  stake history.
+- `account[4]` - R - stake::Config account, carries warmup, cooldown, and
+  slashing configuration.
+
+### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\)
+
+Updates the account with a new authorized staker or withdrawer, according to the
+StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be
+signed by the Stake account's current `authorized_staker` or
+`authorized_withdrawer`. Any stake lock-up must have expired, or the lock-up
+custodian must also sign the transaction.
+
+- `account[0]` - RW - The StakeStateV2.
+
+  `StakeStateV2::authorized_staker` or `authorized_withdrawer` is set to
+  `Pubkey`.
+
+### StakeInstruction::Deactivate
+
+A staker may wish to withdraw from the network. To do so, they must first
+deactivate their stake and wait for cooldown. The transaction must be signed by
+the stake's `authorized_staker`.
+
+- `account[0]` - RW - The StakeStateV2::Stake instance that is deactivating.
+- `account[1]` - R - sysvar::clock account from the Bank that carries current
+  epoch.
+
+StakeStateV2::Stake::deactivated is set to the current epoch + cooldown. The
+account's stake will ramp down to zero by that epoch, and Account::lamports will
+be available for withdrawal.
+
+### StakeInstruction::Withdraw\(u64\)
+
+Lamports build up over time in a Stake account and any excess over activated
+stake can be withdrawn. The transaction must be signed by the stake's
+`authorized_withdrawer`.
+
+- `account[0]` - RW - The StakeStateV2::Stake from which to withdraw.
+- `account[1]` - RW - Account that should be credited with the withdrawn
+  lamports.
+- `account[2]` - R - sysvar::clock account from the Bank that carries current
+  epoch, to calculate stake.
+- `account[3]` - R - sysvar::stake_history account from the Bank that carries
+  stake warmup/cooldown history.
+
+## Benefits of the design
+
+- Single vote for all the stakers.
+- Clearing of the credit variable is not necessary for claiming rewards.
+- Each delegated stake can claim its rewards independently.
+- Commission for the work is deposited when a reward is claimed by the delegated
+  stake.
+
+## Example Callflow
+
+![Passive Staking Callflow](/img/passive-staking-callflow.png)
+
+## Staking Rewards
+
+The specific mechanics and rules of the validator rewards regime are outlined
+here. Rewards are earned by delegating stake to a validator that is voting
+correctly. Voting incorrectly exposes that validator's stakes to
+[slashing](../proposals/slashing.md).
+
+### Basics
+
+The network pays rewards from a portion of network
+[inflation](https://solana.com/docs/terminology#inflation). The number of
+lamports available to pay rewards for an epoch is fixed and must be evenly
+divided among all staked nodes according to their relative stake weight and
+participation. The weighting unit is called a
+[point](https://solana.com/docs/terminology#point).
+
+Rewards for an epoch are not available until the end of that epoch.
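+
+As a toy numeric sketch of the point arithmetic (all numbers below are
+invented for illustration; the mechanics are spelled out in the paragraphs
+that follow):
+
+```bash
+# invented example values, purely illustrative
+epoch_rewards=1000000000    # lamports set aside for rewards this epoch
+total_points=250000000000   # sum over all stakes of credits * stake
+my_credits=400              # vote credits earned by my delegate this epoch
+my_stake=500000000          # my staked lamports
+my_points=$((my_credits * my_stake))
+# point value = epoch rewards / total points; redemption pays points * value
+echo "$my_points * $epoch_rewards / $total_points" | bc   # -> 800000000
+```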
### Economics

Point value for an epoch depends on aggregate network participation. If
participation in an epoch drops off, point values are higher for those that do
participate.

### Earning credits

Validators earn one vote credit for every correct vote that exceeds maximum
lockout, i.e. every time the validator's vote account retires a slot from its
lockout list, making that vote a root for the node.

Stakers who have delegated to that validator earn points in proportion to their
stake. Points earned are the product of vote credits and stake.

### Stake warmup, cooldown, withdrawal

Stakes, once delegated, do not become effective immediately. They must first
pass through a warmup period. During this period some portion of the stake is
considered "effective", the rest is considered "activating". Changes occur on
epoch boundaries.

The stake program limits the rate of change to total network stake, reflected in
the stake program's `config::warmup_rate` \(set to 25% per epoch in the current
implementation\).

The amount of stake that can be warmed up each epoch is a function of the
previous epoch's total effective stake, total activating stake, and the stake
program's configured warmup rate.

Cooldown works the same way. Once a stake is deactivated, some portion of it is
considered "effective", the rest "deactivating". As the stake cools down, it
continues to earn rewards and be exposed to slashing, but it also becomes
available for withdrawal.

Bootstrap stakes are not subject to warmup.

Rewards are paid against the "effective" portion of the stake for that epoch.

#### Warmup example

Consider the situation of a single stake of 1,000 activated at epoch N, with a
network warmup rate of 20% and a quiescent total network stake at epoch N of
2,000.

At epoch N+1, the amount available to be activated for the network is 400 \(20%
of 2,000\). At epoch N, this example stake is the only stake activating, and so
is entitled to all of the warmup room available.

| epoch | effective | activating | total effective | total activating |
| :---- | --------: | ---------: | --------------: | ---------------: |
| N-1   |           |            | 2,000           | 0                |
| N     | 0         | 1,000      | 2,000           | 1,000            |
| N+1   | 400       | 600        | 2,400           | 600              |
| N+2   | 880       | 120        | 2,880           | 120              |
| N+3   | 1,000     | 0          | 3,000           | 0                |

Were 2 stakes \(X and Y\) to activate at epoch N, they would be awarded a
portion of the 20% in proportion to their stakes. At each epoch, effective and
activating for each stake are a function of the previous epoch's state. A code
sketch of this update follows the tables.

| epoch | X eff | X act | Y eff | Y act | total effective | total activating |
| :---- | ----: | ----: | ----: | ----: | --------------: | ---------------: |
| N-1   |       |       |       |       | 2,000           | 0                |
| N     | 0     | 1,000 | 0     | 200   | 2,000           | 1,200            |
| N+1   | 333   | 667   | 67    | 133   | 2,400           | 800              |
| N+2   | 733   | 267   | 146   | 54    | 2,880           | 321              |
| N+3   | 1,000 | 0     | 200   | 0     | 3,200           | 0                |
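A small sketch of the update rule behind both tables, assuming this example's
simplified 20% rate and that our stake is the only one activating (the first
table's numbers fall out directly):

```javascript
// Reproduce the single-stake warmup table above.
const warmupRate = 0.2; // this example's rate; see config::warmup_rate
let totalEffective = 2_000; // network total at epoch N-1
let effective = 0;
let activating = 1_000;

while (activating > 0) {
  // Warmup room this epoch derives from the prior epoch's total effective stake.
  const room = totalEffective * warmupRate;
  const delta = Math.min(room, activating);
  effective += delta;
  activating -= delta;
  totalEffective += delta;
  console.log({ effective, activating, totalEffective });
}
// -> { effective: 400, activating: 600, totalEffective: 2400 }
// -> { effective: 880, activating: 120, totalEffective: 2880 }
// -> { effective: 1000, activating: 0, totalEffective: 3000 }
```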
### Withdrawal

Only lamports in excess of effective+activating stake may be withdrawn at any
time. This means that during warmup, effectively no stake can be withdrawn.
During cooldown, any tokens in excess of effective stake may be withdrawn
\(activating == 0\). Because earned rewards are automatically added to stake,
withdrawal is generally only possible after deactivation.

### Lock-up

Stake accounts support the notion of lock-up, wherein the stake account balance
is unavailable for withdrawal until a specified time. Lock-up is specified as an
epoch height, i.e. the minimum epoch height that must be reached by the network
before the stake account balance is available for withdrawal, unless the
transaction is also signed by a specified custodian. This information is
gathered when the stake account is created and stored in the Lockup field of
the stake account's state. Changing the authorized staker or withdrawer is also
subject to lock-up, as such an operation is effectively a transfer.
diff --git a/docs/src/cluster/synchronization.md b/docs/src/consensus/synchronization.md
similarity index 97%
rename from docs/src/cluster/synchronization.md
rename to docs/src/consensus/synchronization.md
index bfde654f84d763..3eb763e4daf836 100644
--- a/docs/src/cluster/synchronization.md
+++ b/docs/src/consensus/synchronization.md
@@ -16,7 +16,7 @@ The Proof of History technique was first described for use in blockchain by Sola
 A desirable property of a VDF is that verification time is very fast. Solana's approach to verifying its delay function is proportional to the time it took to create it. Split over a 4000 core GPU, it is sufficiently fast for Solana's needs, but if you asked the authors of the paper cited above, they might tell you \([and have](https://github.com/solana-labs/solana/issues/388)\) that Solana's approach is algorithmically slow and it shouldn't be called a VDF. We argue the term VDF should represent the category of verifiable delay functions and not just the subset with certain performance characteristics. Until that's resolved, Solana will likely continue using the term PoH for its application-specific VDF.
-Another difference between PoH and VDFs is that a VDF is used only for tracking duration. PoH's hash chain, on the other hand, includes hashes of any data the application observed. That data is a double-edged sword.
On one side, the data "proves history" - that the data most certainly existed before hashes after it. On the other side, it means the application can manipulate the hash chain by changing _when_ the data is hashed. The PoH chain therefore does not serve as a good source of randomness whereas a VDF without that data could. Solana's [leader rotation algorithm](./leader-rotation.md), for example, is derived only from the VDF _height_ and not its hash at that height. ## Relationship to Consensus Mechanisms diff --git a/docs/src/cluster/turbine-block-propagation.md b/docs/src/consensus/turbine-block-propagation.md similarity index 100% rename from docs/src/cluster/turbine-block-propagation.md rename to docs/src/consensus/turbine-block-propagation.md diff --git a/docs/src/cluster/vote-signing.md b/docs/src/consensus/vote-signing.md similarity index 100% rename from docs/src/cluster/vote-signing.md rename to docs/src/consensus/vote-signing.md diff --git a/docs/src/developing/clients/javascript-api.md b/docs/src/developing/clients/javascript-api.md deleted file mode 100644 index 8dad0f46a00698..00000000000000 --- a/docs/src/developing/clients/javascript-api.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -title: Web3 JavaScript API ---- - -## What is Solana-Web3.js? - -The Solana-Web3.js library aims to provide complete coverage of Solana. The library was built on top of the [Solana JSON RPC API](/api). - -You can find the full documentation for the `@solana/web3.js` library [here](https://solana-labs.github.io/solana-web3.js/). - -## Common Terminology - -| Term | Definition | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Program | Stateless executable code written to interpret instructions. Programs are capable of performing actions based on the instructions provided. | -| Instruction | The smallest unit of a program that a client can include in a transaction. Within its processing code, an instruction may contain one or more cross-program invocations. | -| Transaction | One or more instructions signed by the client using one or more Keypairs and executed atomically with only two possible outcomes: success or failure. | - -For the full list of terms, see [Solana terminology](../../terminology#cross-program-invocation) - -## Getting Started - -### Installation - -#### yarn - -```bash -$ yarn add @solana/web3.js -``` - -#### npm - -```bash -$ npm install --save @solana/web3.js -``` - -#### Bundle - -```html - - - - - -``` - -### Usage - -#### Javascript - -```javascript -const solanaWeb3 = require("@solana/web3.js"); -console.log(solanaWeb3); -``` - -#### ES6 - -```javascript -import * as solanaWeb3 from "@solana/web3.js"; -console.log(solanaWeb3); -``` - -#### Browser Bundle - -```javascript -// solanaWeb3 is provided in the global namespace by the bundle script -console.log(solanaWeb3); -``` - -## Quickstart - -### Connecting to a Wallet - -To allow users to use your dApp or application on Solana, they will need to get access to their Keypair. A Keypair is a private key with a matching public key, used to sign transactions. - -There are two ways to obtain a Keypair: - -1. Generate a new Keypair -2. 
Obtain a Keypair using the secret key

You can obtain a new Keypair with the following:

```javascript
const { Keypair } = require("@solana/web3.js");

let keypair = Keypair.generate();
```

This will generate a brand new Keypair for a user to fund and use within your application.

You can allow entry of the secretKey using a textbox, and obtain the Keypair with `Keypair.fromSecretKey(secretKey)`.

```javascript
const { Keypair } = require("@solana/web3.js");

let secretKey = Uint8Array.from([
  202, 171, 192, 129, 150, 189, 204, 241, 142, 71, 205, 2, 81, 97, 2, 176, 48,
  81, 45, 1, 96, 138, 220, 132, 231, 131, 120, 77, 66, 40, 97, 172, 91, 245, 84,
  221, 157, 190, 9, 145, 176, 130, 25, 43, 72, 107, 190, 229, 75, 88, 191, 136,
  7, 167, 109, 91, 170, 164, 186, 15, 142, 36, 12, 23,
]);

let keypair = Keypair.fromSecretKey(secretKey);
```

Many wallets today allow users to bring their Keypair using a variety of extensions or web wallets. The general recommendation is to use wallets, not Keypairs, to sign transactions. The wallet creates a layer of separation between the dApp and the Keypair, ensuring that the dApp never has access to the secret key. You can find ways to connect to external wallets with the [wallet-adapter](https://github.com/solana-labs/wallet-adapter) library.

### Creating and Sending Transactions

To interact with programs on Solana, you create, sign, and send transactions to the network. Transactions are collections of instructions with signatures. The order in which instructions appear in a transaction determines the order in which they are executed.

A transaction in Solana-Web3.js is created using the [`Transaction`](javascript-api.md#Transaction) object and adding desired messages, addresses, or instructions.

Take the example of a transfer transaction:

```javascript
const {
  Keypair,
  Transaction,
  SystemProgram,
  LAMPORTS_PER_SOL,
} = require("@solana/web3.js");

let fromKeypair = Keypair.generate();
let toKeypair = Keypair.generate();
let transaction = new Transaction();

transaction.add(
  SystemProgram.transfer({
    fromPubkey: fromKeypair.publicKey,
    toPubkey: toKeypair.publicKey,
    lamports: LAMPORTS_PER_SOL,
  }),
);
```

The above code creates a transaction ready to be signed and broadcast to the network. The `SystemProgram.transfer` instruction was added to the transaction, containing the amount of lamports to send, and the `to` and `from` public keys.

All that is left is to sign the transaction with the sending keypair and send it over the network. You can use `sendAndConfirmTransaction` if you wish to alert the user or do something after a transaction is finished, or use `sendTransaction` if you don't need to wait for the transaction to be confirmed.

```javascript
const {
  sendAndConfirmTransaction,
  clusterApiUrl,
  Connection,
} = require("@solana/web3.js");

let connection = new Connection(clusterApiUrl("testnet"));

// `fromKeypair` must sign: it is both the default fee payer and the
// source of the transferred lamports.
sendAndConfirmTransaction(connection, transaction, [fromKeypair]);
```

The above code takes in a `TransactionInstruction` using `SystemProgram`, creates a `Transaction`, and sends it over the network. You use `Connection` in order to define which Solana network you are connecting to, namely `mainnet-beta`, `testnet`, or `devnet`.

### Interacting with Custom Programs

The previous section covered sending basic transactions.
In Solana everything you do interacts with different programs, including the previous section's transfer transaction. At the time of writing programs on Solana are either written in Rust or C. - -Let's look at the `SystemProgram`. The method signature for allocating space in your account on Solana in Rust looks like this: - -```rust -pub fn allocate( - pubkey: &Pubkey, - space: u64 -) -> Instruction -``` - -In Solana when you want to interact with a program you must first know all the accounts you will be interacting with. - -You must always provide every account that the program will be interacting within the instruction. Not only that, but you must provide whether or not the account is `isSigner` or `isWritable`. - -In the `allocate` method above, a single account `pubkey` is required, as well as an amount of `space` for allocation. We know that the `allocate` method writes to the account by allocating space within it, making the `pubkey` required to be `isWritable`. `isSigner` is required when you are designating the account that is running the instruction. In this case, the signer is the account calling to allocate space within itself. - -Let's look at how to call this instruction using solana-web3.js: - -```javascript -let keypair = web3.Keypair.generate(); -let payer = web3.Keypair.generate(); -let connection = new web3.Connection(web3.clusterApiUrl("testnet")); - -let airdropSignature = await connection.requestAirdrop( - payer.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); -``` - -First, we set up the account Keypair and connection so that we have an account to make allocate on the testnet. We also create a payer Keypair and airdrop some sol so we can pay for the allocate transaction. - -```javascript -let allocateTransaction = new web3.Transaction({ - feePayer: payer.publicKey, -}); -let keys = [{ pubkey: keypair.publicKey, isSigner: true, isWritable: true }]; -let params = { space: 100 }; -``` - -We create the transaction `allocateTransaction`, keys, and params objects. `feePayer` is an optional field when creating a transaction that specifies who is paying for the transaction, defaulting to the pubkey of the first signer in the transaction. `keys` represents all accounts that the program's `allocate` function will interact with. Since the `allocate` function also required space, we created `params` to be used later when invoking the `allocate` function. - -```javascript -let allocateStruct = { - index: 8, - layout: struct([u32("instruction"), ns64("space")]), -}; -``` - -The above is created using `u32` and `ns64` from `@solana/buffer-layout` to facilitate the payload creation. The `allocate` function takes in the parameter `space`. To interact with the function we must provide the data as a Buffer format. The `buffer-layout` library helps with allocating the buffer and encoding it correctly for Rust programs on Solana to interpret. - -Let's break down this struct. - -```javascript -{ - index: 8, /* <-- */ - layout: struct([ - u32('instruction'), - ns64('space'), - ]) -} -``` - -`index` is set to 8 because the function `allocate` is in the 8th position in the instruction enum for `SystemProgram`. 
- -```rust -/* https://github.com/solana-labs/solana/blob/21bc43ed58c63c827ba4db30426965ef3e807180/sdk/program/src/system_instruction.rs#L142-L305 */ -pub enum SystemInstruction { - /** 0 **/CreateAccount {/**/}, - /** 1 **/Assign {/**/}, - /** 2 **/Transfer {/**/}, - /** 3 **/CreateAccountWithSeed {/**/}, - /** 4 **/AdvanceNonceAccount, - /** 5 **/WithdrawNonceAccount(u64), - /** 6 **/InitializeNonceAccount(Pubkey), - /** 7 **/AuthorizeNonceAccount(Pubkey), - /** 8 **/Allocate {/**/}, - /** 9 **/AllocateWithSeed {/**/}, - /** 10 **/AssignWithSeed {/**/}, - /** 11 **/TransferWithSeed {/**/}, - /** 12 **/UpgradeNonceAccount, -} -``` - -Next up is `u32('instruction')`. - -```javascript -{ - index: 8, - layout: struct([ - u32('instruction'), /* <-- */ - ns64('space'), - ]) -} -``` - -The `layout` in the allocate struct must always have `u32('instruction')` first when you are using it to call an instruction. - -```javascript -{ - index: 8, - layout: struct([ - u32('instruction'), - ns64('space'), /* <-- */ - ]) -} -``` - -`ns64('space')` is the argument for the `allocate` function. You can see in the original `allocate` function in Rust that space was of the type `u64`. `u64` is an unsigned 64bit integer. Javascript by default only provides up to 53bit integers. `ns64` comes from `@solana/buffer-layout` to help with type conversions between Rust and Javascript. You can find more type conversions between Rust and Javascript at [solana-labs/buffer-layout](https://github.com/solana-labs/buffer-layout). - -```javascript -let data = Buffer.alloc(allocateStruct.layout.span); -let layoutFields = Object.assign({ instruction: allocateStruct.index }, params); -allocateStruct.layout.encode(layoutFields, data); -``` - -Using the previously created bufferLayout, we can allocate a data buffer. We then assign our params `{ space: 100 }` so that it maps correctly to the layout, and encode it to the data buffer. Now the data is ready to be sent to the program. - -```javascript -allocateTransaction.add( - new web3.TransactionInstruction({ - keys, - programId: web3.SystemProgram.programId, - data, - }), -); - -await web3.sendAndConfirmTransaction(connection, allocateTransaction, [ - payer, - keypair, -]); -``` - -Finally, we add the transaction instruction with all the account keys, payer, data, and programId and broadcast the transaction to the network. - -The full code can be found below. 
- -```javascript -const { struct, u32, ns64 } = require("@solana/buffer-layout"); -const { Buffer } = require("buffer"); -const web3 = require("@solana/web3.js"); - -let keypair = web3.Keypair.generate(); -let payer = web3.Keypair.generate(); - -let connection = new web3.Connection(web3.clusterApiUrl("testnet")); - -let airdropSignature = await connection.requestAirdrop( - payer.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); - -let allocateTransaction = new web3.Transaction({ - feePayer: payer.publicKey, -}); -let keys = [{ pubkey: keypair.publicKey, isSigner: true, isWritable: true }]; -let params = { space: 100 }; - -let allocateStruct = { - index: 8, - layout: struct([u32("instruction"), ns64("space")]), -}; - -let data = Buffer.alloc(allocateStruct.layout.span); -let layoutFields = Object.assign({ instruction: allocateStruct.index }, params); -allocateStruct.layout.encode(layoutFields, data); - -allocateTransaction.add( - new web3.TransactionInstruction({ - keys, - programId: web3.SystemProgram.programId, - data, - }), -); - -await web3.sendAndConfirmTransaction(connection, allocateTransaction, [ - payer, - keypair, -]); -``` diff --git a/docs/src/developing/clients/javascript-reference.md b/docs/src/developing/clients/javascript-reference.md deleted file mode 100644 index bd13d64296e1cc..00000000000000 --- a/docs/src/developing/clients/javascript-reference.md +++ /dev/null @@ -1,802 +0,0 @@ ---- -title: Web3 API Reference ---- - -## Web3 API Reference Guide - -The `@solana/web3.js` library is a package that has coverage over the [Solana JSON RPC API](/api). - -You can find the full documentation for the `@solana/web3.js` library [here](https://solana-labs.github.io/solana-web3.js/). - -## General - -### Connection - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Connection.html) - -Connection is used to interact with the [Solana JSON RPC](/api). You can use Connection to confirm transactions, get account info, and more. - -You create a connection by defining the JSON RPC cluster endpoint and the desired commitment. Once this is complete, you can use this connection object to interact with any of the Solana JSON RPC API. - -#### Example Usage - -```javascript -const web3 = require("@solana/web3.js"); - -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -let slot = await connection.getSlot(); -console.log(slot); -// 93186439 - -let blockTime = await connection.getBlockTime(slot); -console.log(blockTime); -// 1630747045 - -let block = await connection.getBlock(slot); -console.log(block); - -/* -{ - blockHeight: null, - blockTime: 1630747045, - blockhash: 'AsFv1aV5DGip9YJHHqVjrGg6EKk55xuyxn2HeiN9xQyn', - parentSlot: 93186438, - previousBlockhash: '11111111111111111111111111111111', - rewards: [], - transactions: [] -} -*/ - -let slotLeader = await connection.getSlotLeader(); -console.log(slotLeader); -//49AqLYbpJYc2DrzGUAH1fhWJy62yxBxpLEkfJwjKy2jr -``` - -The above example shows only a few of the methods on Connection. Please see the [source generated docs](https://solana-labs.github.io/solana-web3.js/classes/Connection.html) for the full list. - -### Transaction - -[SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/Transaction.html) - -A transaction is used to interact with programs on the Solana blockchain. 
These transactions are constructed with TransactionInstructions, containing all the accounts possible to interact with, as well as any needed data or program addresses. Each TransactionInstruction consists of keys, data, and a programId. You can do multiple instructions in a single transaction, interacting with multiple programs at once. - -#### Example Usage - -```javascript -const web3 = require("@solana/web3.js"); -const nacl = require("tweetnacl"); - -// Airdrop SOL for paying transactions -let payer = web3.Keypair.generate(); -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -let airdropSignature = await connection.requestAirdrop( - payer.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); - -let toAccount = web3.Keypair.generate(); - -// Create Simple Transaction -let transaction = new web3.Transaction(); - -// Add an instruction to execute -transaction.add( - web3.SystemProgram.transfer({ - fromPubkey: payer.publicKey, - toPubkey: toAccount.publicKey, - lamports: 1000, - }), -); - -// Send and confirm transaction -// Note: feePayer is by default the first signer, or payer, if the parameter is not set -await web3.sendAndConfirmTransaction(connection, transaction, [payer]); - -// Alternatively, manually construct the transaction -let recentBlockhash = await connection.getRecentBlockhash(); -let manualTransaction = new web3.Transaction({ - recentBlockhash: recentBlockhash.blockhash, - feePayer: payer.publicKey, -}); -manualTransaction.add( - web3.SystemProgram.transfer({ - fromPubkey: payer.publicKey, - toPubkey: toAccount.publicKey, - lamports: 1000, - }), -); - -let transactionBuffer = manualTransaction.serializeMessage(); -let signature = nacl.sign.detached(transactionBuffer, payer.secretKey); - -manualTransaction.addSignature(payer.publicKey, signature); - -let isVerifiedSignature = manualTransaction.verifySignatures(); -console.log(`The signatures were verified: ${isVerifiedSignature}`); - -// The signatures were verified: true - -let rawTransaction = manualTransaction.serialize(); - -await web3.sendAndConfirmRawTransaction(connection, rawTransaction); -``` - -### Keypair - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Keypair.html) - -The keypair is used to create an account with a public key and secret key within Solana. You can either generate, generate from a seed, or create from a secret key. 
- -#### Example Usage - -```javascript -const { Keypair } = require("@solana/web3.js"); - -let account = Keypair.generate(); - -console.log(account.publicKey.toBase58()); -console.log(account.secretKey); - -// 2DVaHtcdTf7cm18Zm9VV8rKK4oSnjmTkKE6MiXe18Qsb -// Uint8Array(64) [ -// 152, 43, 116, 211, 207, 41, 220, 33, 193, 168, 118, -// 24, 176, 83, 206, 132, 47, 194, 2, 203, 186, 131, -// 197, 228, 156, 170, 154, 41, 56, 76, 159, 124, 18, -// 14, 247, 32, 210, 51, 102, 41, 43, 21, 12, 170, -// 166, 210, 195, 188, 60, 220, 210, 96, 136, 158, 6, -// 205, 189, 165, 112, 32, 200, 116, 164, 234 -// ] - -let seed = Uint8Array.from([ - 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, - 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, -]); -let accountFromSeed = Keypair.fromSeed(seed); - -console.log(accountFromSeed.publicKey.toBase58()); -console.log(accountFromSeed.secretKey); - -// 3LDverZtSC9Duw2wyGC1C38atMG49toPNW9jtGJiw9Ar -// Uint8Array(64) [ -// 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, -// 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, -// 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 34, -// 164, 6, 12, 9, 193, 196, 30, 148, 122, 175, 11, -// 28, 243, 209, 82, 240, 184, 30, 31, 56, 223, 236, -// 227, 60, 72, 215, 47, 208, 209, 162, 59 -// ] - -let accountFromSecret = Keypair.fromSecretKey(account.secretKey); - -console.log(accountFromSecret.publicKey.toBase58()); -console.log(accountFromSecret.secretKey); - -// 2DVaHtcdTf7cm18Zm9VV8rKK4oSnjmTkKE6MiXe18Qsb -// Uint8Array(64) [ -// 152, 43, 116, 211, 207, 41, 220, 33, 193, 168, 118, -// 24, 176, 83, 206, 132, 47, 194, 2, 203, 186, 131, -// 197, 228, 156, 170, 154, 41, 56, 76, 159, 124, 18, -// 14, 247, 32, 210, 51, 102, 41, 43, 21, 12, 170, -// 166, 210, 195, 188, 60, 220, 210, 96, 136, 158, 6, -// 205, 189, 165, 112, 32, 200, 116, 164, 234 -// ] -``` - -Using `generate` generates a random Keypair for use as an account on Solana. Using `fromSeed`, you can generate a Keypair using a deterministic constructor. `fromSecret` creates a Keypair from a secret Uint8array. You can see that the publicKey for the `generate` Keypair and `fromSecret` Keypair are the same because the secret from the `generate` Keypair is used in `fromSecret`. - -**Warning**: Do not use `fromSeed` unless you are creating a seed with high entropy. Do not share your seed. Treat the seed like you would a private key. - -### PublicKey - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/PublicKey.html) - -PublicKey is used throughout `@solana/web3.js` in transactions, keypairs, and programs. You require publickey when listing each account in a transaction and as a general identifier on Solana. - -A PublicKey can be created with a base58 encoded string, buffer, Uint8Array, number, and an array of numbers. 
- -#### Example Usage - -```javascript -const { Buffer } = require("buffer"); -const web3 = require("@solana/web3.js"); -const crypto = require("crypto"); - -// Create a PublicKey with a base58 encoded string -let base58publicKey = new web3.PublicKey( - "5xot9PVkphiX2adznghwrAuxGs2zeWisNSxMW6hU6Hkj", -); -console.log(base58publicKey.toBase58()); - -// 5xot9PVkphiX2adznghwrAuxGs2zeWisNSxMW6hU6Hkj - -// Create a Program Address -let highEntropyBuffer = crypto.randomBytes(31); -let programAddressFromKey = await web3.PublicKey.createProgramAddress( - [highEntropyBuffer.slice(0, 31)], - base58publicKey, -); -console.log(`Generated Program Address: ${programAddressFromKey.toBase58()}`); - -// Generated Program Address: 3thxPEEz4EDWHNxo1LpEpsAxZryPAHyvNVXJEJWgBgwJ - -// Find Program address given a PublicKey -let validProgramAddress = await web3.PublicKey.findProgramAddress( - [Buffer.from("", "utf8")], - programAddressFromKey, -); -console.log(`Valid Program Address: ${validProgramAddress}`); - -// Valid Program Address: C14Gs3oyeXbASzwUpqSymCKpEyccfEuSe8VRar9vJQRE,253 -``` - -### SystemProgram - -[SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/SystemProgram.html) - -The SystemProgram grants the ability to create accounts, allocate account data, assign an account to programs, work with nonce accounts, and transfer lamports. You can use the SystemInstruction class to help with decoding and reading individual instructions - -#### Example Usage - -```javascript -const web3 = require("@solana/web3.js"); - -// Airdrop SOL for paying transactions -let payer = web3.Keypair.generate(); -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -let airdropSignature = await connection.requestAirdrop( - payer.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); - -// Allocate Account Data -let allocatedAccount = web3.Keypair.generate(); -let allocateInstruction = web3.SystemProgram.allocate({ - accountPubkey: allocatedAccount.publicKey, - space: 100, -}); -let transaction = new web3.Transaction().add(allocateInstruction); - -await web3.sendAndConfirmTransaction(connection, transaction, [ - payer, - allocatedAccount, -]); - -// Create Nonce Account -let nonceAccount = web3.Keypair.generate(); -let minimumAmountForNonceAccount = - await connection.getMinimumBalanceForRentExemption(web3.NONCE_ACCOUNT_LENGTH); -let createNonceAccountTransaction = new web3.Transaction().add( - web3.SystemProgram.createNonceAccount({ - fromPubkey: payer.publicKey, - noncePubkey: nonceAccount.publicKey, - authorizedPubkey: payer.publicKey, - lamports: minimumAmountForNonceAccount, - }), -); - -await web3.sendAndConfirmTransaction( - connection, - createNonceAccountTransaction, - [payer, nonceAccount], -); - -// Advance nonce - Used to create transactions as an account custodian -let advanceNonceTransaction = new web3.Transaction().add( - web3.SystemProgram.nonceAdvance({ - noncePubkey: nonceAccount.publicKey, - authorizedPubkey: payer.publicKey, - }), -); - -await web3.sendAndConfirmTransaction(connection, advanceNonceTransaction, [ - payer, -]); - -// Transfer lamports between accounts -let toAccount = web3.Keypair.generate(); - -let transferTransaction = new web3.Transaction().add( - web3.SystemProgram.transfer({ - fromPubkey: payer.publicKey, - toPubkey: toAccount.publicKey, - lamports: 1000, - }), -); -await web3.sendAndConfirmTransaction(connection, transferTransaction, [payer]); - -// Assign a new account to a 
program -let programId = web3.Keypair.generate(); -let assignedAccount = web3.Keypair.generate(); - -let assignTransaction = new web3.Transaction().add( - web3.SystemProgram.assign({ - accountPubkey: assignedAccount.publicKey, - programId: programId.publicKey, - }), -); - -await web3.sendAndConfirmTransaction(connection, assignTransaction, [ - payer, - assignedAccount, -]); -``` - -### Secp256k1Program - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Secp256k1Program.html) - -The Secp256k1Program is used to verify Secp256k1 signatures, which are used by both Bitcoin and Ethereum. - -#### Example Usage - -```javascript -const { keccak_256 } = require("js-sha3"); -const web3 = require("@solana/web3.js"); -const secp256k1 = require("secp256k1"); - -// Create a Ethereum Address from secp256k1 -let secp256k1PrivateKey; -do { - secp256k1PrivateKey = web3.Keypair.generate().secretKey.slice(0, 32); -} while (!secp256k1.privateKeyVerify(secp256k1PrivateKey)); - -let secp256k1PublicKey = secp256k1 - .publicKeyCreate(secp256k1PrivateKey, false) - .slice(1); - -let ethAddress = - web3.Secp256k1Program.publicKeyToEthAddress(secp256k1PublicKey); -console.log(`Ethereum Address: 0x${ethAddress.toString("hex")}`); - -// Ethereum Address: 0xadbf43eec40694eacf36e34bb5337fba6a2aa8ee - -// Fund a keypair to create instructions -let fromPublicKey = web3.Keypair.generate(); -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -let airdropSignature = await connection.requestAirdrop( - fromPublicKey.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); - -// Sign Message with Ethereum Key -let plaintext = Buffer.from("string address"); -let plaintextHash = Buffer.from(keccak_256.update(plaintext).digest()); -let { signature, recid: recoveryId } = secp256k1.ecdsaSign( - plaintextHash, - secp256k1PrivateKey, -); - -// Create transaction to verify the signature -let transaction = new Transaction().add( - web3.Secp256k1Program.createInstructionWithEthAddress({ - ethAddress: ethAddress.toString("hex"), - plaintext, - signature, - recoveryId, - }), -); - -// Transaction will succeed if the message is verified to be signed by the address -await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey]); -``` - -### Message - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Message.html) - -Message is used as another way to construct transactions. You can construct a message using the accounts, header, instructions, and recentBlockhash that are a part of a transaction. A [Transaction](javascript-api.md#Transaction) is a Message plus the list of required signatures required to execute the transaction. 
- -#### Example Usage - -```javascript -const { Buffer } = require("buffer"); -const bs58 = require("bs58"); -const web3 = require("@solana/web3.js"); - -let toPublicKey = web3.Keypair.generate().publicKey; -let fromPublicKey = web3.Keypair.generate(); - -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -let airdropSignature = await connection.requestAirdrop( - fromPublicKey.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); - -let type = web3.SYSTEM_INSTRUCTION_LAYOUTS.Transfer; -let data = Buffer.alloc(type.layout.span); -let layoutFields = Object.assign({ instruction: type.index }); -type.layout.encode(layoutFields, data); - -let recentBlockhash = await connection.getRecentBlockhash(); - -let messageParams = { - accountKeys: [ - fromPublicKey.publicKey.toString(), - toPublicKey.toString(), - web3.SystemProgram.programId.toString(), - ], - header: { - numReadonlySignedAccounts: 0, - numReadonlyUnsignedAccounts: 1, - numRequiredSignatures: 1, - }, - instructions: [ - { - accounts: [0, 1], - data: bs58.encode(data), - programIdIndex: 2, - }, - ], - recentBlockhash, -}; - -let message = new web3.Message(messageParams); - -let transaction = web3.Transaction.populate(message, [ - fromPublicKey.publicKey.toString(), -]); - -await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey]); -``` - -### Struct - -[SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/Struct.html) - -The struct class is used to create Rust compatible structs in javascript. This class is only compatible with Borsh encoded Rust structs. - -#### Example Usage - -Struct in Rust: - -```rust -pub struct Fee { - pub denominator: u64, - pub numerator: u64, -} -``` - -Using web3: - -```javascript -import BN from "bn.js"; -import { Struct } from "@solana/web3.js"; - -export class Fee extends Struct { - denominator: BN; - numerator: BN; -} -``` - -### Enum - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Enum.html) - -The Enum class is used to represent a Rust compatible Enum in javascript. The enum will just be a string representation if logged but can be properly encoded/decoded when used in conjunction with [Struct](javascript-api.md#Struct). This class is only compatible with Borsh encoded Rust enumerations. - -#### Example Usage - -Rust: - -```rust -pub enum AccountType { - Uninitialized, - StakePool, - ValidatorList, -} -``` - -Web3: - -```javascript -import { Enum } from "@solana/web3.js"; - -export class AccountType extends Enum {} -``` - -### NonceAccount - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/NonceAccount.html) - -Normally a transaction is rejected if a transaction's `recentBlockhash` field is too old. To provide for certain custodial services, Nonce Accounts are used. Transactions which use a `recentBlockhash` captured on-chain by a Nonce Account do not expire as long at the Nonce Account is not advanced. - -You can create a nonce account by first creating a normal account, then using `SystemProgram` to make the account a Nonce Account. 
- -#### Example Usage - -```javascript -const web3 = require("@solana/web3.js"); - -// Create connection -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -// Generate accounts -let account = web3.Keypair.generate(); -let nonceAccount = web3.Keypair.generate(); - -// Fund account -let airdropSignature = await connection.requestAirdrop( - account.publicKey, - web3.LAMPORTS_PER_SOL, -); - -await connection.confirmTransaction({ signature: airdropSignature }); - -// Get Minimum amount for rent exemption -let minimumAmount = await connection.getMinimumBalanceForRentExemption( - web3.NONCE_ACCOUNT_LENGTH, -); - -// Form CreateNonceAccount transaction -let transaction = new web3.Transaction().add( - web3.SystemProgram.createNonceAccount({ - fromPubkey: account.publicKey, - noncePubkey: nonceAccount.publicKey, - authorizedPubkey: account.publicKey, - lamports: minimumAmount, - }), -); -// Create Nonce Account -await web3.sendAndConfirmTransaction(connection, transaction, [ - account, - nonceAccount, -]); - -let nonceAccountData = await connection.getNonce( - nonceAccount.publicKey, - "confirmed", -); - -console.log(nonceAccountData); -// NonceAccount { -// authorizedPubkey: PublicKey { -// _bn: -// }, -// nonce: '93zGZbhMmReyz4YHXjt2gHsvu5tjARsyukxD4xnaWaBq', -// feeCalculator: { lamportsPerSignature: 5000 } -// } - -let nonceAccountInfo = await connection.getAccountInfo( - nonceAccount.publicKey, - "confirmed", -); - -let nonceAccountFromInfo = web3.NonceAccount.fromAccountData( - nonceAccountInfo.data, -); - -console.log(nonceAccountFromInfo); -// NonceAccount { -// authorizedPubkey: PublicKey { -// _bn: -// }, -// nonce: '93zGZbhMmReyz4YHXjt2gHsvu5tjARsyukxD4xnaWaBq', -// feeCalculator: { lamportsPerSignature: 5000 } -// } -``` - -The above example shows both how to create a `NonceAccount` using `SystemProgram.createNonceAccount`, as well as how to retrieve the `NonceAccount` from accountInfo. Using the nonce, you can create transactions offline with the nonce in place of the `recentBlockhash`. - -### VoteAccount - -[SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/VoteAccount.html) - -Vote account is an object that grants the capability of decoding vote accounts from the native vote account program on the network. - -#### Example Usage - -```javascript -const web3 = require("@solana/web3.js"); - -let voteAccountInfo = await connection.getProgramAccounts(web3.VOTE_PROGRAM_ID); -let voteAccountFromData = web3.VoteAccount.fromAccountData( - voteAccountInfo[0].account.data, -); -console.log(voteAccountFromData); -/* -VoteAccount { - nodePubkey: PublicKey { - _bn: - }, - authorizedWithdrawer: PublicKey { - _bn: - }, - commission: 10, - rootSlot: 104570885, - votes: [ - { slot: 104570886, confirmationCount: 31 }, - { slot: 104570887, confirmationCount: 30 }, - { slot: 104570888, confirmationCount: 29 }, - { slot: 104570889, confirmationCount: 28 }, - { slot: 104570890, confirmationCount: 27 }, - { slot: 104570891, confirmationCount: 26 }, - { slot: 104570892, confirmationCount: 25 }, - { slot: 104570893, confirmationCount: 24 }, - { slot: 104570894, confirmationCount: 23 }, - ... 
- ], - authorizedVoters: [ { epoch: 242, authorizedVoter: [PublicKey] } ], - priorVoters: [ - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object], [Object], - [Object], [Object] - ], - epochCredits: [ - { epoch: 179, credits: 33723163, prevCredits: 33431259 }, - { epoch: 180, credits: 34022643, prevCredits: 33723163 }, - { epoch: 181, credits: 34331103, prevCredits: 34022643 }, - { epoch: 182, credits: 34619348, prevCredits: 34331103 }, - { epoch: 183, credits: 34880375, prevCredits: 34619348 }, - { epoch: 184, credits: 35074055, prevCredits: 34880375 }, - { epoch: 185, credits: 35254965, prevCredits: 35074055 }, - { epoch: 186, credits: 35437863, prevCredits: 35254965 }, - { epoch: 187, credits: 35672671, prevCredits: 35437863 }, - { epoch: 188, credits: 35950286, prevCredits: 35672671 }, - { epoch: 189, credits: 36228439, prevCredits: 35950286 }, - ... - ], - lastTimestamp: { slot: 104570916, timestamp: 1635730116 } -} -*/ -``` - -## Staking - -### StakeProgram - -[SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/StakeProgram.html) - -The StakeProgram facilitates staking SOL and delegating them to any validators on the network. You can use StakeProgram to create a stake account, stake some SOL, authorize accounts for withdrawal of your stake, deactivate your stake, and withdraw your funds. The StakeInstruction class is used to decode and read more instructions from transactions calling the StakeProgram - -#### Example Usage - -```javascript -const web3 = require("@solana/web3.js"); - -// Fund a key to create transactions -let fromPublicKey = web3.Keypair.generate(); -let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); - -let airdropSignature = await connection.requestAirdrop( - fromPublicKey.publicKey, - web3.LAMPORTS_PER_SOL, -); -await connection.confirmTransaction({ signature: airdropSignature }); - -// Create Account -let stakeAccount = web3.Keypair.generate(); -let authorizedAccount = web3.Keypair.generate(); -/* Note: This is the minimum amount for a stake account -- Add additional Lamports for staking - For example, we add 50 lamports as part of the stake */ -let lamportsForStakeAccount = - (await connection.getMinimumBalanceForRentExemption( - web3.StakeProgram.space, - )) + 50; - -let createAccountTransaction = web3.StakeProgram.createAccount({ - fromPubkey: fromPublicKey.publicKey, - authorized: new web3.Authorized( - authorizedAccount.publicKey, - authorizedAccount.publicKey, - ), - lamports: lamportsForStakeAccount, - lockup: new web3.Lockup(0, 0, fromPublicKey.publicKey), - stakePubkey: stakeAccount.publicKey, -}); -await web3.sendAndConfirmTransaction(connection, createAccountTransaction, [ - fromPublicKey, - stakeAccount, -]); - -// Check that stake is available -let stakeBalance = await connection.getBalance(stakeAccount.publicKey); -console.log(`Stake balance: ${stakeBalance}`); -// Stake balance: 2282930 - -// We can verify the state of our stake. 
This may take some time to become active -let stakeState = await connection.getStakeActivation(stakeAccount.publicKey); -console.log(`Stake state: ${stakeState.state}`); -// Stake state: inactive - -// To delegate our stake, we get the current vote accounts and choose the first -let voteAccounts = await connection.getVoteAccounts(); -let voteAccount = voteAccounts.current.concat(voteAccounts.delinquent)[0]; -let votePubkey = new web3.PublicKey(voteAccount.votePubkey); - -// We can then delegate our stake to the voteAccount -let delegateTransaction = web3.StakeProgram.delegate({ - stakePubkey: stakeAccount.publicKey, - authorizedPubkey: authorizedAccount.publicKey, - votePubkey: votePubkey, -}); -await web3.sendAndConfirmTransaction(connection, delegateTransaction, [ - fromPublicKey, - authorizedAccount, -]); - -// To withdraw our funds, we first have to deactivate the stake -let deactivateTransaction = web3.StakeProgram.deactivate({ - stakePubkey: stakeAccount.publicKey, - authorizedPubkey: authorizedAccount.publicKey, -}); -await web3.sendAndConfirmTransaction(connection, deactivateTransaction, [ - fromPublicKey, - authorizedAccount, -]); - -// Once deactivated, we can withdraw our funds -let withdrawTransaction = web3.StakeProgram.withdraw({ - stakePubkey: stakeAccount.publicKey, - authorizedPubkey: authorizedAccount.publicKey, - toPubkey: fromPublicKey.publicKey, - lamports: stakeBalance, -}); - -await web3.sendAndConfirmTransaction(connection, withdrawTransaction, [ - fromPublicKey, - authorizedAccount, -]); -``` - -### Authorized - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Authorized.html) - -Authorized is an object used when creating an authorized account for staking within Solana. You can designate a `staker` and `withdrawer` separately, allowing for a different account to withdraw other than the staker. - -You can find more usage of the `Authorized` object under [`StakeProgram`](javascript-api.md#StakeProgram) - -### Lockup - -[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Lockup.html) - -Lockup is used in conjunction with the [StakeProgram](javascript-api.md#StakeProgram) to create an account. The Lockup is used to determine how long the stake will be locked, or unable to be retrieved. If the Lockup is set to 0 for both epoch and the Unix timestamp, the lockup will be disabled for the stake account. - -#### Example Usage - -```javascript -const { - Authorized, - Keypair, - Lockup, - StakeProgram, -} = require("@solana/web3.js"); - -let account = Keypair.generate(); -let stakeAccount = Keypair.generate(); -let authorized = new Authorized(account.publicKey, account.publicKey); -let lockup = new Lockup(0, 0, account.publicKey); - -let createStakeAccountInstruction = StakeProgram.createAccount({ - fromPubkey: account.publicKey, - authorized: authorized, - lamports: 1000, - lockup: lockup, - stakePubkey: stakeAccount.publicKey, -}); -``` - -The above code creates a `createStakeAccountInstruction` to be used when creating an account with the `StakeProgram`. The Lockup is set to 0 for both the epoch and Unix timestamp, disabling lockup for the account. - -See [StakeProgram](javascript-api.md#StakeProgram) for more. 
diff --git a/docs/src/developing/clients/rust-api.md b/docs/src/developing/clients/rust-api.md deleted file mode 100644 index 5f74cf78df32bc..00000000000000 --- a/docs/src/developing/clients/rust-api.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Rust API ---- - -Solana's Rust crates are [published to crates.io][crates.io] and can be found -[on docs.rs with the "solana-" prefix][docs.rs]. - -[crates.io]: https://crates.io/search?q=solana- -[docs.rs]: https://docs.rs/releases/search?query=solana- - -Some important crates: - -- [`solana-program`] — Imported by programs running on Solana, compiled - to SBF. This crate contains many fundamental data types and is re-exported from - [`solana-sdk`], which cannot be imported from a Solana program. - -- [`solana-sdk`] — The basic off-chain SDK, it re-exports - [`solana-program`] and adds more APIs on top of that. Most Solana programs - that do not run on-chain will import this. - -- [`solana-client`] — For interacting with a Solana node via the - [JSON RPC API](/api). - -- [`solana-cli-config`] — Loading and saving the Solana CLI configuration - file. - -- [`solana-clap-utils`] — Routines for setting up a CLI, using [`clap`], - as used by the main Solana CLI. Includes functions for loading all types of - signers supported by the CLI. - -[`solana-program`]: https://docs.rs/solana-program -[`solana-sdk`]: https://docs.rs/solana-sdk -[`solana-client`]: https://docs.rs/solana-client -[`solana-cli-config`]: https://docs.rs/solana-cli-config -[`solana-clap-utils`]: https://docs.rs/solana-clap-utils -[`clap`]: https://docs.rs/clap diff --git a/docs/src/developing/guides/compressed-nfts.md b/docs/src/developing/guides/compressed-nfts.md deleted file mode 100644 index 3dd613dfaf33a5..00000000000000 --- a/docs/src/developing/guides/compressed-nfts.md +++ /dev/null @@ -1,862 +0,0 @@ ---- -title: Creating Compressed NFTs with JavaScript -description: - "Compressed NFTs use the Bubblegum program from Metaplex to cheaply and - securely store NFT metadata using State Compression on Solana." -keywords: - - compression - - merkle tree - - read api - - metaplex ---- - -Compressed NFTs on Solana use the -[Bubblegum](https://docs.metaplex.com/programs/compression/) program from -Metaplex to cheaply and securely store NFT metadata using -[State Compression](../../learn/state-compression.md). - -This developer guide will use JavaScript/TypeScript to demonstrate: - -- [how to create a tree for compressed NFTs](#create-a-tree), -- [how to mint compressed NFTs into a tree](#mint-compressed-nfts), -- [how to get compressed NFT metadata from the Read API](#reading-compressed-nfts-metadata), - and -- [how to transfer compressed NFTs](#transfer-compressed-nfts) - -## Intro to Compressed NFTs - -Compressed NFTs use [State Compression](../../learn/state-compression.md) and -[merkle trees](../../learn/state-compression.md#what-is-a-merkle-tree) to -drastically reduce the storage cost for NFTs. Instead of storing an NFT's -metadata in a typical Solana account, compressed NFTs store the metadata within -the ledger. This allows compressed NFTs to still inherit the security and speed -of the Solana blockchain, while at the same time reducing the overall storage -costs. - -Even though the on-chain data storage mechanism is different than their -uncompressed counterparts, compressed NFTs still follow the exact same -[Metadata](https://docs.metaplex.com/programs/token-metadata/accounts#metadata) -schema/structure. Allowing you to define your Collection and NFT in an identical -way. 
However, the process to mint and transfer compressed NFTs is different from
uncompressed NFTs. Aside from using a different on-chain program, compressed
NFTs are minted into a merkle tree and require verification of a "proof" to
transfer. More on this below.

### Compressed NFTs and indexers

Since compressed NFTs store all of their metadata in the
[ledger](../../terminology.md#ledger), instead of in traditional
[accounts](../../terminology.md#account) like uncompressed NFTs, we will need
the help of indexing services to quickly fetch our compressed NFT's metadata.

Supporting RPC providers are using the Digital Asset Standard Read API (or "Read
API" for short) to add additional RPC methods that developers can call. These
additional, NFT-oriented methods are loaded with all the information about
particular NFTs, including support for **BOTH** compressed NFTs **AND**
uncompressed NFTs.

:::caution Metadata is secured by the ledger and cached by indexers

Since validators do not keep a very long history of the recent ledger data,
these indexers effectively "cache" the compressed NFT metadata passed through
the Solana ledger, quickly serving it back on request to improve the speed and
user experience of applications.

However, since the metadata was already secured by the ledger when minting the
compressed NFT, anyone could re-index the metadata directly from the secure
ledger, allowing for independent verification of the data should the need or
desire arise.

:::

These indexing services are already available from some of the common RPC
providers, with more rolling out support in the near future. To name a few of
the RPC providers that already support the Read API:

- Helius
- Triton
- SimpleHash

### How to mint compressed NFTs

The process to create or mint compressed NFTs on Solana is similar to creating a
"traditional NFT collection", with a few differences. The mint process will
happen in 3 primary steps:

- create an NFT collection (or use an existing one)
- create a
  [concurrent merkle tree](../../learn/state-compression.md#what-is-a-concurrent-merkle-tree)
  (using the `@solana/spl-account-compression` SDK)
- mint compressed NFTs into your tree (to any owner's address you want)

### How to transfer a compressed NFT

Once your compressed NFT exists on the Solana blockchain, the process to
transfer ownership of a compressed NFT happens in a few broad steps:

1. get the NFT "asset" information (from the indexer)
2. get the NFT's "proof" (from the indexer)
3. get the Merkle tree account (from the Solana blockchain)
4. prepare the asset proof (by parsing and formatting it)
5. build and send the transfer instruction

The first three steps primarily involve gathering specific pieces of information
(the `proof` and the tree's canopy depth) for the NFT to be transferred. These
pieces of information are needed to correctly parse/format the `proof` that is
actually sent within the transfer instruction itself.
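A hedged sketch of steps 1 and 2, assuming an RPC endpoint that supports the
Read API (the URL and asset id below are placeholders; the method names follow
the Digital Asset Standard):

```javascript
// Fetch a compressed NFT's asset info and merkle proof from a
// Read API enabled RPC endpoint via plain JSON-RPC.
const rpcUrl = "https://your-read-api-rpc.example"; // placeholder
const assetId = "<ASSET_ID>"; // placeholder: the compressed NFT's asset id

async function readApi(method, params) {
  const response = await fetch(rpcUrl, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ jsonrpc: "2.0", id: "0", method, params }),
  });
  return (await response.json()).result;
}

// Step 1: current ownership and tree info for the asset.
const asset = await readApi("getAsset", { id: assetId });

// Step 2: the proof, root, and tree id needed for the transfer instruction.
const assetProof = await readApi("getAssetProof", { id: assetId });
```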
- -## Getting started - -For this guide, we are going to make a few assumptions about the compressed NFT -collection we are going to create: - -- we are going to use TypeScript and NodeJS for this example -- we will use a single, **new** Metaplex collection - -### Project Setup - -Before we start creating our compressed NFT collection, we need to install a few -packages: - -- [`@solana/web3.js`](https://www.npmjs.com/package/@solana/web3.js) - the base - Solana JS SDK for interacting with the blockchain, including making our RPC - connection and sending transactions -- [`@solana/spl-token`](https://www.npmjs.com/package/@solana/spl-token) - used - in creating our collection and mint on-chain -- [`@solana/spl-account-compression`](https://www.npmjs.com/package/@solana/spl-account-compression) - - used to create the on-chain tree to store our compressed NFTs -- [`@metaplex-foundation/mpl-bubblegum`](https://www.npmjs.com/package/@metaplex-foundation/mpl-bubblegum) - - used to get the types and helper functions for minting and transferring - compressed NFTs on-chain -- [`@metaplex-foundation/mpl-token-metadata`](https://www.npmjs.com/package/@metaplex-foundation/mpl-token-metadata) - -used to get the types and helper functions for our NFT's metadata - - -Using your preferred package manager (e.g. npm, yarn, pnpm, etc), install these -packages into your project: - -```sh -yarn add @solana/web3.js @solana/spl-token @solana/spl-account-compression -``` - -```sh -yarn add @metaplex-foundation/mpl-bubblegum @metaplex-foundation/mpl-token-metadata -``` - -## Create a Collection - -NFTs are normally grouped together into a -[Collection](https://docs.metaplex.com/programs/token-metadata/certified-collections#collection-nfts) -using the Metaplex standard. This is true for **BOTH** traditional NFTs **AND** -compressed NFTs. The NFT Collection will store all the broad metadata for our -NFT grouping, such as the collection image and name that will appear in wallets -and explorers. - -Under the hood, an NFT collection acts similar to any other token on Solana. -More specifically, a Collection is effectively a uncompressed NFT. So we -actually create them following the same process of creating an -[SPL token](https://spl.solana.com/token): - -- create a new token "mint" -- create a associated token account (`ata`) for our token mint -- actually mint a single token -- store the collection's metadata in an Account on-chain - -Since NFT Collections having nothing special to do with -[State Compression](../../learn/state-compression.md) or -[compressed NFTs](./compressed-nfts.md), we will not cover creating one in this -guide. - -### Collection addresses - -Even though this guide does not cover creating one, we will need the many of the -various addresses for your Collection, including: - -- `collectionAuthority` - this may be your `payer` but it also might not be -- `collectionMint` - the collection's mint address -- `collectionMetadata` - the collection's metadata account -- `editionAccount` - for example, the `masterEditionAccount` created for your - collection - -## Create a tree - -One of the most important decisions to make when creating compressed NFTs is -[how to setup your tree](../../learn/state-compression.md#sizing-a-concurrent-merkle-tree). -Especially since the values used to size your tree will determine the overall -cost of creation, and **CANNOT** be changed after creation. - -:::caution - -A tree is **NOT** the same thing as a collection. A single collection can use -_any_ number of trees. 
-In fact, this is usually recommended for larger collections since smaller trees
-offer greater composability.
-
-Conversely, even though a tree **could** be used in multiple collections, it is
-generally considered an anti-pattern and is not recommended.
-
-:::
-
-Using the helper functions provided by the
-[`@solana/spl-account-compression`](https://www.npmjs.com/package/@solana/spl-account-compression)
-SDK, we can create our tree in the following steps:
-
-- decide on our tree size
-- generate a new Keypair and allocate space for the tree on-chain
-- actually create the tree (making it owned by the Bubblegum program)
-
-### Size your tree
-
-Your tree size is set by 3 values, each serving a very specific purpose:
-
-1. `maxDepth` - used to determine how many NFTs we can have in the tree
-2. `maxBufferSize` - used to determine how many updates to your tree are
-   possible in the same block
-3. `canopyDepth` - used to store a portion of the proof on chain, and as such is
-   a large factor in the cost and composability of your compressed NFT
-   collection
-
-:::info
-
-Read more about the details of
-[State Compression](../../learn/state-compression.md), including
-[how to size a tree](../../learn/state-compression.md#sizing-a-concurrent-merkle-tree)
-and potential composability concerns.
-
-:::
-
-Let's assume we are going to create a compressed NFT collection with 10k NFTs in
-it. Since our collection is relatively small, we only need a single, smaller
-tree to store all the NFTs:
-
-```ts
-// define the depth and buffer size of our tree to be created
-const maxDepthSizePair: ValidDepthSizePair = {
-  // max=16,384 nodes (for a `maxDepth` of 14)
-  maxDepth: 14,
-  maxBufferSize: 64,
-};
-
-// define the canopy depth of our tree to be created
-const canopyDepth = 10;
-```
-
-Setting a `maxDepth` of `14` will allow our tree to hold up to `16,384`
-compressed NFTs, far exceeding our `10k` collection size.
-
-Since only specific
-[`ValidDepthSizePair`](https://solana-labs.github.io/solana-program-library/account-compression/sdk/docs/modules/index.html#ValidDepthSizePair)
-pairs are allowed, simply set the `maxBufferSize` to the corresponding value
-tied to your desired `maxDepth`.
-
-Next, setting a `canopyDepth` of `10` tells our tree to store `10` of our "proof
-node hashes" on-chain, requiring us to always include `4` proof node values
-(i.e. `maxDepth - canopyDepth`) in every compressed NFT transfer instruction.
-
-### Generate addresses for the tree
-
-When creating a new tree, we need to generate a new
-[Keypair](../../terminology.md#keypair) to use as the tree's on-chain address:
-
-```ts
-const treeKeypair = Keypair.generate();
-```
-
-Since our tree will be used for compressed NFTs, we will also need to derive the
-tree's authority: an account owned by the Bubblegum program (i.e. a PDA):
-
-```ts
-// derive the tree's authority (PDA), owned by Bubblegum
-const [treeAuthority, _bump] = PublicKey.findProgramAddressSync(
-  [treeKeypair.publicKey.toBuffer()],
-  BUBBLEGUM_PROGRAM_ID,
-);
-```
-
-### Build the tree creation instructions
-
-With our tree size values defined, and our addresses generated, we need to build
-two related instructions:
-
-1. allocate enough space on-chain for our tree
-2. actually create the tree, owned by the Bubblegum program
-
-Using the
-[`createAllocTreeIx`](https://solana-labs.github.io/solana-program-library/account-compression/sdk/docs/modules/index.html#createAllocTreeIx)
-helper function, we allocate enough space on-chain for our tree.
-
-```ts
-// allocate the tree's account on chain with the `space`
-const allocTreeIx = await createAllocTreeIx(
-  connection,
-  treeKeypair.publicKey,
-  payer.publicKey,
-  maxDepthSizePair,
-  canopyDepth,
-);
-```
-
-Then using the
-[`createCreateTreeInstruction`](https://metaplex-foundation.github.io/metaplex-program-library/docs/bubblegum/functions/createCreateTreeInstruction.html)
-from the Bubblegum SDK, we actually create the tree on-chain, making it owned by
-the Bubblegum program.
-
-```ts
-// create the instruction to actually create the tree
-const createTreeIx = createCreateTreeInstruction(
-  {
-    payer: payer.publicKey,
-    treeCreator: payer.publicKey,
-    treeAuthority,
-    merkleTree: treeKeypair.publicKey,
-    compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID,
-    // NOTE: this is used for some on chain logging
-    logWrapper: SPL_NOOP_PROGRAM_ID,
-  },
-  {
-    maxBufferSize: maxDepthSizePair.maxBufferSize,
-    maxDepth: maxDepthSizePair.maxDepth,
-    public: false,
-  },
-  BUBBLEGUM_PROGRAM_ID,
-);
-```
-
-### Build and send the transaction
-
-With our two instructions built, we can add them into a transaction and send
-them to the blockchain, making sure both the `payer` and generated `treeKeypair`
-sign the transaction:
-
-```ts
-// build the transaction
-const tx = new Transaction().add(allocTreeIx).add(createTreeIx);
-tx.feePayer = payer.publicKey;
-
-// send the transaction
-const txSignature = await sendAndConfirmTransaction(
-  connection,
-  tx,
-  // ensuring the `treeKeypair` and the `payer` are BOTH signers
-  [treeKeypair, payer],
-  {
-    commitment: "confirmed",
-    skipPreflight: true,
-  },
-);
-```
-
-After a few short moments, and once the transaction is confirmed, we are ready
-to start minting compressed NFTs into our tree.
-
-## Mint compressed NFTs
-
-Since compressed NFTs follow the same Metaplex
-[metadata standards](https://docs.metaplex.com/programs/token-metadata/accounts#metadata)
-as traditional NFTs, we can define our actual NFT's data the same way.
-
-The primary difference is that with compressed NFTs the metadata is actually
-stored in the ledger (unlike traditional NFTs that store it in accounts). The
-metadata gets "hashed" and stored in our tree, and by association, secured by
-the Solana ledger, allowing us to cryptographically verify that our original
-metadata has not changed (unless we want it to).
-
-:::info
-
-Learn more about how State Compression uses
-[concurrent merkle trees](../../learn/state-compression.md#what-is-a-concurrent-merkle-tree)
-to cryptographically secure off-chain data using the Solana ledger.
-
-:::
-
-### Define our NFT's metadata
-
-We can define the specific metadata for the single NFT we are about to mint:
-
-```ts
-const compressedNFTMetadata: MetadataArgs = {
-  name: "NFT Name",
-  symbol: "ANY",
-  // specific json metadata for each NFT
-  uri: "https://supersweetcollection.notarealurl/token.json",
-  creators: null,
-  editionNonce: 0,
-  uses: null,
-  collection: null,
-  primarySaleHappened: false,
-  sellerFeeBasisPoints: 0,
-  isMutable: false,
-  // these values are taken from the Bubblegum package
-  tokenProgramVersion: TokenProgramVersion.Original,
-  tokenStandard: TokenStandard.NonFungible,
-};
-```
-
-In this demo, the key pieces of our NFT's metadata to note are:
-
-- `name` - this is the actual name of our NFT that will be displayed in wallets
-  and on explorers.
-- `uri` - this is the address for your NFT's metadata JSON file.
-- `creators` - for this example, we are not storing a list of creators.
-  If you want your NFTs to have royalties, you will need to store actual data
-  here. You can check out the Metaplex docs for more info on it.
-
-### Derive the Bubblegum signer
-
-When minting new compressed NFTs, the Bubblegum program needs a PDA to perform a
-[cross-program invocation](../programming-model/calling-between-programs#cross-program-invocations)
-(`cpi`) to the SPL compression program.
-
-:::caution
-
-This `bubblegumSigner` PDA is derived using a hard-coded seed string of
-`collection_cpi` and owned by the Bubblegum program. If this hard-coded value is
-not provided correctly, your compressed NFT minting will fail.
-
-:::
-
-Below, we derive this PDA using the **required** hard-coded seed string of
-`collection_cpi`:
-
-```ts
-// derive a PDA (owned by Bubblegum) to act as the signer of the compressed minting
-const [bubblegumSigner, _bump2] = PublicKey.findProgramAddressSync(
-  // `collection_cpi` is a custom prefix required by the Bubblegum program
-  [Buffer.from("collection_cpi", "utf8")],
-  BUBBLEGUM_PROGRAM_ID,
-);
-```
-
-### Create the mint instruction
-
-Now we should have all the information we need to actually mint our compressed
-NFT.
-
-Using the `createMintToCollectionV1Instruction` helper function provided in the
-Bubblegum SDK, we can craft the instruction to actually mint our compressed NFT
-directly into our collection.
-
-If you have minted traditional NFTs on Solana, this will look fairly similar. We
-are creating a new instruction, providing several of the account addresses you
-might expect (e.g. the `payer`, `tokenMetadataProgram`, and various collection
-addresses), and then some tree specific addresses.
-
-The addresses to pay special attention to are:
-
-- `leafOwner` - this will be the owner of the compressed NFT. You can either
-  mint it to yourself (i.e. the `payer`) or airdrop it to any other Solana
-  address
-- `leafDelegate` - this is the delegated authority of this specific NFT we are
-  about to mint. If you do not want to have a delegated authority for the NFT we
-  are about to mint, then this value should be set to the same address as
-  `leafOwner`.
-
-```ts
-const compressedMintIx = createMintToCollectionV1Instruction(
-  {
-    payer: payer.publicKey,
-
-    merkleTree: treeAddress,
-    treeAuthority,
-    treeDelegate: payer.publicKey,
-
-    // set the receiver of the NFT
-    leafOwner: receiverAddress || payer.publicKey,
-    // set a delegated authority over this NFT
-    leafDelegate: payer.publicKey,
-
-    // collection details
-    collectionAuthority: payer.publicKey,
-    collectionAuthorityRecordPda: BUBBLEGUM_PROGRAM_ID,
-    collectionMint: collectionMint,
-    collectionMetadata: collectionMetadata,
-    editionAccount: collectionMasterEditionAccount,
-
-    // other accounts
-    bubblegumSigner: bubblegumSigner,
-    compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID,
-    logWrapper: SPL_NOOP_PROGRAM_ID,
-    tokenMetadataProgram: TOKEN_METADATA_PROGRAM_ID,
-  },
-  {
-    metadataArgs: Object.assign(compressedNFTMetadata, {
-      collection: { key: collectionMint, verified: false },
-    }),
-  },
-);
-```
-
-Some of the other tree specific addresses are:
-
-- `merkleTree` - the address of our tree we created
-- `treeAuthority` - the authority of the tree
-- `treeDelegate` - the delegated authority of the entire tree
-
-Then we also have all of our NFT collection's addresses, including the mint
-address, metadata account, and edition account. These addresses are also
-standard to pass in when minting uncompressed NFTs.
-
-#### Sign and send the transaction
-
-Once our compressed mint instruction has been created, we can add it to a
-transaction and send it to the Solana network:
-
-```ts
-const tx = new Transaction().add(compressedMintIx);
-tx.feePayer = payer.publicKey;
-
-// send the transaction to the cluster
-const txSignature = await sendAndConfirmTransaction(connection, tx, [payer], {
-  commitment: "confirmed",
-  skipPreflight: true,
-});
-```
-
-## Reading compressed NFTs metadata
-
-With the help of a supporting RPC provider, developers can use the Digital Asset
-Standard Read API (or "Read API" for short) to fetch the metadata of NFTs.
-
-:::info
-
-The Read API supports both compressed NFTs and traditional/uncompressed NFTs.
-You can use the same RPC endpoints to retrieve all the assorted information for
-both types of NFTs, including auto-fetching the NFTs' JSON URI.
-
-:::
-
-### Using the Read API
-
-When working with the Read API and a supporting RPC provider, developers can
-make `POST` requests to the RPC endpoint using their preferred method of making
-such requests (e.g. `curl`, JavaScript `fetch()`, etc).
-
-:::warning Asset ID
-
-Within the Read API, digital assets (i.e. NFTs) are indexed by their `id`. This
-asset `id` value differs slightly between traditional NFTs and compressed NFTs:
-
-- for traditional/uncompressed NFTs: this is the token's address for the actual
-  on-chain Account that stores the metadata for the asset.
-- for compressed NFTs: this is the `id` of the compressed NFT within the tree
-  and is **NOT** an actual on-chain Account address. While a compressed NFT's
-  `assetId` resembles a traditional Solana Account address, it is not one.
-
-:::
-
-### Common Read API Methods
-
-While the Read API supports more methods than those listed below, the most
-commonly used methods are:
-
-- `getAsset` - get a specific NFT asset by its `id`
-- `getAssetProof` - returns the merkle proof that is required to transfer a
-  compressed NFT, by its asset `id`
-- `getAssetsByOwner` - get the assets owned by a specific address
-- `getAssetsByGroup` - get the assets by a specific grouping (i.e. a collection)
-
-:::info Read API Methods, Schema, and Specification
-
-Explore all the additional RPC methods added by the Digital Asset Standard Read
-API on [Metaplex's RPC Playground](https://metaplex-read-api.surge.sh/). Here
-you will also find the expected inputs and response schema for each supported
-RPC method.
-
-:::
-
-### Example Read API Request
-
-For demonstration, below is an example request for the `getAsset` method using
-the
-[JavaScript Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API),
-which is built into modern JavaScript runtimes:
-
-```ts
-// make a POST request to the RPC using the JavaScript `fetch` api
-const response = await fetch(rpcEndpointUrl, {
-  method: "POST",
-  headers: {
-    "Content-Type": "application/json",
-  },
-  body: JSON.stringify({
-    jsonrpc: "2.0",
-    id: "rpd-op-123",
-    method: "getAsset",
-    params: {
-      id: "5q7qQ4FWYyj4vnFrivRBe6beo6p88X8HTkkyVPjPkQmF",
-    },
-  }),
-});
-```
-
-### Example Read API Response
-
-With a successful response from the RPC, you should see data similar to this:
-
-```ts
-{
-  interface: 'V1_NFT',
-  id: '5q7qQ4FWYyj4vnFrivRBe6beo6p88X8HTkkyVPjPkQmF',
-  content: [Object],
-  authorities: [Array],
-  compression: [Object],
-  grouping: [],
-  royalty: [Object],
-  creators: [],
-  ownership: [Object],
-  supply: [Object],
-  mutable: false
-}
-```
-
-The response fields to pay special attention to are:
-
-- `id` - this is your asset's `id`
-- `grouping` - can tell you the collection address that the NFT belongs to. The
-  collection address will be the `group_value`.
-- `metadata` - contains the actual metadata for the NFT, including the
-  auto-fetched JSON uri set when the NFT was minted
-- `ownership` - gives you the NFT owner's address (and also if the NFT has
-  delegated authority to another address)
-- `compression` - tells you if this NFT is actually using compression or not.
-  For compressed NFTs, this will also give you the tree address that is storing
-  the compressed NFT on chain.
-
-:::caution
-
-Some of the returned values may be empty if the NFT is **not** a compressed NFT,
-such as many of the `compression` fields. This is expected.
-
-:::
-
-## Transfer compressed NFTs
-
-Transferring compressed NFTs is different from transferring uncompressed NFTs.
-Aside from using a different on-chain program, compressed NFTs require the use
-of an asset's "merkle proof" (or `proof` for short) to actually change
-ownership.
-
-:::info What is a merkle proof?
-
-An asset's "merkle proof" is a listing of all the "adjacent hashes" within the
-tree that are required to validate a specific leaf within said tree.
-
-These proof hashes themselves, and the specific asset's leaf data, are hashed
-together in a deterministic way to compute the "root hash", therefore allowing
-for cryptographic validation of an asset within the merkle tree.
-
-**NOTE:** While each of these hash values resembles a Solana Account's
-[address/public key](../../terminology.md#public-key-pubkey), they are not
-addresses.
-
-:::
-
-Transferring ownership of a compressed NFT happens in 5 broad steps:
-
-1. get the NFT's "asset" data (from the indexer)
-2. get the NFT's proof (from the indexer)
-3. get the Merkle tree account (directly from the Solana blockchain)
-4. prepare the asset proof
-5. build and send the transfer instruction
-
-The first three steps primarily involve gathering specific pieces of information
-(the `proof` and the tree's canopy depth) for the NFT to be transferred. These
-pieces of information are needed to correctly parse/format the `proof` to
-actually be sent within the transfer instruction itself.
-
-### Get the asset
-
-To perform the transfer of our compressed NFT, we will need to retrieve a few
-pieces of information about the NFT.
-
-For starters, we will need to get some of the asset's information in order to
-allow the on-chain compression program to correctly perform validation and
-security checks.
-
-We can use the `getAsset` RPC method to retrieve two important pieces of
-information for the compressed NFT: the `data_hash` and `creator_hash`.
-
-#### Example response from the `getAsset` method
-
-Below is an example response from the `getAsset` method:
-
-```ts
-compression: {
-  eligible: false,
-  compressed: true,
-  data_hash: 'D57LAefACeaJesajt6VPAxY4QFXhHjPyZbjq9efrt3jP',
-  creator_hash: '6Q7xtKPmmLihpHGVBA6u1ENE351YKoyqd3ssHACfmXbn',
-  asset_hash: 'F3oDH1mJ47Z7tNBHvrpN5UFf4VAeQSwTtxZeJmn7q3Fh',
-  tree: 'BBUkS4LZQ7mU8iZXYLVGNUjSxCYnB3x44UuPVHVXS9Fo',
-  seq: 3,
-  leaf_id: 0
-}
-```
-
-### Get the asset proof
-
-The next step in preparing your compressed NFT transfer instruction is to get a
-**valid** asset `proof` to perform the transfer. This proof is required by the
-on-chain compression program to validate on-chain information.
-
-We can use the `getAssetProof` RPC method to retrieve two important pieces of
-information:
-
-- `proof` - the "full proof" that is required to perform the transfer (more on
-  this below)
-- `tree_id` - the on-chain address of the compressed NFT's tree
-
-:::info Full proof is returned
-
-The `getAssetProof` RPC method returns the complete listing of "proof hashes"
-that are used to perform the compressed NFT transfer. Since this "full proof" is
-returned from the RPC, we will need to remove the portion of the "full proof"
-that is stored on-chain via the tree's `canopy`.
-
-:::
-
-#### Example response from the `getAssetProof` method
-
-Below is an example response from the `getAssetProof` method:
-
-```ts
-{
-  root: '7dy5bzgaRcUnNH2KMExwNXXNaCJnf7wQqxc2VrGXy9qr',
-  proof: [
-    'HdvzZ4hrPEdEarJfEzAavNJEZcCS1YU1fg2uBvQGwAAb',
-    ...
-    '3e2oBSLfSDVdUdS7jRGFKa8nreJUA9sFPEELrHaQyd4J'
-  ],
-  node_index: 131072,
-  leaf: 'F3oDH1mJ47Z7tNBHvrpN5UFf4VAeQSwTtxZeJmn7q3Fh',
-  tree_id: 'BBUkS4LZQ7mU8iZXYLVGNUjSxCYnB3x44UuPVHVXS9Fo'
-}
-```
-
-### Get the Merkle tree account
-
-Since `getAssetProof` will always return the "full proof", we will have to
-reduce it down in order to remove the proof hashes that are stored on-chain in
-the tree's canopy. In order to remove the correct number of proof addresses, we
-need to know the tree's `canopyDepth`.
-
-Once we have our compressed NFT's tree address (the `tree_id` value from
-`getAssetProof`), we can use the
-[`ConcurrentMerkleTreeAccount`](https://solana-labs.github.io/solana-program-library/account-compression/sdk/docs/classes/index.ConcurrentMerkleTreeAccount.html)
-class from the `@solana/spl-account-compression` SDK:
-
-```ts
-// retrieve the merkle tree's account from the blockchain
-const treeAccount = await ConcurrentMerkleTreeAccount.fromAccountAddress(
-  connection,
-  treeAddress,
-);
-
-// extract the needed values for our transfer instruction
-const treeAuthority = treeAccount.getAuthority();
-const canopyDepth = treeAccount.getCanopyDepth();
-```
-
-For the transfer instruction, we will also need the current `treeAuthority`
-address, which we can also get via the `treeAccount`.
-
-### Prepare the asset proof
-
-With our "full proof" and `canopyDepth` values on hand, we can correctly format
-the `proof` to be submitted within the transfer instruction itself.
-
-Since we will use the `createTransferInstruction` helper function from the
-Bubblegum SDK to actually build our transfer instruction, we need to:
-
-- remove the proof values that are already stored on-chain in the
-  [tree's canopy](../../learn/state-compression.md#canopy-depth), and
-- convert the remaining proof values into the valid `AccountMeta` structure that
-  the instruction builder function accepts
-
-```ts
-// parse the list of proof addresses into a valid AccountMeta[]
-const proof: AccountMeta[] = assetProof.proof
-  .slice(0, assetProof.proof.length - (!!canopyDepth ? canopyDepth : 0))
-  .map((node: string) => ({
-    pubkey: new PublicKey(node),
-    isSigner: false,
-    isWritable: false,
-  }));
-```
-
-In the TypeScript code example above, we are first taking a `slice` of our "full
-proof", starting at the beginning of the array and keeping only the first
-`proof.length - canopyDepth` proof values. This removes the portion of the proof
-that is already stored on-chain in the tree's canopy.
-
-Then we are structuring each of the remaining proof values as a valid
-`AccountMeta`, since the proof is submitted on-chain in the form of "extra
-accounts" within the transfer instruction.
-
-### Build the transfer instruction
-
-Finally, with all the required pieces of data about our tree and compressed
-NFT, and a correctly formatted proof, we are ready to actually create the
-transfer instruction.
-
-Build your transfer instruction using the
-[`createTransferInstruction`](https://metaplex-foundation.github.io/metaplex-program-library/docs/bubblegum/functions/createTransferInstruction.html)
-helper function from the Bubblegum SDK:
-
-```ts
-// create the NFT transfer instruction (via the Bubblegum package)
-const transferIx = createTransferInstruction(
-  {
-    merkleTree: treeAddress,
-    treeAuthority,
-    leafOwner,
-    leafDelegate,
-    newLeafOwner,
-    logWrapper: SPL_NOOP_PROGRAM_ID,
-    compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID,
-    anchorRemainingAccounts: proof,
-  },
-  {
-    root: [...new PublicKey(assetProof.root.trim()).toBytes()],
-    dataHash: [...new PublicKey(asset.compression.data_hash.trim()).toBytes()],
-    creatorHash: [
-      ...new PublicKey(asset.compression.creator_hash.trim()).toBytes(),
-    ],
-    nonce: asset.compression.leaf_id,
-    index: asset.compression.leaf_id,
-  },
-  BUBBLEGUM_PROGRAM_ID,
-);
-```
-
-Aside from passing in our assorted Account addresses and the asset's proof, we
-are converting the string values of our `data_hash`, `creator_hash`, and `root`
-hash into arrays of bytes that are accepted by the `createTransferInstruction`
-helper function.
-
-Since each of these hash values resembles and is formatted similarly to a
-PublicKey, we can use the
-[`PublicKey`](https://solana-labs.github.io/solana-web3.js/classes/PublicKey.html)
-class in web3.js to convert them into an accepted byte array format.
-
-#### Send the transaction
-
-With our transfer instruction built, we can add it to a transaction and send it
-to the blockchain similar to before, making sure either the current `leafOwner`
-or the `leafDelegate` signs the transaction.
-
-:::note
-
-After each successful transfer of a compressed NFT, the `leafDelegate` should
-reset to an empty value, meaning the specific asset will not have delegated
-authority to an address other than its owner.
-
-:::
-
-And once confirmed by the cluster, we will have successfully transferred a
-compressed NFT.
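-
-For reference, a minimal sketch of that final send step might look like the
-following (assuming `connection` is an existing `Connection` and `payer` is the
-`Keypair` of the current `leafOwner`):
-
-```ts
-// build the transaction with our transfer instruction
-const tx = new Transaction().add(transferIx);
-tx.feePayer = payer.publicKey;
-
-// send the transaction, signed by the current leaf owner (here: `payer`)
-const txSignature = await sendAndConfirmTransaction(connection, tx, [payer], {
-  commitment: "confirmed",
-  skipPreflight: true,
-});
-```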
-
-## Example code repository
-
-You can find an example code repository for this developer guide on the Solana
-Developers GitHub: https://github.com/solana-developers/compressed-nfts
diff --git a/docs/src/developing/intro/programs.md b/docs/src/developing/intro/programs.md
deleted file mode 100644
index 161e3b47d2b03f..00000000000000
--- a/docs/src/developing/intro/programs.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: What are Solana Programs?
-description: "A Solana Program, aka smart contract, is the executable code that interprets the instructions on the blockchain. There are two types: Native and on chain."
----
-
-Solana Programs, often referred to as "_smart contracts_" on other blockchains, are the executable code that interprets the instructions sent inside of each transaction on the blockchain. They can be deployed directly into the core of the network as [Native Programs](#native-programs), or published by anyone as [On Chain Programs](#on-chain-programs). Programs are the core building blocks of the network and handle everything from sending tokens between wallets, to accepting votes in a DAO, to tracking ownership of NFTs.
-
-Both types of programs run on top of the [Sealevel runtime](https://medium.com/solana-labs/sealevel-parallel-processing-thousands-of-smart-contracts-d814b378192), which is Solana's _parallel processing_ model that helps to enable the high transaction speeds of the blockchain.
-
-## Key points
-
-- Programs are essentially a special type of [Account](../programming-model/accounts.md) that is marked as "_executable_"
-- Programs can own other Accounts
-- Programs can only _change the data_ or _debit_ accounts they own
-- Any program can _read_ or _credit_ another account
-- Programs are considered stateless since the primary data stored in a program account is the compiled SBF code
-- Programs can be upgraded by their owner (see more on that below)
-
-## Types of programs
-
-The Solana blockchain has two types of programs:
-
-- Native programs
-- On chain programs
-
-### On chain programs
-
-These user-written programs, often referred to as "_smart contracts_" on other blockchains, are deployed directly to the blockchain for anyone to interact with and execute. Hence the name "on chain"!
-
-In effect, "on chain programs" are any program that is not baked directly into the Solana cluster's core code (like the native programs discussed below).
-
-Even though Solana Labs maintains a small subset of these on chain programs (collectively known as the [Solana Program Library](https://spl.solana.com/)), anyone can create or publish one. On chain programs can also be updated directly on the blockchain by the respective program's Account owner.
-
-### Native programs
-
-_Native programs_ are programs that are built directly into the core of the Solana blockchain.
-
-Similar to other "on chain" programs in Solana, native programs can be called by any other program/user. However, they can only be upgraded as part of the core blockchain and cluster updates. These native program upgrades are controlled via the releases to the [different clusters](../../cluster/overview.md).
-
-#### Examples of native programs include:
-
-- [System Program](../runtime-facilities/programs.md#system-program): Create new accounts, transfer tokens, and more
-- [BPF Loader Program](../runtime-facilities/programs.md#bpf-loader): Deploys, upgrades, and executes programs on chain
-- [Vote program](../runtime-facilities/programs.md#vote-program): Create and manage accounts that track validator voting state and rewards.
-
-## Executable
-
-When a Solana program is deployed onto the network, it is marked as "executable" by the [BPF Loader Program](../runtime-facilities/programs.md#bpf-loader). This allows the Solana runtime to efficiently and properly execute the compiled program code.
-
-## Upgradable
-
-Unlike other blockchains, Solana programs can be upgraded after they are deployed to the network.
-
-Native programs can only be upgraded as part of cluster updates when new software releases are made.
-
-On chain programs can be upgraded by the account that is marked as the "_Upgrade Authority_", which is usually the Solana account/address that deployed the program to begin with.
diff --git a/docs/src/developing/intro/rent.md b/docs/src/developing/intro/rent.md
deleted file mode 100644
index 2938e7044b4fb6..00000000000000
--- a/docs/src/developing/intro/rent.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: What is rent?
-description: "Rent: the small fee Solana accounts incur to store data on the blockchain. Accounts with >2 years of rent are rent exempt and do not pay the periodic fee."
----
-
-The fee for every Solana Account to store data on the blockchain is called "_rent_". This _time and space_ based fee is required to keep an account, and therefore its data, alive on the blockchain since [clusters](../../cluster/overview.md) must actively maintain this data.

-All Solana Accounts (and therefore Programs) are required to maintain a high enough LAMPORT balance to become [rent exempt](#rent-exempt) and remain on the Solana blockchain.
-
-When an Account no longer has enough LAMPORTS to pay its rent, it will be removed from the network in a process known as [Garbage Collection](#garbage-collection).
-
-> **Note:** Rent is different from [transaction fees](../../transaction_fees.md). Rent is paid (or held in an Account) to keep data stored on the Solana blockchain. Whereas transaction fees are paid to process [instructions](../developing/../programming-model/transactions.md#instructions) on the network.
-
-### Rent rate
-
-The Solana rent rate is set on a network wide basis, primarily based on the set LAMPORTS _per_ byte _per_ year.
-
-Currently, the rent rate is a static amount and stored in the [Rent sysvar](../runtime-facilities/sysvars.md#rent).
-
-## Rent exempt
-
-Accounts that maintain a minimum LAMPORT balance greater than 2 years worth of rent payments are considered "_rent exempt_" and will not incur a rent collection.
-
-> At the time of writing this, new Accounts and Programs **are required** to be initialized with enough LAMPORTS to become rent-exempt. The RPC endpoints have the ability to calculate this [estimated rent exempt balance](../../api/http#getminimumbalanceforrentexemption), and it is recommended to use them.
-
-Every time an account's balance is reduced, a check is performed to see if the account is still rent exempt. Transactions that would cause an account's balance to drop below the rent exempt threshold will fail.
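-
-As an illustration, a minimal sketch of fetching that rent-exempt minimum with `@solana/web3.js` might look like the following (assuming a `devnet` connection and a hypothetical account size of 1000 bytes):
-
-```ts
-import { Connection, clusterApiUrl } from "@solana/web3.js";
-
-const connection = new Connection(clusterApiUrl("devnet"));
-
-// calculate the minimum balance (in lamports) required for an account
-// storing 1000 bytes of data to be rent exempt
-const dataLength = 1000;
-const rentExemptBalance =
-  await connection.getMinimumBalanceForRentExemption(dataLength);
-
-console.log(`Rent exempt minimum for ${dataLength} bytes:`, rentExemptBalance);
-```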
-
-## Garbage collection
-
-Accounts that do not maintain rent-exempt status, or do not have a balance high enough to pay rent, are removed from the network in a process known as _garbage collection_. This process is done to help reduce the network-wide storage of data that is no longer used or maintained.
-
-You can learn more about [garbage collection here](../../implemented-proposals/persistent-account-storage.md#garbage-collection) in this implemented proposal.
-
-## Learn more about Rent
-
-You can learn more about Solana Rent with the following articles and documentation:
-
-- [Implemented Proposals - Rent](../../implemented-proposals/rent.md)
-- [Implemented Proposals - Account Storage](../../implemented-proposals/persistent-account-storage.md)
diff --git a/docs/src/developing/intro/transaction_fees.md b/docs/src/developing/intro/transaction_fees.md
deleted file mode 100644
index 1013665bd9eeb3..00000000000000
--- a/docs/src/developing/intro/transaction_fees.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: Transaction Fees
-description: "Transaction fees are the small fees paid to process instructions on the network. These fees are based on computation and an optional prioritization fee."
-keywords:
-  - instruction fee
-  - processing fee
-  - storage fee
-  - low fee blockchain
-  - gas
-  - gwei
-  - cheap network
-  - affordable blockchain
----
-
-The small fees paid to process [instructions](./../../terminology.md#instruction) on the Solana blockchain are known as "_transaction fees_".
-
-As each transaction (which contains one or more instructions) is sent through the network, it gets processed by the current leader validation-client. Once confirmed as a global state transaction, this _transaction fee_ is paid to the network to help support the economic design of the Solana blockchain.
-
-> NOTE: Transaction fees are different from the blockchain's data storage fee called [rent](./rent.md)
-
-### Transaction Fee Calculation
-
-Currently, the amount of resources consumed by a transaction does not impact fees in any way. This is because the runtime imposes a small cap on the amount of resources that transaction instructions can use, not to mention that the size of transactions is limited as well. So right now, transaction fees are solely determined by the number of signatures that need to be verified in a transaction. The only limit on the number of signatures in a transaction is the max size of the transaction itself. Each signature (64 bytes) in a transaction (max 1232 bytes) must reference a unique public key (32 bytes), so a single transaction could contain as many as 12 signatures (though there is rarely a reason to do so).
The fee per transaction signature can be fetched with the `solana` cli: - -```bash -$ solana fees -Blockhash: 8eULQbYYp67o5tGF2gxACnBCKAE39TetbYYMGTx3iBFc -Lamports per signature: 5000 -Last valid block height: 94236543 -``` - -The `solana` cli `fees` subcommand calls the `getFees` RPC API method to retrieve the above output information, so your application can call that method directly as well: - -```bash -$ curl http://api.mainnet-beta.solana.com -H "Content-Type: application/json" -d ' - {"jsonrpc":"2.0","id":1, "method":"getFees"} -' - -# RESULT (lastValidSlot removed since it's inaccurate) -{ - "jsonrpc": "2.0", - "result": { - "context": { - "slot": 106818885 - }, - "value": { - "blockhash": "78e3YBCMXJBiPD1HpyVtVfFzZFPG6nUycnQcyNMSUQzB", - "feeCalculator": { - "lamportsPerSignature": 5000 - }, - "lastValidBlockHeight": 96137823 - } - }, - "id": 1 -} -``` - -### Fee Determinism - -It's important to keep in mind that fee rates (such as `lamports_per_signature`) are subject to change from block to block (though that hasn't happened in the full history of the `mainnet-beta` cluster). Despite the fact that fees can fluctuate, fees for a transaction can still be calculated deterministically when creating (and before signing) a transaction. This determinism comes from the fact that fees are applied using the rates from the block whose blockhash matches the `recent_blockhash` field in a transaction. Blockhashes can only be referenced by a transaction for a few minutes before they expire. - -Transactions with expired blockhashes will be ignored and dropped by the cluster, so it's important to understand how expiration actually works. Before transactions are added to a block and during block validation, [each transaction's recent blockhash is checked](https://github.com/solana-labs/solana/blob/647aa926673e3df4443d8b3d9e3f759e8ca2c44b/runtime/src/bank.rs#L3482) to ensure it hasn't expired yet. The max age of a transaction's blockhash is only 150 blocks. This means that if no slots are skipped in between, the blockhash for block 100 would be usable by transactions processed in blocks 101 to 252, inclusive (during block 101 the age of block 100 is "0" and during block 252 its age is "150"). However, it's important to remember that slots may be skipped and that age checks use "block height" _not_ "slot height". Since slots are skipped occasionally, the actual age of a blockhash can be a bit longer than 150 slots. At the time of writing, slot times are about 500ms and skip rate is about 5% so the expected lifetime of a transaction which uses the most recent blockhash is about 1min 19s. - -### Fee Collection - -Transactions are required to have at least one account which has signed the transaction and is writable. Writable signer accounts are serialized first in the list of transaction accounts and the first of these accounts is always used as the "fee payer". - -Before any transaction instructions are processed, the fee payer account balance will be deducted to pay for transaction fees. If the fee payer balance is not sufficient to cover transaction fees, the transaction will be dropped by the cluster. If the balance was sufficient, the fees will be deducted whether the transaction is processed successfully or not. In fact, if any of the transaction instructions return an error or violate runtime restrictions, all account changes _except_ the transaction fee deduction will be rolled back. 
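-
-Because fees are applied using the rates tied to a transaction's `recent_blockhash`, the expected fee can be computed before signing or sending. A minimal sketch of doing so with the `getFeeForMessage` RPC method from `@solana/web3.js` might look like the following (assuming a `devnet` connection and a hypothetical, unsigned transfer transaction):
-
-```ts
-import {
-  clusterApiUrl,
-  Connection,
-  Keypair,
-  SystemProgram,
-  Transaction,
-} from "@solana/web3.js";
-
-const connection = new Connection(clusterApiUrl("devnet"));
-const payer = Keypair.generate();
-
-// build a simple, unsigned transfer transaction to inspect its fee
-const { blockhash } = await connection.getLatestBlockhash();
-const tx = new Transaction().add(
-  SystemProgram.transfer({
-    fromPubkey: payer.publicKey,
-    toPubkey: Keypair.generate().publicKey,
-    lamports: 1_000,
-  }),
-);
-tx.feePayer = payer.publicKey;
-tx.recentBlockhash = blockhash;
-
-// the fee is determined by the rates tied to `recentBlockhash`, so it can
-// be calculated deterministically before the transaction is ever signed
-const fee = await connection.getFeeForMessage(tx.compileMessage());
-console.log("expected fee (lamports):", fee.value);
-```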
-
-### Fee Distribution
-
-Transaction fees are partially burned and the remaining fees are collected by the validator that produced the block that the corresponding transactions were included in. The transaction fee burn rate was initialized as 50% when inflation rewards were enabled at the beginning of 2021 and has not changed so far. These fees incentivize a validator to process as many transactions as possible during its slots in the leader schedule. Collected fees are deposited in the validator's account (listed in the leader schedule for the current slot) after processing all of the transactions included in a block.
diff --git a/docs/src/developing/lookup-tables.md b/docs/src/developing/lookup-tables.md
deleted file mode 100644
index 1d02c899fa1b56..00000000000000
--- a/docs/src/developing/lookup-tables.md
+++ /dev/null
@@ -1,148 +0,0 @@
----
-title: Address Lookup Tables
-description: ""
----
-
-Address Lookup Tables, commonly referred to as "_lookup tables_" or "_ALTs_" for short, allow developers to create a collection of related addresses to efficiently load more addresses in a single transaction.
-
-Since each transaction on the Solana blockchain requires a listing of every address that is interacted with as part of the transaction, this listing is effectively capped at 32 addresses per transaction. With the help of [Address Lookup Tables](./lookup-tables.md), a transaction is now able to raise that limit to 256 addresses per transaction.
-
-## Compressing on chain addresses
-
-After all the desired addresses have been stored on chain in an Address Lookup Table, each address can be referenced inside a transaction by its 1-byte index within the table (instead of their full 32-byte address). This lookup method effectively "_compresses_" a 32-byte address into a 1-byte index value.
-
-This "_compression_" enables storing up to 256 addresses in a single lookup table for use inside any given transaction.
-
-## Versioned Transactions
-
-To utilize an Address Lookup Table inside a transaction, developers must use v0 transactions that were introduced with the new [Versioned Transaction format](./versioned-transactions.md).
-
-## How to create an address lookup table
-
-Creating a new lookup table with the `@solana/web3.js` library is similar to working with the older `legacy` transactions, but with some differences.
-
-Using the `@solana/web3.js` library, you can use the [`createLookupTable`](https://solana-labs.github.io/solana-web3.js/classes/AddressLookupTableProgram.html#createLookupTable) function to construct the instruction needed to create a new lookup table, as well as determine its address:
-
-```js
-const web3 = require("@solana/web3.js");
-
-// connect to a cluster and get the current `slot`
-const connection = new web3.Connection(web3.clusterApiUrl("devnet"));
-const slot = await connection.getSlot();
-
-// Assumption:
-// `payer` is a valid `Keypair` with enough SOL to pay for the execution
-
-const [lookupTableInst, lookupTableAddress] =
-  web3.AddressLookupTableProgram.createLookupTable({
-    authority: payer.publicKey,
-    payer: payer.publicKey,
-    recentSlot: slot,
-  });
-
-console.log("lookup table address:", lookupTableAddress.toBase58());
-
-// To create the Address Lookup Table on chain:
-// send the `lookupTableInst` instruction in a transaction
-```
-
-> NOTE:
-> Address lookup tables can be **created** with either a `v0` transaction or a
-> `legacy` transaction. But the Solana runtime can only retrieve and handle the
-> additional addresses within a lookup table while using
-> [v0 Versioned Transactions](./versioned-transactions.md#current-transaction-versions).
-
-## Add addresses to a lookup table
-
-Adding addresses to a lookup table is known as "_extending_". Using the `@solana/web3.js` library, you can create a new _extend_ instruction using the [`extendLookupTable`](https://solana-labs.github.io/solana-web3.js/classes/AddressLookupTableProgram.html#extendLookupTable) method:
-
-```js
-// add addresses to the `lookupTableAddress` table via an `extend` instruction
-const extendInstruction = web3.AddressLookupTableProgram.extendLookupTable({
-  payer: payer.publicKey,
-  authority: payer.publicKey,
-  lookupTable: lookupTableAddress,
-  addresses: [
-    payer.publicKey,
-    web3.SystemProgram.programId,
-    // list more `publicKey` addresses here
-  ],
-});
-
-// Send this `extendInstruction` in a transaction to the cluster
-// to insert the listing of `addresses` into your lookup table with address `lookupTableAddress`
-```
-
-> NOTE:
-> Due to the same memory limits of `legacy` transactions, any transaction used to _extend_ an Address Lookup Table is also limited in how many addresses can be added at a time. Because of this, you will need to use multiple transactions to _extend_ any table with more addresses (~20) than can fit within a single transaction's memory limits.
-
-Once these addresses have been inserted into the table, and stored on chain, you will be able to utilize the Address Lookup Table in future transactions, enabling up to 256 addresses in those transactions.
-
-## Fetch an Address Lookup Table
-
-Similar to requesting another account (or PDA) from the cluster, you can fetch a complete Address Lookup Table with the [`getAddressLookupTable`](https://solana-labs.github.io/solana-web3.js/classes/Connection.html#getAddressLookupTable) method:
-
-```js
-// define the `PublicKey` of the lookup table to fetch
-const lookupTableAddress = new web3.PublicKey("");
-
-// get the table from the cluster
-const lookupTableAccount = (await connection.getAddressLookupTable(lookupTableAddress)).value;
-
-// `lookupTableAccount` will now be an `AddressLookupTableAccount` object
-
-console.log("Table address from cluster:", lookupTableAccount.key.toBase58());
-```
-
-Our `lookupTableAccount` variable will now be an `AddressLookupTableAccount` object which we can parse to read the listing of all the addresses stored on chain in the lookup table:
-
-```js
-// loop through and parse all the addresses stored in the table
-for (let i = 0; i < lookupTableAccount.state.addresses.length; i++) {
-  const address = lookupTableAccount.state.addresses[i];
-  console.log(i, address.toBase58());
-}
-```
-
-## How to use an address lookup table in a transaction
-
-After you have created your lookup table, and stored your needed addresses on chain (via extending the lookup table), you can create a `v0` transaction to utilize the on chain lookup capabilities.
-
-Just like older `legacy` transactions, you can create all the [instructions](./../terminology.md#instruction) your transaction will execute on chain. You can then provide an array of these instructions to the [Message](./../terminology.md#message) used in the `v0` transaction.
-
-> NOTE:
-> The instructions used inside a `v0` transaction can be constructed using the
-> same methods and functions used to create the instructions in the past.
-> There are no required changes to the instructions used involving an Address
-> Lookup Table.
-
-```js
-// Assumptions:
-// - `arrayOfInstructions` has been created as an `array` of `TransactionInstruction`
-// - we are using the `lookupTableAccount` obtained above
-
-// construct a v0 compatible transaction `Message`
-const messageV0 = new web3.TransactionMessage({
-  payerKey: payer.publicKey,
-  recentBlockhash: blockhash,
-  instructions: arrayOfInstructions, // note this is an array of instructions
-}).compileToV0Message([lookupTableAccount]);
-
-// create a v0 transaction from the v0 message
-const transactionV0 = new web3.VersionedTransaction(messageV0);
-
-// sign the v0 transaction using the file system wallet we created named `payer`
-transactionV0.sign([payer]);
-
-// send and confirm the transaction
-// (NOTE: There is NOT an array of Signers here; see the note below...)
-const txid = await web3.sendAndConfirmTransaction(connection, transactionV0);
-
-console.log(
-  `Transaction: https://explorer.solana.com/tx/${txid}?cluster=devnet`,
-);
-```
-
-> NOTE:
-> When sending a `VersionedTransaction` to the cluster, it must be signed BEFORE calling the
-> `sendAndConfirmTransaction` method. If you pass an array of `Signer`
-> (like with `legacy` transactions) the method will trigger an error!
-
-## More Resources
-
-- Read the [proposal](./../proposals/versioned-transactions.md) for Address Lookup Tables and Versioned transactions
-- [Example Rust program using Address Lookup Tables](https://github.com/TeamRaccoons/address-lookup-table-multi-swap)
diff --git a/docs/src/developing/on-chain-programs/debugging.md b/docs/src/developing/on-chain-programs/debugging.md
deleted file mode 100644
index fdbb1aebaa8002..00000000000000
--- a/docs/src/developing/on-chain-programs/debugging.md
+++ /dev/null
@@ -1,266 +0,0 @@
----
-title: "Debugging Programs"
----
-
-Solana programs run on-chain, so debugging them in the wild can be challenging.
-To make debugging programs easier, developers can write unit tests that directly
-test their program's execution via the Solana runtime, or run a local cluster
-that will allow RPC clients to interact with their program.
-
-## Running unit tests
-
-- [Testing with Rust](developing-rust.md#how-to-test)
-- [Testing with C](developing-c.md#how-to-test)
-
-## Logging
-
-During program execution both the runtime and the program log status and error
-messages.
-
-For information about how to log from a program see the language specific
-documentation:
-
-- [Logging from a Rust program](developing-rust.md#logging)
-- [Logging from a C program](developing-c.md#logging)
-
-When running a local cluster the logs are written to stdout as long as they are
-enabled via the `RUST_LOG` log mask. From the perspective of program
-development it is helpful to focus on just the runtime and program logs and not
-the rest of the cluster logs. To focus on program specific information the
-following log mask is recommended:
-
-`export RUST_LOG=solana_runtime::system_instruction_processor=trace,solana_runtime::message_processor=info,solana_bpf_loader=debug,solana_rbpf=debug`
-
-Log messages coming directly from the program (not the runtime) will be
-displayed in the form:
-
-`Program log: <user defined message>`
-
-## Error Handling
-
-The amount of information that can be communicated via a transaction error is
-limited, but there are many possible points of failure.
-The following are possible failure points and information about what errors to
-expect and where to get more information:
-
-- The SBF loader may fail to parse the program; this should not happen since the
-  loader has already _finalized_ the program's account data.
-  - `InstructionError::InvalidAccountData` will be returned as part of the
-    transaction error.
-- The SBF loader may fail to set up the program's execution environment
-  - `InstructionError::Custom(0x0b9f_0001)` will be returned as part of the
-    transaction error. "0x0b9f_0001" is the hexadecimal representation of
-    [`VirtualMachineCreationFailed`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/programs/bpf_loader/src/lib.rs#L44).
-- The SBF loader may have detected a fatal error during program execution
-  (things like panics, memory violations, system call errors, etc...)
-  - `InstructionError::Custom(0x0b9f_0002)` will be returned as part of the
-    transaction error. "0x0b9f_0002" is the hexadecimal representation of
-    [`VirtualMachineFailedToRunProgram`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/programs/bpf_loader/src/lib.rs#L46).
-- The program itself may return an error
-  - `InstructionError::Custom(<user defined value>)` will be returned. The
-    "user defined value" must not conflict with any of the [builtin runtime
-    program
-    errors](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/sdk/program/src/program_error.rs#L87).
-    Programs typically use enumeration types to define error codes starting at
-    zero so they won't conflict.
-
-In the case of `VirtualMachineFailedToRunProgram` errors, more information about
-the specifics of what failed is written to the [program's execution
-logs](debugging.md#logging).
-
-For example, an access violation involving the stack will look something like
-this:
-
-`SBF program 4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM failed: out of bounds memory store (insn #615), addr 0x200001e38/8`
-
-## Monitoring Compute Budget Consumption
-
-The program can log the remaining number of compute units it will be allowed
-to use before program execution is halted. Programs can use these logs to wrap
-operations they wish to profile.
-
-- [Log the remaining compute units from a Rust
-  program](developing-rust.md#compute-budget)
-- [Log the remaining compute units from a C
-  program](developing-c.md#compute-budget)
-
-See [compute budget](developing/programming-model/runtime.md#compute-budget)
-for more information.
-
-## ELF Dump
-
-The SBF shared object internals can be dumped to a text file to gain more
-insight into a program's composition and what it may be doing at runtime.
-
-- [Create a dump file of a Rust program](developing-rust.md#elf-dump)
-- [Create a dump file of a C program](developing-c.md#elf-dump)
-
-## Instruction Tracing
-
-During execution the runtime SBF interpreter can be configured to log a trace
-message for each SBF instruction executed. This can be very helpful for things
-like pin-pointing the runtime context leading up to a memory access violation.
-
-The trace logs together with the [ELF dump](#elf-dump) can provide a lot of
-insight (though the traces produce a lot of information).
-
-To turn on SBF interpreter trace messages in a local cluster, configure the
-`solana_rbpf` level in `RUST_LOG` to `trace`.
-For example:
-
-`export RUST_LOG=solana_rbpf=trace`
-
-## Source level debugging
-
-Source level debugging of on-chain programs written in Rust or C can
-be done using the `program run` subcommand of `solana-ledger-tool`,
-and lldb, which is distributed with the Solana Rust and Clang compiler
-binary package, platform-tools.
-
-The `solana-ledger-tool program run` subcommand loads a compiled
-on-chain program, executes it in the RBPF virtual machine, and runs a
-gdb server that accepts incoming connections from LLDB or GDB. Once
-lldb is connected to the `solana-ledger-tool` gdb server, it can
-control execution of an on-chain program. Run `solana-ledger-tool
-program run --help` for an example of specifying input data for
-parameters of the program entrypoint function.
-
-To compile a program for debugging, use the cargo-build-sbf build
-utility with the command line option `--debug`. The utility will
-generate two loadable files: one is the usual loadable module with the
-extension `.so`, and the other is the same loadable module but
-containing Dwarf debug information, a file with the extension `.debug`.
-
-To execute a program in the debugger, run `solana-ledger-tool program run`
-with the `-e debugger` command line option. For example, say a crate named
-'helloworld' is compiled and an executable program is built in the
-`target/deploy` directory. There should be three files in that
-directory:
-- helloworld-keypair.json -- a keypair for deploying the program,
-- helloworld.debug -- a binary file containing debug information,
-- helloworld.so -- an executable file loadable into the virtual machine.
-The command line for running `solana-ledger-tool` would be something like this
-```
-solana-ledger-tool program run -l test-ledger -e debugger target/deploy/helloworld.so
-```
-Note that `solana-ledger-tool` always loads a ledger database. Most
-on-chain programs interact with a ledger in some manner. Even if a
-ledger is not needed for debugging purposes, it has to be provided to
-`solana-ledger-tool`. A minimal ledger database can be created by
-running `solana-test-validator`, which creates a ledger in the
-`test-ledger` subdirectory.
-
-In debugger mode `solana-ledger-tool program run` loads an `.so` file and
-starts listening for an incoming connection from a debugger
-```
-Waiting for a Debugger connection on "127.0.0.1:9001"...
-```
-
-To connect to `solana-ledger-tool` and execute the program, run lldb. For
-debugging Rust programs, it may be beneficial to run the solana-lldb
-wrapper for lldb, i.e. at a new shell prompt (other than the one used
-to start `solana-ledger-tool`) run the command
-
-```
-solana-lldb
-```
-
-This script is installed in the platform-tools path. If that path is not
-added to the `PATH` environment variable, it may be necessary to specify
-the full path, e.g.
-```
-~/.cache/solana/v1.35/platform-tools/llvm/bin/solana-lldb
-```
-After starting the debugger, load the .debug file by entering the
-following command at the debugger prompt
-```
-(lldb) file target/deploy/helloworld.debug
-```
-If the debugger finds the file, it will print something like this
-```
-Current executable set to '/path/helloworld.debug' (bpf).
-```
-
-Now, connect to the gdb server that `solana-ledger-tool` implements, and
-debug the program as usual.
-Enter the following command at the lldb prompt
-```
-(lldb) gdb-remote 127.0.0.1:9001
-```
-If the debugger and the gdb server establish a connection, the
-execution of the program will be stopped at the entrypoint function,
-and lldb should print several lines of the source code around the
-entrypoint function signature. From this point on, normal lldb
-commands can be used to control execution of the program being
-debugged.
-
-
-### Debugging in an IDE
-
-To debug on-chain programs in Visual Studio Code, install the CodeLLDB
-extension. Open the CodeLLDB Extension Settings. In the
-Advanced settings, change the value of the `Lldb: Library` field to the
-path of `liblldb.so` (or liblldb.dylib on macOS). For example, on Linux a
-possible path to the Solana customized lldb can be
-`/home/<user>/.cache/solana/v1.33/platform-tools/llvm/lib/liblldb.so`,
-where `<user>` is your Linux system username. This can also be added
-directly to the `~/.config/Code/User/settings.json` file, e.g.
-```
-{
-    "lldb.library": "/home/<user>/.cache/solana/v1.35/platform-tools/llvm/lib/liblldb.so"
-}
-```
-
-In the `.vscode` subdirectory of your on-chain project, create two files.
-
-The first file is `tasks.json` with the following content
-```
-{
-    "version": "2.0.0",
-    "tasks": [
-        {
-            "label": "build",
-            "type": "shell",
-            "command": "cargo build-sbf --debug",
-            "problemMatcher": [],
-            "group": {
-                "kind": "build",
-                "isDefault": true
-            }
-        },
-        {
-            "label": "solana-debugger",
-            "type": "shell",
-            "command": "solana-ledger-tool program run -l test-ledger -e debugger ${workspaceFolder}/target/deploy/helloworld.so"
-        }
-    ]
-}
-```
-The first task is to build the on-chain program using the cargo-build-sbf
-utility. The second task is to run `solana-ledger-tool program run` in debugger mode.
-
-The second file is `launch.json` with the following content
-```
-{
-    "version": "0.2.0",
-    "configurations": [
-        {
-            "type": "lldb",
-            "request": "custom",
-            "name": "Debug",
-            "targetCreateCommands": ["target create ${workspaceFolder}/target/deploy/helloworld.debug"],
-            "processCreateCommands": ["gdb-remote 127.0.0.1:9001"]
-        }
-    ]
-}
```
-This file specifies how to run the debugger and connect it to the gdb
-server implemented by `solana-ledger-tool`.
-
-To start debugging a program, first build it by running the build
-task. The next step is to run the `solana-debugger` task. The tasks specified in
-the `tasks.json` file are started from the `Terminal >> Run Task...` menu of
-VSCode. When `solana-ledger-tool` is running and listening for incoming
-connections, it's time to start the debugger. Launch it from the VSCode
-`Run and Debug` menu. If everything is set up correctly, VSCode will
-start a debugging session and the program execution should stop at
-the entrance to the `entrypoint` function.
diff --git a/docs/src/developing/on-chain-programs/deploying.md b/docs/src/developing/on-chain-programs/deploying.md
deleted file mode 100644
index 2f0d095444dcf3..00000000000000
--- a/docs/src/developing/on-chain-programs/deploying.md
+++ /dev/null
@@ -1,143 +0,0 @@
----
-title: "Deploying Programs"
-description: "Deploying on-chain programs can be done using the Solana CLI using the Upgradable BPF loader to upload the compiled byte-code to the Solana blockchain."
----
-
-Solana on-chain programs (otherwise known as "smart contracts") are stored in "executable" accounts on Solana.
-These accounts are identical to any other account with the exception of:
-
-- having the "executable" flag enabled, and
-- the owner being assigned to a BPF loader
-
-Besides those exceptions, they are governed by the same runtime rules as non-executable accounts, hold SOL tokens for rent fees, and store a data buffer which is managed by the BPF loader program. The latest BPF loader is called the "Upgradeable BPF Loader".
-
-## Overview of the Upgradeable BPF Loader
-
-### State accounts
-
-The Upgradeable BPF loader program supports three different types of state accounts:
-
-1. [Program account](https://github.com/solana-labs/solana/blob/master/sdk/program/src/bpf_loader_upgradeable.rs#L34): This is the main account of an on-chain program and its address is commonly referred to as a "program id." Program ids are what transaction instructions reference in order to invoke a program. Program accounts are immutable once deployed, so you can think of them as a proxy account to the byte-code and state stored in other accounts.
-2. [Program data account](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/bpf_loader_upgradeable.rs#L39): This account is what stores the executable byte-code of an on-chain program. When a program is upgraded, this account's data is updated with new byte-code. In addition to byte-code, program data accounts are also responsible for storing the slot when it was last modified and the address of the sole account authorized to modify the account (this address can be cleared to make a program immutable).
-3. [Buffer accounts](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/bpf_loader_upgradeable.rs#L27): These accounts temporarily store byte-code while a program is being actively deployed through a series of transactions. They also each store the address of the sole account which is authorized to do writes.
-
-### Instructions
-
-The state accounts listed above can only be modified with one of the following instructions supported by the Upgradeable BPF Loader program:
-
-1. [Initialize buffer](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L21): Creates a buffer account and stores an authority address which is allowed to modify the buffer.
-2. [Write](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L28): Writes byte-code at a specified byte offset inside a buffer account. Writes are processed in small chunks due to a limitation of Solana transactions having a maximum serialized size of 1232 bytes.
-3. [Deploy](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L77): Creates both a program account and a program data account. It fills the program data account by copying the byte-code stored in a buffer account. If the byte-code is valid, the program account will be set as executable, allowing it to be invoked. If the byte-code is invalid, the instruction will fail and all changes are reverted.
-4. [Upgrade](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L102): Fills an existing program data account by copying executable byte-code from a buffer account.
-Similar to the deploy instruction, it will only succeed if the byte-code is valid.
-5. [Set authority](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L114): Updates the authority of a program data or buffer account if the account's current authority has signed the transaction being processed. If the authority is deleted without replacement, it can never be set to a new address and the account can never be closed.
-6. [Close](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L127): Clears the data of a program data account or buffer account and reclaims the SOL used for the rent exemption deposit.
-
-## How `solana program deploy` works
-
-Deploying a program on Solana requires hundreds, if not thousands, of transactions, due to the max size limit of 1232 bytes for Solana transactions. The Solana CLI takes care of this rapid firing of transactions with the `solana program deploy` subcommand. The process can be broken down into the following three phases:
-
-1. [Buffer initialization](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L2113): First, the CLI sends a transaction which [creates a buffer account](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L1903) large enough for the byte-code being deployed. It also invokes the [initialize buffer instruction](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L320) to set the buffer authority to restrict writes to the deployer's chosen address.
-2. [Buffer writes](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L2129): Once the buffer account is initialized, the CLI [breaks up the program byte-code](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L1940) into ~1KB chunks and [sends transactions at a rate of 100 transactions per second](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/client/src/tpu_client.rs#L133) to write each chunk with [the write buffer instruction](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L334). These transactions are sent directly to the current leader's transaction processing (TPU) port and are processed in parallel with each other. Once all transactions have been sent, the CLI [polls the RPC API with batches of transaction signatures](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/client/src/tpu_client.rs#L216) to ensure that every write was successful and confirmed.
-3. [Finalization](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L1807): Once writes are completed, the CLI [sends a final transaction](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L2150) to either [deploy a new program](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L362) or [upgrade an existing program](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L513).
-In either case, the byte-code written to the buffer account will be copied into a program data account and verified.
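-
-For illustration, the same three phases can be expressed with the instruction
-builders in `solana_program::bpf_loader_upgradeable`. The following is a
-minimal, hedged sketch (addresses, lamport amounts, the fixed ~1KB chunk size,
-and the max program length are placeholder assumptions; the CLI computes all of
-these for you):
-
-```rust
-use solana_program::{
-    bpf_loader_upgradeable,
-    instruction::{Instruction, InstructionError},
-    pubkey::Pubkey,
-};
-
-// Sketch: build the full instruction sequence for a fresh deploy.
-fn deploy_instructions(
-    payer: &Pubkey,
-    program: &Pubkey,
-    buffer: &Pubkey,
-    authority: &Pubkey,
-    byte_code: &[u8],
-    lamports: u64, // assumed rent-exempt balance for the created accounts
-) -> Result<Vec<Instruction>, InstructionError> {
-    // Phase 1: create the buffer account and set its write authority.
-    let mut instructions =
-        bpf_loader_upgradeable::create_buffer(payer, buffer, authority, lamports, byte_code.len())?;
-
-    // Phase 2: write the byte-code into the buffer in ~1KB chunks.
-    for (i, chunk) in byte_code.chunks(1024).enumerate() {
-        instructions.push(bpf_loader_upgradeable::write(
-            buffer,
-            authority,
-            (i * 1024) as u32,
-            chunk.to_vec(),
-        ));
-    }
-
-    // Phase 3: finalize, copying the buffer into a new program data account.
-    // max_data_len is assumed equal to the initial byte-code length here.
-    instructions.extend(bpf_loader_upgradeable::deploy_with_max_program_len(
-        payer,
-        program,
-        buffer,
-        authority,
-        lamports,
-        byte_code.len(),
-    )?);
-    Ok(instructions)
-}
-```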
-
-## Reclaim rent from program accounts
-
-The storage of data on the Solana blockchain requires the payment of [rent](./../intro/rent.md), including for the byte-code for on-chain programs. Therefore, as you deploy more or larger programs, the amount of rent paid to remain rent-exempt will also become larger.
-
-Using the current rent cost model configuration, a rent-exempt account requires a deposit of ~0.7 SOL per 100KB stored. These costs can have an outsized impact on developers who deploy their own programs since [program accounts](./../programming-model/accounts.md#executable) are among the largest we typically see on Solana.
-
-#### Example of how much data is used for programs
-
-As a data point on the number of accounts and the amount of data stored on-chain, below is the distribution of the largest accounts (at least 100KB) at slot `103,089,804` on `mainnet-beta` by assigned on-chain program:
-
-1. **Serum Dex v3**: 1798 accounts
-2. **Metaplex Candy Machine**: 1089 accounts
-3. **Serum Dex v2**: 864 accounts
-4. **Upgradeable BPF Program Loader**: 824 accounts
-5. **BPF Program Loader v2**: 191 accounts
-6. **BPF Program Loader v1**: 150 accounts
-
-> _Note: this data was pulled with a modified `solana-ledger-tool` built from this branch: [https://github.com/jstarry/solana/tree/large-account-stats](https://github.com/jstarry/solana/tree/large-account-stats)_
-
-### Reclaiming buffer accounts
-
-Buffer accounts are used by the Upgradeable BPF loader to temporarily store byte-code that is in the process of being deployed on-chain. This temporary buffer is required when upgrading programs because the currently deployed program's byte-code cannot be affected by an in-progress upgrade.
-
-Unfortunately, deploys fail occasionally and, instead of reusing the buffer account, developers might retry their deployment with a new buffer and not realize that they stored a good chunk of SOL in a forgotten buffer account from an earlier deploy.
-
-> As of slot `103,089,804` on `mainnet-beta` there are 276 abandoned buffer accounts that could be reclaimed!
-
-Developers can check if they own any abandoned buffer accounts by using the Solana CLI:
-
-```bash
-solana program show --buffers --keypair ~/.config/solana/MY_KEYPAIR.json
-
-Buffer Address                               | Authority                                    | Balance
-9vXW2c3qo6DrLHa1Pkya4Mw2BWZSRYs9aoyoP3g85wCA | 2nr1bHFT86W9tGnyvmYW4vcHKsQB3sVQfnddasz4kExM | 3.41076888 SOL
-```
-
-And they can close those buffers to reclaim the SOL balance with the following command:
-
-```bash
-solana program close --buffers --keypair ~/.config/solana/MY_KEYPAIR.json
-```
-
-#### Fetch the owners of buffer accounts via RPC API
-
-The owners of all abandoned program deploy buffer accounts can be fetched via the RPC API:
-
-```bash
-curl http://api.mainnet-beta.solana.com -H "Content-Type: application/json" \
---data-binary @- << EOF | jq --raw-output '.result | .[] | .account.data[0]'
-{
-  "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts",
-  "params":[
-    "BPFLoaderUpgradeab1e11111111111111111111111",
-    {
-      "dataSlice": {"offset": 5, "length": 32},
-      "filters": [{"memcmp": {"offset": 0, "bytes": "2UzHM"}}],
-      "encoding": "base64"
-    }
-  ]
-}
-EOF
-```
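-
-The response returns base64 encoded owner keys. To re-encode them as base58
-addresses and tally how many buffers each authority owns, a small off-chain
-helper can be used. A hedged sketch, assuming the third-party `base64` (v0.21+)
-and `bs58` crates, with `raw_keys` standing in for the strings returned above:
-
-```rust
-use base64::{engine::general_purpose::STANDARD, Engine as _};
-use std::collections::HashMap;
-
-// Sketch: decode each base64 key, re-encode it as base58, and count
-// how many buffer accounts each authority address owns.
-fn tally_buffer_authorities(raw_keys: &[String]) -> HashMap<String, usize> {
-    let mut counts = HashMap::new();
-    for key in raw_keys {
-        let bytes = STANDARD.decode(key).expect("RPC returns valid base64");
-        let address = bs58::encode(bytes).into_string();
-        *counts.entry(address).or_insert(0) += 1;
-    }
-    counts
-}
-```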
-
-After re-encoding the base64 encoded keys into base58 and grouping by key, we see some accounts have over 10 buffer accounts they could close, yikes!
-
-```bash
-'BE3G2F5jKygsSNbPFKHHTxvKpuFXSumASeGweLcei6G3' => 10 buffer accounts
-'EsQ179Q8ESroBnnmTDmWEV4rZLkRc3yck32PqMxypE5z' => 10 buffer accounts
-'6KXtB89kAgzW7ApFzqhBg5tgnVinzP4NSXVqMAWnXcHs' => 12 buffer accounts
-'FinVobfi4tbdMdfN9jhzUuDVqGXfcFnRGX57xHcTWLfW' => 15 buffer accounts
-'TESAinbTL2eBLkWqyGA82y1RS6kArHvuYWfkL9dKkbs' => 42 buffer accounts
-```
-
-### Reclaiming program data accounts
-
-You may now realize that program data accounts (the accounts that store the executable byte-code for an on-chain program) can also be closed.
-
-> **Note:** This does _not_ mean that _program accounts_ can be closed (those are immutable and can never be reclaimed, but that's fine because they're pretty small). It's also important to keep in mind that once program data accounts are deleted, they can never be recreated for an existing program. Therefore, the corresponding program (and its program id) for any closed program data account is effectively disabled forever and may not be re-deployed.
-
-While it would be uncommon for developers to need to close program data accounts since they can be rewritten during upgrades, one potential scenario is that, because program data accounts can't be _resized_, you may wish to deploy your program at a new address to accommodate a larger executable.
-
-The ability to reclaim program data account rent deposits also makes testing and experimentation on the `mainnet-beta` cluster a lot less costly since you could reclaim everything except the transaction fees and a small amount of rent for the program account. Lastly, this could help developers recover most of their funds if they mistakenly deploy a program at an unintended address or on the wrong cluster.
-
-To view the programs which are owned by your wallet address, you can run:
-
-```bash
-solana -V # must be 1.7.11 or higher!
-solana program show --programs --keypair ~/.config/solana/MY_KEYPAIR.json
-
-Program Id                                   | Slot     | Authority                                    | Balance
-CN5x9WEusU6pNH66G22SnspVx4cogWLqMfmb85Z3GW7N | 53796672 | 2nr1bHFT86W9tGnyvmYW4vcHKsQB3sVQfnddasz4kExM | 0.54397272 SOL
-```
-
-To close those program data accounts and reclaim their SOL balance, you can run:
-
-```bash
-solana program close --programs --keypair ~/.config/solana/MY_KEYPAIR.json
-```
-
-You might be concerned about this feature allowing malicious actors to close a program in a way that negatively impacts end users. While this is a valid concern in general, closing program data accounts doesn't make this any more exploitable than was already possible.
-
-Even without the ability to close a program data account, any upgradeable program could be upgraded to a no-op implementation and then have its upgrade authority cleared to make it immutable forever. This new feature for closing program data accounts merely adds the ability to reclaim the rent deposit; disabling a program was already technically possible.
diff --git a/docs/src/developing/on-chain-programs/developing-c.md b/docs/src/developing/on-chain-programs/developing-c.md
deleted file mode 100644
index cffbd1006bb067..00000000000000
--- a/docs/src/developing/on-chain-programs/developing-c.md
+++ /dev/null
@@ -1,190 +0,0 @@
----
-title: "Developing with C"
----
-
-Solana supports writing on-chain programs using the C and C++ programming
-languages.
-
-## Project Layout
-
-C projects are laid out as follows:
-
-```
-/src/<program name>
-/makefile
-```
-
-The `makefile` should contain the following:
-
-```bash
-OUT_DIR := <path to place to put shared object>
-include ~/.local/share/solana/install/active_release/bin/sdk/sbf/c/sbf.mk
-```
-
-The sbf-sdk may not be in the exact place specified above, but if you set up your
-environment per [How to Build](#how-to-build) then it should be.
-
-## How to Build
-
-First setup the environment:
-
-- Install the latest Rust stable from https://rustup.rs
-- Install the latest [Solana command-line tools](../../cli/install-solana-cli-tools.md)
-
-Then build using make:
-
-```bash
-make -C <program directory>
-```
-
-## How to Test
-
-Solana uses the [Criterion](https://github.com/Snaipe/Criterion) test framework
-and tests are executed each time the program is built (see [How to
-Build](#how-to-build)).
-
-To add tests, create a new file next to your source file named `test_<program name>.c`
-and populate it with criterion test cases. See the [Criterion docs](https://criterion.readthedocs.io/en/master)
-for information on how to write a test case.
-
-## Program Entrypoint
-
-Programs export a known entrypoint symbol which the Solana runtime looks up and
-calls when invoking a program. Solana supports multiple versions of the SBF loader and the entrypoints may vary between them.
-Programs must be written for and deployed to the same loader. For more details
-see the [FAQ section on Loaders](./faq.md#loaders).
-
-Currently there are two supported loaders: [SBF
-Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17)
-and [SBF loader
-deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14).
-
-They both have the same raw entrypoint definition; the following is the raw
-symbol that the runtime looks up and calls:
-
-```c
-extern uint64_t entrypoint(const uint8_t *input)
-```
-
-This entrypoint takes a generic byte array which contains the serialized program
-parameters (program id, accounts, instruction data, etc...). To deserialize the
-parameters, each loader provides its own [helper function](#serialization).
-
-### Serialization
-
-Each loader provides a helper function that deserializes the program's input
-parameters into C types:
-
-- [SBF Loader
-  deserialization](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L304)
-- [SBF Loader deprecated
-  deserialization](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/deserialize_deprecated.h#L25)
-
-Some programs may want to perform deserialization themselves, and they can do so
-by providing their own implementation of the [raw entrypoint](#program-entrypoint).
-Take note that the provided deserialization functions retain references back to
-the serialized byte array for variables that the program is allowed to modify
-(lamports, account data). The reason for this is that upon return the loader
-will read those modifications so they may be committed. If a program implements
-its own deserialization function, it needs to ensure that any modifications
-the program wishes to commit are written back into the input byte array.
-
-Details on how the loader serializes the program inputs can be found in the
-[Input Parameter Serialization](./faq.md#input-parameter-serialization) docs.
-
-## Data Types
-
-The loader's deserialization helper function populates the
-[SolParameters](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/solana_sdk.h#L276)
-structure:
-
-```c
-/**
- * Structure that the program's entrypoint input data is deserialized into.
- */
-typedef struct {
-  SolAccountInfo* ka; /** Pointer to an array of SolAccountInfo, must already
-                          point to an array of SolAccountInfos */
-  uint64_t ka_num; /** Number of SolAccountInfo entries in `ka` */
-  const uint8_t *data; /** pointer to the instruction data */
-  uint64_t data_len; /** Length in bytes of the instruction data */
-  const SolPubkey *program_id; /** program_id of the currently executing program */
-} SolParameters;
-```
-
-`ka` is an ordered array of the accounts referenced by the instruction,
-represented as
-[SolAccountInfo](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/solana_sdk.h#L173)
-structures. An account's place in the array signifies its meaning, for example,
-when transferring lamports an instruction may define the first account as the
-source and the second as the destination.
-
-The members of the `SolAccountInfo` structure are read-only except for
-`lamports` and `data`. Both may be modified by the program in accordance with
-the [runtime enforcement
-policy](developing/programming-model/accounts.md#policy). When an instruction
-references the same account multiple times there may be duplicate
-`SolAccountInfo` entries in the array, but they all point back to the original
-input byte array. A program should handle these cases delicately to avoid
-overlapping read/writes to the same buffer. If a program implements its own
-deserialization function, care should be taken to handle duplicate accounts
-appropriately.
-
-`data` is the general purpose byte array from the [instruction's instruction
-data](developing/programming-model/transactions.md#instruction-data) being
-processed.
-
-`program_id` is the public key of the currently executing program.
-
-## Heap
-
-C programs can allocate memory via the system call
-[`calloc`](https://github.com/solana-labs/solana/blob/c3d2d2134c93001566e1e56f691582f379b5ae55/sdk/sbf/c/inc/solana_sdk.h#L245)
-or implement their own heap on top of the 32KB heap region starting at virtual
-address 0x300000000. The heap region is also used by `calloc`, so if a program
-implements its own heap it should not also call `calloc`.
-
-## Logging
-
-The runtime provides two system calls that take data and log it to the program
-logs.
-
-- [`sol_log(const char*)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L128)
-- [`sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L134)
-
-The [debugging](debugging.md#logging) section has more information about working
-with program logs.
-
-## Compute Budget
-
-Use the system call `sol_remaining_compute_units()` to return a `u64` indicating
-the number of compute units remaining for this transaction.
-
-Use the system call
-[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/sbf/c/inc/solana_sdk.h#L140)
-to log a message containing the remaining number of compute units the program
-may consume before execution is halted.
-
-See [compute budget](developing/programming-model/runtime.md#compute-budget)
-for more information.
-
-## ELF Dump
-
-The SBF shared object internals can be dumped to a text file to gain more
-insight into a program's composition and what it may be doing at runtime. The
-dump will contain both the ELF information as well as a list of all the symbols
-and the instructions that implement them. Some of the SBF loader's error log
-messages will reference specific instruction numbers where the error occurred.
-These references can be looked up in the ELF dump to identify the offending
-instruction and its context.
-
-To create a dump file:
-
-```bash
-$ cd <program directory>
-$ make dump_<program name>
-```
-
-## Examples
-
-The [Solana Program Library GitHub](https://github.com/solana-labs/solana-program-library/tree/master/examples/c) repo contains a collection of C examples.
diff --git a/docs/src/developing/on-chain-programs/developing-rust.md b/docs/src/developing/on-chain-programs/developing-rust.md
deleted file mode 100644
index d1f8423ecdbe6f..00000000000000
--- a/docs/src/developing/on-chain-programs/developing-rust.md
+++ /dev/null
@@ -1,395 +0,0 @@
----
-title: "Developing with Rust"
----
-
-Solana supports writing on-chain programs using the
-[Rust](https://www.rust-lang.org/) programming language.
-
-## Project Layout
-
-Solana Rust programs follow the typical [Rust project
-layout](https://doc.rust-lang.org/cargo/guide/project-layout.html):
-
-```
-/inc/
-/src/
-/Cargo.toml
-```
-
-Solana Rust programs may depend directly on each other in order to gain access
-to instruction helpers when making [cross-program invocations](developing/programming-model/calling-between-programs.md#cross-program-invocations).
-When doing so it's important to not pull in the dependent program's entrypoint
-symbols because they may conflict with the program's own. To avoid this,
-programs should define a `no-entrypoint` feature in `Cargo.toml` and use it
-to exclude the entrypoint, as sketched below.
-
-- [Define the
-  feature](https://github.com/solana-labs/solana-program-library/blob/fca9836a2c8e18fc7e3595287484e9acd60a8f64/token/program/Cargo.toml#L12)
-- [Exclude the
-  entrypoint](https://github.com/solana-labs/solana-program-library/blob/fca9836a2c8e18fc7e3595287484e9acd60a8f64/token/program/src/lib.rs#L12)
-
-Then when other programs include this program as a dependency, they should do so
-using the `no-entrypoint` feature.
-
-- [Include without
-  entrypoint](https://github.com/solana-labs/solana-program-library/blob/fca9836a2c8e18fc7e3595287484e9acd60a8f64/token-swap/program/Cargo.toml#L22)
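-
-The gate itself is typically a one-line `cfg` attribute in the crate root. A
-minimal sketch of the pattern (the module name is illustrative):
-
-```rust
-// lib.rs sketch: compile the entrypoint module only when the
-// `no-entrypoint` feature is NOT enabled, so dependent programs
-// can exclude this crate's entrypoint symbol entirely.
-#[cfg(not(feature = "no-entrypoint"))]
-mod entrypoint;
-```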
-
-## Project Dependencies
-
-At a minimum, Solana Rust programs must pull in the
-[solana-program](https://crates.io/crates/solana-program) crate.
-
-Solana SBF programs have some [restrictions](#restrictions) that may prevent the
-inclusion of some crates as dependencies or require special handling.
-
-For example:
-
-- Crates that require the architecture be a subset of the ones supported by the
-  official toolchain. There is no workaround for this unless that crate is
-  forked and SBF added to its architecture checks.
-- Crates may depend on `rand`, which is not supported in Solana's deterministic
-  program environment. To include a `rand` dependent crate refer to [Depending
-  on Rand](#depending-on-rand).
-- Crates may overflow the stack even if the stack overflowing code isn't
-  included in the program itself. For more information refer to
-  [Stack](./faq.md#stack).
-
-## How to Build
-
-First setup the environment:
-
-- Install the latest Rust stable from https://rustup.rs/
-- Install the latest [Solana command-line tools](../../cli/install-solana-cli-tools.md)
-
-The normal cargo build is available for building programs against your host
-machine, which can be used for unit testing:
-
-```bash
-$ cargo build
-```
-
-To build a specific program, such as SPL Token, for the Solana SBF target which
-can be deployed to the cluster:
-
-```bash
-$ cd <program directory>
-$ cargo build-bpf
-```
-
-## How to Test
-
-Solana programs can be unit tested via the traditional `cargo test` mechanism by
-exercising program functions directly.
-
-To help facilitate testing in an environment that more closely matches a live
-cluster, developers can use the
-[`program-test`](https://crates.io/crates/solana-program-test) crate. The
-`program-test` crate starts up a local instance of the runtime and allows tests
-to send multiple transactions while keeping state for the duration of the test.
-
-For more information, the [test in sysvar
-example](https://github.com/solana-labs/solana-program-library/blob/master/examples/rust/sysvar/tests/functional.rs)
-shows how an instruction containing a sysvar account is sent and processed by the
-program.
-
-## Program Entrypoint
-
-Programs export a known entrypoint symbol which the Solana runtime looks up and
-calls when invoking a program. Solana supports multiple versions of the BPF
-loader and the entrypoints may vary between them.
-Programs must be written for and deployed to the same loader. For more details
-see the [FAQ section on Loaders](./faq.md#loaders).
-
-Currently there are two supported loaders: [BPF
-Loader](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/bpf_loader.rs#L17)
-and [BPF loader
-deprecated](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/bpf_loader_deprecated.rs#L14).
-
-They both have the same raw entrypoint definition; the following is the raw
-symbol that the runtime looks up and calls:
-
-```rust
-#[no_mangle]
-pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64;
-```
-
-This entrypoint takes a generic byte array which contains the serialized program
-parameters (program id, accounts, instruction data, etc...). To deserialize the
-parameters, each loader contains its own wrapper macro that exports the raw
-entrypoint, deserializes the parameters, calls a user defined instruction
-processing function, and returns the results.
-
-You can find the entrypoint macros here:
-
-- [BPF Loader's entrypoint
-  macro](https://github.com/solana-labs/solana/blob/9b1199cdb1b391b00d510ed7fc4866bdf6ee4eb3/sdk/program/src/entrypoint.rs#L42)
-- [BPF Loader deprecated's entrypoint
-  macro](https://github.com/solana-labs/solana/blob/9b1199cdb1b391b00d510ed7fc4866bdf6ee4eb3/sdk/program/src/entrypoint_deprecated.rs#L38)
-
-The program defined instruction processing function that the entrypoint macros
-call must be of this form:
-
-```rust
-pub type ProcessInstruction =
-    fn(program_id: &Pubkey, accounts: &[AccountInfo], instruction_data: &[u8]) -> ProgramResult;
-```
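-
-Putting these pieces together, a typical program wires the macro up as in the
-following minimal sketch (the logging body is a placeholder for real
-instruction decoding and dispatch):
-
-```rust
-use solana_program::{
-    account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, pubkey::Pubkey,
-};
-
-// Exports the raw `entrypoint` symbol and routes the deserialized
-// parameters to `process_instruction`.
-entrypoint!(process_instruction);
-
-fn process_instruction(
-    program_id: &Pubkey,
-    accounts: &[AccountInfo],
-    instruction_data: &[u8],
-) -> ProgramResult {
-    msg!(
-        "program {} invoked with {} accounts and {} bytes of data",
-        program_id,
-        accounts.len(),
-        instruction_data.len()
-    );
-    Ok(())
-}
-```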
-
-### Parameter Deserialization
-
-Each loader provides a helper function that deserializes the program's input
-parameters into Rust types. The entrypoint macros automatically call the
-deserialization helper:
-
-- [BPF Loader
-  deserialization](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint.rs#L146)
-- [BPF Loader deprecated
-  deserialization](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint_deprecated.rs#L57)
-
-Some programs may want to perform deserialization themselves, and they can do so
-by providing their own implementation of the [raw entrypoint](#program-entrypoint).
-Take note that the provided deserialization functions retain references back to
-the serialized byte array for variables that the program is allowed to modify
-(lamports, account data). The reason for this is that upon return the loader
-will read those modifications so they may be committed. If a program implements
-its own deserialization function, it needs to ensure that any modifications
-the program wishes to commit are written back into the input byte array.
-
-Details on how the loader serializes the program inputs can be found in the
-[Input Parameter Serialization](./faq.md#input-parameter-serialization) docs.
-
-### Data Types
-
-The loader's entrypoint macros call the program defined instruction processor
-function with the following parameters:
-
-```rust
-program_id: &Pubkey,
-accounts: &[AccountInfo],
-instruction_data: &[u8]
-```
-
-The program id is the public key of the currently executing program.
-
-The accounts are an ordered slice of the accounts referenced by the instruction,
-represented as
-[AccountInfo](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/account_info.rs#L12)
-structures. An account's place in the array signifies its meaning, for example,
-when transferring lamports an instruction may define the first account as the
-source and the second as the destination.
-
-The members of the `AccountInfo` structure are read-only except for `lamports`
-and `data`. Both may be modified by the program in accordance with the [runtime
-enforcement policy](developing/programming-model/accounts.md#policy). Both of
-these members are protected by the Rust `RefCell` construct, so they must be
-borrowed to read or write to them. The reason for this is they both point back
-to the original input byte array, but there may be multiple entries in the
-accounts slice that point to the same account. Using `RefCell` ensures that the
-program does not accidentally perform overlapping read/writes to the same
-underlying data via multiple `AccountInfo` structures. If a program implements
-its own deserialization function, care should be taken to handle duplicate
-accounts appropriately.
-
-The instruction data is the general purpose byte array from the [instruction's
-instruction data](developing/programming-model/transactions.md#instruction-data)
-being processed.
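-
-As an illustration of these borrowing rules, below is a hedged sketch that
-moves lamports between two program-owned accounts (the account order and the
-absence of validity checks are simplifying assumptions):
-
-```rust
-use solana_program::{
-    account_info::{next_account_info, AccountInfo},
-    entrypoint::ProgramResult,
-    program_error::ProgramError,
-};
-
-// Sketch: debit the first account and credit the second. The `lamports`
-// fields sit behind `RefCell`s, so they must be mutably borrowed first.
-fn move_lamports(accounts: &[AccountInfo], amount: u64) -> ProgramResult {
-    let account_info_iter = &mut accounts.iter();
-    let source = next_account_info(account_info_iter)?;
-    let destination = next_account_info(account_info_iter)?;
-
-    let mut source_lamports = source.try_borrow_mut_lamports()?;
-    let mut destination_lamports = destination.try_borrow_mut_lamports()?;
-
-    **source_lamports = source_lamports
-        .checked_sub(amount)
-        .ok_or(ProgramError::InsufficientFunds)?;
-    **destination_lamports = destination_lamports
-        .checked_add(amount)
-        .ok_or(ProgramError::InvalidArgument)?; // overflow guard
-    Ok(())
-}
-```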
-
-## Heap
-
-Rust programs implement the heap directly by defining a custom
-[`global_allocator`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint.rs#L72).
-
-Programs may implement their own `global_allocator` based on their specific needs.
-Refer to the [custom heap example](#examples) for more information.
-
-## Restrictions
-
-On-chain Rust programs support most of Rust's libstd, libcore, and liballoc, as
-well as many 3rd party crates.
-
-There are some limitations since these programs run in a resource-constrained,
-single-threaded environment, and must be deterministic:
-
-- No access to
-  - `rand`
-  - `std::fs`
-  - `std::net`
-  - `std::future`
-  - `std::process`
-  - `std::sync`
-  - `std::task`
-  - `std::thread`
-  - `std::time`
-- Limited access to:
-  - `std::hash`
-  - `std::os`
-- Bincode is extremely computationally expensive in both cycles and call depth
-  and should be avoided
-- String formatting should be avoided since it is also computationally
-  expensive.
-- No support for `println!` or `print!`; the Solana [logging helpers](#logging)
-  should be used instead.
-- The runtime enforces a limit on the number of instructions a program can
-  execute during the processing of one instruction. See
-  [computation budget](developing/programming-model/runtime.md#compute-budget) for more
-  information.
-
-## Depending on Rand
-
-Programs are constrained to run deterministically, so random numbers are not
-available. Sometimes a program may depend on a crate that itself depends on
-`rand` even if the program does not use any of the random number functionality.
-If a program depends on `rand`, the compilation will fail because there is no
-`getrandom` support for Solana. The error will typically look like this:
-
-```
-error: target is not supported, for more information see: https://docs.rs/getrandom/#unsupported-targets
-   --> /Users/jack/.cargo/registry/src/github.com-1ecc6299db9ec823/getrandom-0.1.14/src/lib.rs:257:9
-    |
-257 | /         compile_error!("\
-258 | |             target is not supported, for more information see: \
-259 | |             https://docs.rs/getrandom/#unsupported-targets\
-260 | |         ");
-    | |___________^
-```
-
-To work around this dependency issue, add the following dependency to the
-program's `Cargo.toml`:
-
-```
-getrandom = { version = "0.1.14", features = ["dummy"] }
-```
-
-or if the dependency is on getrandom v0.2 add:
-
-```
-getrandom = { version = "0.2.2", features = ["custom"] }
-```
-
-## Logging
-
-Rust's `println!` macro is computationally expensive and not supported. Instead
-the helper macro
-[`msg!`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L33)
-is provided.
-
-`msg!` has two forms:
-
-```rust
-msg!("A string");
-```
-
-or
-
-```rust
-msg!(0_u64, 1_u64, 2_u64, 3_u64, 4_u64);
-```
-
-Both forms output the results to the program logs. If a program so wishes, it
-can emulate `println!` by using `format!`:
-
-```rust
-msg!("Some variable: {:?}", variable);
-```
-
-The [debugging](debugging.md#logging) section has more information about working
-with program logs, and the [Rust examples](#examples) contain a logging example.
-
-## Panicking
-
-Rust's `panic!`, `assert!`, and internal panic results are printed to the
-[program logs](debugging.md#logging) by default.
-
-```
-INFO solana_runtime::message_processor] Finalized account CGLhHSuWsp1gT4B7MY2KACqp9RUwQRhcUFfVSuxpSajZ
-INFO solana_runtime::message_processor] Call SBF program CGLhHSuWsp1gT4B7MY2KACqp9RUwQRhcUFfVSuxpSajZ
-INFO solana_runtime::message_processor] Program log: Panicked at: 'assertion failed: `(left == right)`
-  left: `1`,
- right: `2`', rust/panic/src/lib.rs:22:5
-INFO solana_runtime::message_processor] SBF program consumed 5453 of 200000 units
-INFO solana_runtime::message_processor] SBF program CGLhHSuWsp1gT4B7MY2KACqp9RUwQRhcUFfVSuxpSajZ failed: BPF program panicked
-```
-
-### Custom Panic Handler
-
-Programs can override the default panic handler by providing their own
-implementation.
-
-First define the `custom-panic` feature in the program's `Cargo.toml`:
-
-```toml
-[features]
-default = ["custom-panic"]
-custom-panic = []
-```
-
-Then provide a custom implementation of the panic handler:
-
-```rust
-#[cfg(all(feature = "custom-panic", target_os = "solana"))]
-#[no_mangle]
-fn custom_panic(info: &core::panic::PanicInfo<'_>) {
-    solana_program::msg!("program custom panic enabled");
-    solana_program::msg!("{}", info);
-}
-```
-
-In the above snippet, the default implementation is shown, but developers may
-replace that with something that better suits their needs.
-
-One of the side effects of supporting full panic messages by default is that
-programs incur the cost of pulling in more of Rust's `libstd` implementation
-into the program's shared object. Typical programs will already be pulling in a
-fair amount of `libstd` and may not notice much of an increase in the shared
-object size. But programs that explicitly attempt to be very small by avoiding
-`libstd` may take a significant impact (~25KB). To eliminate that impact,
-programs can provide their own custom panic handler with an empty
-implementation.
-
-```rust
-#[cfg(all(feature = "custom-panic", target_os = "solana"))]
-#[no_mangle]
-fn custom_panic(_info: &core::panic::PanicInfo<'_>) {
-    // Do nothing to save space
-}
-```
-
-## Compute Budget
-
-Use the system call `sol_remaining_compute_units()` to return a `u64` indicating
-the number of compute units remaining for this transaction.
-
-Use the system call
-[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L141)
-to log a message containing the remaining number of compute units the program
-may consume before execution is halted.
-
-See [compute budget](developing/programming-model/runtime.md#compute-budget)
-for more information.
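-
-A hedged sketch of using both calls from program code (the 10,000-unit
-threshold is an arbitrary example):
-
-```rust
-use solana_program::{
-    compute_units::sol_remaining_compute_units, log::sol_log_compute_units, msg,
-};
-
-// Sketch: log the remaining budget, then skip an expensive step when
-// the budget is running low.
-fn maybe_do_expensive_step() {
-    sol_log_compute_units(); // logs the remaining unit count
-    if sol_remaining_compute_units() < 10_000 {
-        msg!("compute budget running low, skipping expensive step");
-        return;
-    }
-    // ... expensive work would go here ...
-}
-```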
-
-## ELF Dump
-
-The SBF shared object internals can be dumped to a text file to gain more
-insight into a program's composition and what it may be doing at runtime. The
-dump will contain both the ELF information as well as a list of all the symbols
-and the instructions that implement them. Some of the BPF loader's error log
-messages will reference specific instruction numbers where the error occurred.
-These references can be looked up in the ELF dump to identify the offending
-instruction and its context.
-
-To create a dump file:
-
-```bash
-$ cd <program directory>
-$ cargo build-bpf --dump
-```
-
-## Examples
-
-The [Solana Program Library
-GitHub](https://github.com/solana-labs/solana-program-library/tree/master/examples/rust)
-repo contains a collection of Rust examples.
-
-The [Solana Developers
-Program Examples GitHub](https://github.com/solana-developers/program-examples)
-repo also contains a collection of beginner to intermediate Rust program
-examples.
\ No newline at end of file
diff --git a/docs/src/developing/on-chain-programs/examples.md b/docs/src/developing/on-chain-programs/examples.md
deleted file mode 100644
index 1aaf154c74b4cc..00000000000000
--- a/docs/src/developing/on-chain-programs/examples.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: "Program Examples"
----
-
-## Break
-
-[Break](https://break.solana.com/) is a React app that gives users a visceral
-feeling for just how fast and high-performance the Solana network really is. Can
-you _break_ the Solana blockchain? During a 15 second play-through, each click of
-a button or keystroke sends a new transaction to the cluster. Smash the keyboard
-as fast as you can and watch your transactions get finalized in real time while
-the network takes it all in stride!
-
-Break can be played on our Devnet, Testnet and Mainnet Beta networks. Plays are
-free on Devnet and Testnet, where the session is funded by a network faucet. On
-Mainnet Beta, users pay 0.08 SOL per game to play. The session account can be
-funded by a local keystore wallet or by scanning a QR code from Trust Wallet to
-transfer the tokens.
-
-[Click here to play Break](https://break.solana.com/)
-
-### Build and Run
-
-First fetch the latest version of the example code:
-
-```bash
-$ git clone https://github.com/solana-labs/break.git
-$ cd break
-```
-
-Next, follow the steps in the git repository's
-[README](https://github.com/solana-labs/break/blob/master/README.md).
-
-## Language Specific
-
-- [Rust](developing-rust.md#examples)
-- [C](developing-c.md#examples)
diff --git a/docs/src/developing/on-chain-programs/faq.md b/docs/src/developing/on-chain-programs/faq.md
deleted file mode 100644
index 04093f64c54d00..00000000000000
--- a/docs/src/developing/on-chain-programs/faq.md
+++ /dev/null
@@ -1,215 +0,0 @@
----
-title: "FAQ"
----
-
-When writing or interacting with Solana programs, there are common questions or
-challenges that often come up. Below are resources to help answer these
-questions.
-
-If not addressed here, ask on [StackExchange](https://solana.stackexchange.com/questions/ask?tags=solana-program) with the `solana-program` tag.
-
-## Limitations
-
-Developing programs on the Solana blockchain has some inherent limitations
-associated with it. Below is a list of common limitations that you may run into.
-
-See [Limitations of developing programs](./limitations.md) for more details.
-
-## Berkeley Packet Filter (BPF)
-
-Solana on-chain programs are compiled via the [LLVM compiler infrastructure](https://llvm.org/) to an [Executable and Linkable Format (ELF)](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) containing
-a variation of the [Berkeley Packet Filter (BPF)](https://en.wikipedia.org/wiki/Berkeley_Packet_Filter) bytecode.
-
-Because Solana uses the LLVM compiler infrastructure, a program may be written in any programming language that can target the LLVM's BPF backend.
-
-BPF provides an efficient [instruction set](https://github.com/iovisor/bpf-docs/blob/master/eBPF.md) that can be executed in an interpreted virtual machine or as efficient just-in-time compiled native instructions.
-
-## Memory map
-
-The virtual address memory map used by Solana SBF programs is fixed and laid out
-as follows
-
-- Program code starts at 0x100000000
-- Stack data starts at 0x200000000
-- Heap data starts at 0x300000000
-- Program input parameters start at 0x400000000
-
-The above virtual addresses are start addresses, but programs are given access to
-a subset of the memory map.
-The program will panic if it attempts to read or
-write to a virtual address that it was not granted access to, and an
-`AccessViolation` error will be returned that contains the address and size of
-the attempted violation.
-
-## InvalidAccountData
-
-This program error can happen for a lot of reasons. Usually, it's caused by
-passing an account to the program that the program is not expecting, either in
-the wrong position in the instruction or an account not compatible with the
-instruction being executed.
-
-An implementation of a program might also cause this error when performing a
-cross-program invocation and forgetting to provide the account for the program
-that you are calling.
-
-## InvalidInstructionData
-
-This program error can occur while trying to deserialize the instruction; check
-that the structure passed in matches the instruction exactly. There may be some
-padding between fields. If the program implements the Rust `Pack` trait then try
-packing and unpacking the instruction type `T` to determine the exact encoding
-the program expects:
-
-https://github.com/solana-labs/solana/blob/v1.4/sdk/program/src/program_pack.rs
-
-## MissingRequiredSignature
-
-Some instructions require the account to be a signer; this error is returned if
-an account is expected to be signed but is not.
-
-An implementation of a program might also cause this error when performing a
-cross-program invocation that requires a signed program address, but the
-signer seeds passed to [`invoke_signed`](developing/programming-model/calling-between-programs.md)
-don't match the signer seeds used to create the program address
-[`create_program_address`](developing/programming-model/calling-between-programs.md#program-derived-addresses).
-
-## `rand` Rust dependency causes compilation failure
-
-See [Rust Project Dependencies](developing-rust.md#project-dependencies)
-
-## Rust restrictions
-
-See [Rust restrictions](developing-rust.md#restrictions)
-
-## Stack
-
-SBF uses stack frames instead of a variable stack pointer. Each stack frame is
-4KB in size.
-
-If a program violates that stack frame size, the compiler will report the
-overrun as a warning.
-
-For example:
-
-```
-Error: Function _ZN16curve25519_dalek7edwards21EdwardsBasepointTable6create17h178b3d2411f7f082E Stack offset of -30728 exceeded max offset of -4096 by 26632 bytes, please minimize large stack variables
-```
-
-The message identifies which symbol is exceeding its stack frame, but the name
-might be mangled if it is a Rust or C++ symbol.
-
-> To demangle a Rust symbol use [rustfilt](https://github.com/luser/rustfilt).
-
-The above warning came from a Rust program, so the demangled symbol name is:
-
-```bash
-rustfilt _ZN16curve25519_dalek7edwards21EdwardsBasepointTable6create17h178b3d2411f7f082E
-curve25519_dalek::edwards::EdwardsBasepointTable::create
-```
-
-To demangle a C++ symbol use `c++filt` from binutils.
-
-The reason a warning is reported rather than an error is because some dependent
-crates may include functionality that violates the stack frame restrictions even
-if the program doesn't use that functionality. If the program violates the stack
-size at runtime, an `AccessViolation` error will be reported.
-
-SBF stack frames occupy a virtual address range starting at `0x200000000`.
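-
-A common fix for a stack overrun is to move the large variable to the heap. A
-minimal illustrative sketch (the 16KB size is an arbitrary example):
-
-```rust
-// Sketch: a 16KB array held by value would exceed the 4KB stack frame;
-// a heap allocation (here via `vec!`) keeps the stack frame small.
-fn process() -> u8 {
-    let big = vec![0u8; 16 * 1024]; // heap-allocated buffer
-    big[0]
-}
-```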
-
-## Heap size
-
-Programs have access to a runtime heap either directly in C or via the Rust
-`alloc` APIs. To facilitate fast allocations, a simple 32KB bump heap is
-utilized. The heap does not support `free` or `realloc`, so use it wisely.
-
-Internally, programs have access to the 32KB memory region starting at virtual
-address 0x300000000 and may implement a custom heap based on the program's
-specific needs.
-
-- [Rust program heap usage](developing-rust.md#heap)
-- [C program heap usage](developing-c.md#heap)
-
-## Loaders
-
-Programs are deployed with and executed by runtime loaders. Currently there are
-two supported loaders: [BPF
-Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17)
-and [BPF loader
-deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14).
-
-Loaders may support different application binary interfaces so developers must
-write their programs for and deploy them to the same loader. If a program
-written for one loader is deployed to a different one the result is usually an
-`AccessViolation` error due to mismatched deserialization of the program's input
-parameters.
-
-For all practical purposes programs should always be written to target the latest
-BPF loader, and the latest loader is the default for the command-line interface
-and the JavaScript APIs.
-
-For language specific information about implementing a program for a particular
-loader see:
-
-- [Rust program entrypoints](developing-rust.md#program-entrypoint)
-- [C program entrypoints](developing-c.md#program-entrypoint)
-
-### Deployment
-
-SBF program deployment is the process of uploading a BPF shared object into a
-program account's data and marking the account executable. A client breaks the
-SBF shared object into smaller pieces and sends them as the instruction data of
-[`Write`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/sdk/program/src/loader_instruction.rs#L13)
-instructions to the loader, where the loader writes that data into the program's
-account data. Once all the pieces are received, the client sends a
-[`Finalize`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/sdk/program/src/loader_instruction.rs#L30)
-instruction to the loader; the loader then validates that the SBF data is valid
-and marks the program account as _executable_. Once the program account is
-marked executable, subsequent transactions may issue instructions for that
-program to process.
-
-When an instruction is directed at an executable SBF program the loader
-configures the program's execution environment, serializes the program's input
-parameters, calls the program's entrypoint, and reports any errors encountered.
-
-For further information see [deploying](deploying.md).
-
-### Input Parameter Serialization
-
-SBF loaders serialize the program input parameters into a byte array that is
-then passed to the program's entrypoint, where the program is responsible for
-deserializing it on-chain. One of the changes between the deprecated loader and
-the current loader is that the input parameters are serialized in a way that
-results in various parameters falling on aligned offsets within the aligned byte
-array. This allows deserialization implementations to directly reference the
-byte array and provide aligned pointers to the program.
-
-For language specific information about serialization see:
-
-- [Rust program parameter
-  deserialization](developing-rust.md#parameter-deserialization)
-- [C program parameter
-  deserialization](developing-c.md#parameter-deserialization)
-
-The latest loader serializes the program input parameters as follows (all
-encoding is little endian):
-
-- 8 bytes unsigned number of accounts
-- For each account
-  - 1 byte indicating if this is a duplicate account, if not a duplicate then
-    the value is 0xff, otherwise the value is the index of the account it is a
-    duplicate of.
-  - If duplicate: 7 bytes of padding
-  - If not duplicate:
-    - 1 byte boolean, true if account is a signer
-    - 1 byte boolean, true if account is writable
-    - 1 byte boolean, true if account is executable
-    - 4 bytes of padding
-    - 32 bytes of the account public key
-    - 32 bytes of the account's owner public key
-    - 8 bytes unsigned number of lamports owned by the account
-    - 8 bytes unsigned number of bytes of account data
-    - x bytes of account data
-    - 10k bytes of padding, used for realloc
-    - enough padding to align the offset to 8 bytes.
-    - 8 bytes rent epoch
-- 8 bytes unsigned number of bytes of instruction data
-- x bytes of instruction data
-- 32 bytes of the program id
diff --git a/docs/src/developing/on-chain-programs/limitations.md b/docs/src/developing/on-chain-programs/limitations.md
deleted file mode 100644
index f5c5e17427fe2f..00000000000000
--- a/docs/src/developing/on-chain-programs/limitations.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: "Limitations"
----
-
-Developing programs on the Solana blockchain has some inherent limitations
-associated with it. Below is a list of common limitations that you may run into.
-
-## Rust libraries
-
-Since Rust based on-chain programs must be deterministic while running in a
-resource-constrained, single-threaded environment, they have some limitations on
-various libraries.
-
-See [Developing with Rust - Restrictions](./developing-rust.md#restrictions) for
-a detailed breakdown of these restrictions and limitations.
-
-## Compute budget
-
-To prevent abuse of the blockchain's computational resources, each transaction is allocated a [compute budget](./../../terminology.md#compute-budget). Exceeding this compute budget will result in the transaction failing.
-
-See [computational constraints](../programming-model/runtime.md#compute-budget) in the Runtime for more specific details.
-
-## Call stack depth - `CallDepthExceeded` error
-
-Solana programs are constrained to run quickly, and to facilitate this, the program's call stack is limited to a max depth of **64 frames**.
-
-When a program exceeds the allowed call stack depth limit, it will receive the `CallDepthExceeded` error.
-
-## CPI call depth - `CallDepth` error
-
-Cross-program invocations allow programs to invoke other programs directly, but the depth is currently constrained to `4`.
-
-When a program exceeds the allowed [cross-program invocation call depth](../programming-model/calling-between-programs.md#call-depth), it will receive a `CallDepth` error.
-
-## Float Rust types support
-
-Programs support a limited subset of Rust's float operations. If a program
-attempts to use a float operation that is not supported, the runtime will report
-an unresolved symbol error.
-
-Float operations are performed via software libraries, specifically LLVM's float
-built-ins. Because they are emulated in software, they consume more compute units
-than integer operations.
-In general, fixed point operations are recommended where
-possible.
-
-The Solana Program Library math tests will report the performance of some math
-operations: https://github.com/solana-labs/solana-program-library/tree/master/libraries/math
-
-To run the test, sync the repo and run:
-
-```sh
-cargo test-sbf -- --nocapture --test-threads=1
-```
-
-Recent results show the float operations take more instructions compared to
-their integer equivalents. Fixed point implementations may vary but will also be
-less costly than the float equivalents:
-
-```
-          u64   f32
-Multiply    8   176
-Divide      9   219
-```
-
-## Static writable data
-
-Program shared objects do not support writable shared data. Programs are shared
-between multiple parallel executions using the same shared read-only code and
-data. This means that developers should not include any static writable or
-global variables in programs. In the future a copy-on-write mechanism could be
-added to support writable data.
-
-## Signed division
-
-The SBF instruction set does not support
-[signed division](https://www.kernel.org/doc/html/latest/bpf/bpf_design_QA.html#q-why-there-is-no-bpf-sdiv-for-signed-divide-operation). Adding a signed division instruction is under consideration.
diff --git a/docs/src/developing/on-chain-programs/overview.md b/docs/src/developing/on-chain-programs/overview.md
deleted file mode 100644
index af92a05128dc6d..00000000000000
--- a/docs/src/developing/on-chain-programs/overview.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-title: "Overview of Writing Programs"
-sidebar_label: "Overview"
----
-
-Developers can write and deploy their own programs to the Solana blockchain. While developing these "on-chain" programs can seem cumbersome, the entire process can be broadly summarized into a few key steps.
-
-## Solana Development Lifecycle
-
-1. Setup your development environment
-2. Write your program
-3. Compile the program
-4. Generate the program's public address
-5. Deploy the program
-
-### 1. Setup your development environment
-
-The most robust way of getting started with Solana development is [installing the Solana CLI](./../../cli/install-solana-cli-tools.md) tools on your local computer. This will give you the most powerful development environment.
-
-Some developers may also opt for using [Solana Playground](https://beta.solpg.io/), a browser based IDE. It will let you write, build, and deploy on-chain programs, all from your browser with no installation needed.
-
-### 2. Write your program
-
-Writing Solana programs is most commonly done using the Rust language. These Rust programs are effectively the same as creating a traditional [Rust library](https://doc.rust-lang.org/rust-by-example/crates/lib.html).
-
-> You can read more about other [supported languages](#supported-languages) below.
-
-### 3. Compile the program
-
-Once the program is written, it must be compiled down to [Berkeley Packet Filter](./faq.md#berkeley-packet-filter-bpf) byte-code that will then be deployed to the blockchain.
-
-### 4. Generate the program's public address
-
-Using the [Solana CLI](./../../cli/install-solana-cli-tools.md), the developer will generate a new unique [Keypair](./../../terminology.md#keypair) for the new program. The public address (aka [Pubkey](./../../terminology.md#public-key-pubkey)) from this Keypair will be used on-chain as the program's public address (aka [`programId`](./../../terminology.md#program-id)).
-
-### 5. Deploy the program
-
-Then, again using the CLI, the compiled program can be deployed to the selected blockchain cluster by creating many transactions containing the program's byte-code. Due to the transaction memory size limitations, each transaction effectively sends small chunks of the program to the blockchain in a rapid-fire manner.
-
-Once the entire program has been sent to the blockchain, a final transaction is sent to write all of the buffered byte-code to the program's data account. This either marks the new program as [`executable`](./../programming-model/accounts.md#executable) or completes the process of upgrading an existing program (if it already existed).
-
-## Supported languages
-
-Solana programs are typically written in the [Rust language](./developing-rust.md), but [C/C++](./developing-c.md) are also supported.
-
-There are also various community driven efforts to enable writing on-chain programs using other languages, including:
-
-- Python via [Seahorse](https://seahorse-lang.org/) (which acts as a wrapper around the Rust based Anchor framework)
-
-## Example programs
-
-You can also explore the [Program Examples](./examples.md) for examples of on-chain programs.
-
-## Limitations
-
-As you dive deeper into program development, it is important to understand some of the important limitations associated with on-chain programs.
-
-Read more details on the [Limitations](./limitations.md) page.
-
-## Frequently asked questions
-
-Discover many of the [frequently asked questions](./faq.md) other developers have about writing/understanding Solana programs.
diff --git a/docs/src/developing/programming-model/accounts.md b/docs/src/developing/programming-model/accounts.md
deleted file mode 100644
index 09265b7bcabec7..00000000000000
--- a/docs/src/developing/programming-model/accounts.md
+++ /dev/null
@@ -1,176 +0,0 @@
----
-title: "Accounts"
----
-
-## Storing State between Transactions
-
-If the program needs to store state between transactions, it does so using
-_accounts_. Accounts are similar to files in operating systems such as Linux in
-that they may hold arbitrary data that persists beyond
-the lifetime of a program. Also like a file, an account includes metadata that
-tells the runtime who is allowed to access the data and how.
-
-Unlike a file, the account includes metadata for its own lifetime. That
-lifetime is expressed by a number of fractional native
-tokens called _lamports_. Accounts are held in validator memory and pay
-["rent"](#rent) to stay there. Each validator periodically scans all accounts
-and collects rent. Any account that drops to zero lamports is purged. Accounts
-can also be marked [rent-exempt](#rent-exemption) if they contain a sufficient
-number of lamports.
-
-In the same way that a Linux user uses a path to look up a file, a Solana client
-uses an _address_ to look up an account. The address is a 256-bit public key.
-
-## Signers
-
-Transactions include one or more digital [signatures](terminology.md#signature),
-each corresponding to an account address referenced by the transaction. Each of these
-addresses must be the public key of an ed25519 keypair, and the signature signifies
-that the holder of the matching private key signed, and thus, "authorized" the transaction.
-In this case, the account is referred to as a _signer_. Whether an account is a signer or not
-is communicated to the program as part of the account's metadata. Programs can
-then use that information to make authority decisions.
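-
-In program code, that flag surfaces as the `is_signer` field of the account's
-`AccountInfo`. A minimal, hedged sketch of an authority check:
-
-```rust
-use solana_program::{
-    account_info::AccountInfo, entrypoint::ProgramResult, program_error::ProgramError,
-};
-
-// Sketch: require that the supposed authority actually signed the
-// transaction before honoring a privileged instruction.
-fn check_authority(authority_info: &AccountInfo) -> ProgramResult {
-    if !authority_info.is_signer {
-        return Err(ProgramError::MissingRequiredSignature);
-    }
-    Ok(())
-}
-```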
-
-## Read-only
-
-Transactions can [indicate](transactions.md#message-header-format) that some of
-the accounts they reference be treated as _read-only accounts_ in order to enable
-parallel account processing between transactions. The runtime permits read-only
-accounts to be read concurrently by multiple programs. If a program attempts to
-modify a read-only account, the transaction is rejected by the runtime.
-
-## Executable
-
-If an account is marked "executable" in its metadata, then it is considered a
-program which can be executed by including the account's public key in an
-instruction's [program id](transactions.md#program-id). Accounts are marked as
-executable during a successful program deployment process by the loader that
-owns the account. When a program is deployed to the execution engine (SBF deployment),
-the loader determines that the bytecode in the account's data is valid.
-If so, the loader permanently marks the program account as executable.
-
-If a program is marked as final (non-upgradeable), the runtime enforces that the
-account's data (the program) is immutable. Through the upgradeable loader, it is
-possible to upload a totally new program to an existing program address.
-
-## Creating
-
-To create an account, a client generates a _keypair_ and registers its public key
-using the `SystemProgram::CreateAccount` instruction with a fixed
-storage size in bytes preallocated.
-The current maximum size of an account's data is 10 MiB, which can be changed
-(increased or decreased) at a rate over all accounts of 20 MiB per transaction,
-and the size can be increased by 10 KiB per account and per instruction.
-
-An account address can be any arbitrary 256 bit value, and there are mechanisms
-for advanced users to create derived addresses
-(`SystemProgram::CreateAccountWithSeed`,
-[`Pubkey::CreateProgramAddress`](calling-between-programs.md#program-derived-addresses)).
-
-Accounts that have never been created via the system program can also be passed
-to programs. When an instruction references an account that hasn't been
-previously created, the program will be passed an account with no data and zero lamports
-that is owned by the system program.
-
-Such newly created accounts reflect
-whether they sign the transaction, and therefore, can be used as an
-authority. Authorities in this context convey to the program that the holder of
-the private key associated with the account's public key signed the transaction.
-The account's public key may be known to the program or recorded in another
-account, signifying some kind of ownership or authority over an asset or
-operation the program controls or performs.
-
-## Ownership and Assignment to Programs
-
-A created account is initialized to be _owned_ by a built-in program called the
-System program and is aptly called a _system account_. An account includes
-"owner" metadata. The owner is a program id. The runtime grants the program
-write access to the account if its id matches the owner. For the case of the
-System program, the runtime allows clients to transfer lamports and importantly
-_assign_ account ownership, meaning changing the owner to a different program id. If
-an account is not owned by a program, the program is only permitted to read its
-data and credit the account.
-
-## Verifying validity of unmodified, reference-only accounts
-
-For security purposes, it is recommended that programs check the validity of any
-account they read but do not modify.
-
-This is because a malicious user
-could create accounts with arbitrary data and then pass these accounts to the
-program in place of valid accounts. The arbitrary data could be crafted in
-a way that leads to unexpected or harmful program behavior.
-
-The security model enforces that an account's data can only be modified by the
-account's `Owner` program. This allows the program to trust that the data
-is passed to it via accounts it owns. The
-runtime enforces this by rejecting any transaction containing a program that
-attempts to write to an account it does not own.
-
-If a program were to not check account validity, it might read an account
-it thinks it owns, but doesn't. Anyone can
-issue instructions to a program, and the runtime does not know that those
-accounts are expected to be owned by the program.
-
-To check an account's validity, the program should either check the account's
-address against a known value, or check that the account is indeed owned
-correctly (usually owned by the program itself).
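-
-A hedged sketch of both checks (the expected address is a placeholder, and a
-real program may only need one of the two checks):
-
-```rust
-use solana_program::{
-    account_info::AccountInfo, entrypoint::ProgramResult, program_error::ProgramError,
-    pubkey::Pubkey,
-};
-
-// Sketch: validate a read-only account by its known address and by its
-// owner before trusting its data.
-fn validate_account(
-    account: &AccountInfo,
-    program_id: &Pubkey,
-    expected_address: &Pubkey,
-) -> ProgramResult {
-    if account.key != expected_address {
-        return Err(ProgramError::InvalidArgument);
-    }
-    if account.owner != program_id {
-        return Err(ProgramError::IncorrectProgramId);
-    }
-    Ok(())
-}
-```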
- -This is because a malicious user -could create accounts with arbitrary data and then pass these accounts to the -program in place of valid accounts. The arbitrary data could be crafted in -a way that leads to unexpected or harmful program behavior. - -The security model enforces that an account's data can only be modified by the -account's `Owner` program. This allows the program to trust the data -passed to it via accounts it owns. The -runtime enforces this by rejecting any transaction containing a program that -attempts to write to an account it does not own. - -If a program did not check account validity, it might read an account -it thinks it owns, but doesn't. Anyone can -issue instructions to a program, and the runtime does not know that those -accounts are expected to be owned by the program. - -To check an account's validity, the program should either check the account's -address against a known value, or check that the account is indeed owned -correctly (usually owned by the program itself). - -One example is when programs use a sysvar account. Unless the program checks the -account's address or owner, it's impossible to be sure whether it's a real and -valid sysvar account merely by successful deserialization of the account's data. - -Accordingly, the Solana SDK [checks the sysvar account's validity during -deserialization](https://github.com/solana-labs/solana/blob/a95675a7ce1651f7b59443eb146b356bc4b3f374/sdk/program/src/sysvar/mod.rs#L65). -An alternative and safer way to read a sysvar is via the sysvar's [`get()` -function](https://github.com/solana-labs/solana/blob/64bfc14a75671e4ec3fe969ded01a599645080eb/sdk/program/src/sysvar/mod.rs#L73) -which doesn't require these checks. - -If the program always modifies the account in question, the address/owner check -isn't required because modifying an unowned account will be rejected by the runtime, -and the containing transaction will be thrown out. - -## Rent - -Keeping accounts alive on Solana incurs a storage cost called _rent_ because the -blockchain cluster must actively maintain the data to process any future transactions. -This is different from Bitcoin and Ethereum, where storing accounts doesn't -incur any costs. - -Currently, all new accounts are required to be rent-exempt. - -### Rent exemption - -An account is considered rent-exempt if it holds at least two years' worth of rent. -This is checked every time an account's balance is reduced, and transactions -that would reduce the balance to below the minimum amount will fail. - -Program executable accounts are required by the runtime to be rent-exempt to -avoid being purged. - -:::info -Use the [`getMinimumBalanceForRentExemption`](../../api/http#getminimumbalanceforrentexemption) RPC -endpoint to calculate the -minimum balance for a particular account size. The following calculation is -illustrative only.
-::: - -For example, a program executable with the size of 15,000 bytes requires a -balance of 105,290,880 lamports (=~ 0.105 SOL) to be rent-exempt: - -```text -105,290,880 = 19.055441478439427 (fee rate) * (128 + 15_000)(account size including metadata) * ((365.25/2) * 2)(epochs in 2 years) -``` - -Rent can also be estimated via the [`solana rent` CLI subcommand](cli/usage.md#solana-rent): - -```text -$ solana rent 15000 -Rent per byte-year: 0.00000348 SOL -Rent per epoch: 0.000288276 SOL -Rent-exempt minimum: 0.10529088 SOL -``` - -Note: Rest assured that, should the storage rent rate need to be increased at some -point in the future, steps will be taken to ensure that accounts that are rent-exempt -before the increase will remain rent-exempt afterwards. diff --git a/docs/src/developing/programming-model/calling-between-programs.md b/docs/src/developing/programming-model/calling-between-programs.md deleted file mode 100644 index 4ed95dba1cf774..00000000000000 --- a/docs/src/developing/programming-model/calling-between-programs.md +++ /dev/null @@ -1,359 +0,0 @@ ---- -title: Calling Between Programs ---- - -## Cross-Program Invocations - -The Solana runtime allows programs to call each other via a mechanism called -cross-program invocation. Calling between programs is achieved by one program -invoking an instruction of the other. The invoking program is halted until the -invoked program finishes processing the instruction. - -For example, a client could create a transaction that modifies two accounts, -each owned by separate on-chain programs: - -```rust,ignore -let message = Message::new(vec![ - token_instruction::pay(&alice_pubkey), - acme_instruction::launch_missiles(&bob_pubkey), -]); -client.send_and_confirm_message(&[&alice_keypair, &bob_keypair], &message); -``` - -A client may instead allow the `acme` program to conveniently invoke `token` -instructions on the client's behalf: - -```rust,ignore -let message = Message::new(vec![ - acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey), -]); -client.send_and_confirm_message(&[&alice_keypair, &bob_keypair], &message); -``` - -Given two on-chain programs, `token` and `acme`, each implementing instructions -`pay()` and `launch_missiles()` respectively, `acme` can be implemented with a -call to a function defined in the `token` module by issuing a cross-program -invocation: - -```rust,ignore -mod acme { - use token_instruction; - - fn launch_missiles(accounts: &[AccountInfo]) -> Result<()> { - ... - } - - fn pay_and_launch_missiles(accounts: &[AccountInfo]) -> Result<()> { - let alice_pubkey = accounts[1].key; - let instruction = token_instruction::pay(alice_pubkey); - invoke(&instruction, accounts)?; - - launch_missiles(accounts) - } -} -``` - -`invoke()` is built into Solana's runtime and is responsible for routing the -given instruction to the `token` program via the instruction's `program_id` -field. - -Note that `invoke` requires the caller to pass all the accounts required by the -instruction being invoked, except for the executable account (the `program_id`). - -Before invoking `pay()`, the runtime must ensure that `acme` didn't modify any -accounts owned by `token`. It does this by applying the runtime's policy to the -current state of the accounts at the time `acme` calls `invoke` vs. the initial -state of the accounts at the beginning of the `acme` instruction.
After -`pay()` completes, the runtime must again ensure that `token` didn't modify any -accounts owned by `acme` by again applying the runtime's policy, but this time -with the `token` program ID. Lastly, after `pay_and_launch_missiles()` -completes, the runtime must apply the runtime policy one more time where it -normally would, but using all updated `pre_*` variables. If executing -`pay_and_launch_missiles()` up to `pay()` made no invalid account changes, -`pay()` made no invalid changes, and executing from `pay()` until -`pay_and_launch_missiles()` returns made no invalid changes, then the runtime -can transitively assume `pay_and_launch_missiles()` as a whole made no invalid -account changes, and therefore commit all these account modifications. - -### Instructions that require privileges - -The runtime uses the privileges granted to the caller program to determine what -privileges can be extended to the callee. Privileges in this context refer to -signers and writable accounts. For example, if the instruction the caller is -processing contains a signer or writable account, then the caller can invoke an -instruction that also contains that signer and/or writable account. - -This privilege extension relies on the fact that programs are immutable, except -during the special case of program upgrades. - -In the case of the `acme` program, the runtime can safely treat the transaction's -signature as a signature of a `token` instruction. When the runtime sees that the -`token` instruction references `alice_pubkey`, it looks up the key in the `acme` -instruction to see if that key corresponds to a signed account. In this case, it -does and thereby authorizes the `token` program to modify Alice's account. - -### Program signed accounts - -Programs can issue instructions that contain signed accounts that were not -signed in the original transaction by using [Program derived -addresses](#program-derived-addresses). - -To sign an account with program derived addresses, a program may -`invoke_signed()`. - -```rust,ignore - invoke_signed( - &instruction, - accounts, - &[&[b"First addresses seed"], - &[b"Second addresses first seed", b"Second addresses second seed"]], - )?; -``` - -### Call Depth - -Cross-program invocations allow programs to invoke other programs directly, but -the depth is currently constrained to 4. - -### Reentrancy - -Reentrancy is currently limited to direct self recursion, capped at a fixed -depth. This restriction prevents situations where a program might invoke another -from an intermediary state without the knowledge that it might later be called -back into. Direct recursion gives the program full control of its state at the -point that it gets called back. - -## Program Derived Addresses - -Program derived addresses allow programmatically generated signatures to be used -when [calling between programs](#cross-program-invocations). - -Using a program derived address, a program may be given authority over an -account and later transfer that authority to another. This is possible because -the program can act as the signer in the transaction that gives authority. - -For example, if two users want to make a wager on the outcome of a game in -Solana, they must each transfer their wager's assets to some intermediary that -will honor their agreement. Currently, there is no way to implement this -intermediary as a program in Solana because the intermediary program cannot -transfer the assets to the winner.
- -This capability is necessary for many DeFi applications since they require -assets to be transferred to an escrow agent until some event occurs that -determines the new owner. - -- Decentralized Exchanges that transfer assets between matching bid and ask - orders. - -- Auctions that transfer assets to the winner. - -- Games or prediction markets that collect and redistribute prizes to the - winners. - -Program derived addresses: - -1. Allow programs to control specific addresses, called program addresses, in - such a way that no external user can generate valid transactions with - signatures for those addresses. - -2. Allow programs to programmatically sign for program addresses that are - present in instructions invoked via [Cross-Program Invocations](#cross-program-invocations). - -Given the two conditions, users can securely transfer or assign the authority of -on-chain assets to program addresses, and the program can then assign that -authority elsewhere at its discretion. - -### Private keys for program addresses - -A program address does not lie on the ed25519 curve and therefore has no valid -private key associated with it, and thus generating a signature for it is -impossible. While it has no private key of its own, it can be used by a program -to issue an instruction that includes the program address as a signer. - -### Hash-based generated program addresses - -Program addresses are deterministically derived from a collection of seeds and a -program id using a 256-bit pre-image resistant hash function. Program addresses -must not lie on the ed25519 curve to ensure there is no associated private key. -During generation, an error will be returned if the address is found to lie on -the curve. There is about a 50/50 chance of this happening for a given -collection of seeds and program id. If this occurs, a different set of seeds or -a seed bump (an additional 8-bit seed) can be used to find a valid program address -off the curve. - -Deterministic program addresses for programs follow a derivation path similar to -accounts created with `SystemInstruction::CreateAccountWithSeed`, which is -implemented with `Pubkey::create_with_seed`. - -For reference, that implementation is as follows: - -```rust,ignore -pub fn create_with_seed( - base: &Pubkey, - seed: &str, - program_id: &Pubkey, -) -> Result<Pubkey, SystemError> { - if seed.len() > MAX_ADDRESS_SEED_LEN { - return Err(SystemError::MaxSeedLengthExceeded); - } - - Ok(Pubkey::new( - hashv(&[base.as_ref(), seed.as_ref(), program_id.as_ref()]).as_ref(), - )) -} -``` - -Programs can deterministically derive any number of addresses by using seeds. -These seeds can symbolically identify how the addresses are used.
- -From `Pubkey`: - -```rust,ignore -/// Generate a derived program address -/// * seeds, symbolic keywords used to derive the key -/// * program_id, program that the address is derived for -pub fn create_program_address( - seeds: &[&[u8]], - program_id: &Pubkey, -) -> Result<Pubkey, PubkeyError> - -/// Find a valid off-curve derived program address and its bump seed -/// * seeds, symbolic keywords used to derive the key -/// * program_id, program that the address is derived for -pub fn find_program_address( - seeds: &[&[u8]], - program_id: &Pubkey, -) -> Option<(Pubkey, u8)> { - let mut bump_seed = [std::u8::MAX]; - for _ in 0..std::u8::MAX { - let mut seeds_with_bump = seeds.to_vec(); - seeds_with_bump.push(&bump_seed); - if let Ok(address) = create_program_address(&seeds_with_bump, program_id) { - return Some((address, bump_seed[0])); - } - bump_seed[0] -= 1; - } - None -} -``` - - **Warning**: Because of the way the seeds are hashed there is a potential for  program address collisions for the same program id. The seeds are hashed  sequentially which means that seeds {"abcdef"}, {"abc", "def"}, and {"ab",  "cd", "ef"} will all result in the same program address given the same program  id. Since the chance of collision is local to a given program id, the developer  of that program must take care to choose seeds that do not collide with each  other. For seed schemes that are susceptible to this type of hash collision, a  common remedy is to insert separators between seeds, e.g. transforming {"abc",  "def"} into {"abc", "-", "def"}. - -### Using program addresses - -Clients can use the `create_program_address` function to generate a destination -address. In this example, we assume that -`create_program_address(&[b"escrow"], &escrow_program_id)` generates a valid -program address that is off the curve. - -```rust,ignore -// deterministically derive the escrow key -let escrow_pubkey = create_program_address(&[b"escrow"], &escrow_program_id).unwrap(); - -// construct a transfer message using that key -let message = Message::new(vec![ - token_instruction::transfer(&alice_pubkey, &escrow_pubkey, 1), -]); - -// process the message, which transfers 1 token to the escrow -client.send_and_confirm_message(&[&alice_keypair], &message); -``` - -Programs can use the same function to generate the same address. In the function -below, the program issues a `token_instruction::transfer` from a program address -as if it had the private key to sign the transaction. - -```rust,ignore -fn transfer_one_token_from_escrow( - program_id: &Pubkey, - accounts: &[AccountInfo], -) -> ProgramResult { - // User supplies the destination - let alice_pubkey = accounts[1].key; - - // Deterministically derive the escrow pubkey. - let escrow_pubkey = create_program_address(&[b"escrow"], program_id)?; - - // Create the transfer instruction - let instruction = token_instruction::transfer(&escrow_pubkey, alice_pubkey, 1); - - // The runtime deterministically derives the key from the currently - // executing program ID and the supplied keywords. - // If the derived address matches a key marked as signed in the instruction - // then that key is accepted as signed. - invoke_signed(&instruction, accounts, &[&[b"escrow"]]) -} -``` - -Note that the address generated using `create_program_address` is not guaranteed -to be a valid program address off the curve. For example, let's assume that the -seed `"escrow2"` does not generate a valid program address.
- -To generate a valid program address using `"escrow2"` as a seed, use -`find_program_address`, iterating through possible bump seeds until a valid -combination is found. The preceding example becomes: - -```rust,ignore -// find the escrow key and valid bump seed -let (escrow_pubkey2, escrow_bump_seed) = find_program_address(&[b"escrow2"], &escrow_program_id).unwrap(); - -// construct a transfer message using that key -let message = Message::new(vec![ - token_instruction::transfer(&alice_pubkey, &escrow_pubkey2, 1), -]); - -// process the message, which transfers 1 token to the escrow -client.send_and_confirm_message(&[&alice_keypair], &message); -``` - -Within the program, this becomes: - -```rust,ignore -fn transfer_one_token_from_escrow2( - program_id: &Pubkey, - accounts: &[AccountInfo], -) -> ProgramResult { - // User supplies the destination - let alice_pubkey = accounts[1].key; - - // Iteratively derive the escrow pubkey - let (escrow_pubkey2, bump_seed) = find_program_address(&[b"escrow2"], program_id).unwrap(); - - // Create the transfer instruction - let instruction = token_instruction::transfer(&escrow_pubkey2, alice_pubkey, 1); - - // Include the generated bump seed in the list of all seeds - invoke_signed(&instruction, accounts, &[&[b"escrow2", &[bump_seed]]]) -} -``` - -Since `find_program_address` requires iterating over a number of calls to -`create_program_address`, it may use more -[compute budget](developing/programming-model/runtime.md#compute-budget) when -used on-chain. To reduce the compute cost, use `find_program_address` off-chain -and pass the resulting bump seed to the program. - -### Instructions that require signers - -The addresses generated with `create_program_address` and `find_program_address` -are indistinguishable from any other public key. The only way for the runtime to -verify that the address belongs to a program is for the program to supply the -seeds used to generate the address. - -The runtime will internally call `create_program_address`, and compare the -result against the addresses supplied in the instruction. - -## Examples - -Refer to [Developing with -Rust](developing/on-chain-programs/../../../on-chain-programs/developing-rust.md#examples) -and [Developing with -C](developing/on-chain-programs/../../../on-chain-programs/developing-c.md#examples) -for examples of how to use cross-program invocation. diff --git a/docs/src/developing/programming-model/overview.md b/docs/src/developing/programming-model/overview.md deleted file mode 100644 index 43375b5292cc0a..00000000000000 --- a/docs/src/developing/programming-model/overview.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Overview" ---- - -An [app](terminology.md#app) interacts with a Solana cluster by sending it -[transactions](transactions.md) with one or more -[instructions](transactions.md#instructions). The Solana [runtime](runtime.md) -passes those instructions to [programs](terminology.md#program) deployed by app -developers beforehand. An instruction might, for example, tell a program to -transfer [lamports](terminology.md#lamport) from one [account](accounts.md) to -another or create an interactive contract that governs how lamports are -transferred. Instructions are executed sequentially and atomically for each -transaction. If any instruction is invalid, all account changes in the -transaction are discarded. - -To start developing immediately, you can build, deploy, and run one of the -[examples](developing/on-chain-programs/examples.md).
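To make that flow concrete, here is a minimal client-side sketch in Rust that builds and sends a single-instruction transaction transferring lamports between two accounts. It assumes a funded `payer` keypair and a reachable RPC endpoint (the URL below is a placeholder for a local test validator):

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to an RPC endpoint (here, a local test validator).
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());

    // `payer` is assumed to already hold enough lamports for the transfer and fees.
    let payer = Keypair::new();
    let recipient = Keypair::new();

    // One transaction, one instruction: move 1,000 lamports.
    let instruction =
        system_instruction::transfer(&payer.pubkey(), &recipient.pubkey(), 1_000);
    let blockhash = client.get_latest_blockhash()?;
    let tx = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&payer.pubkey()),
        &[&payer],
        blockhash,
    );

    // The runtime executes the instruction atomically; on failure, all
    // account changes in the transaction are discarded.
    let signature = client.send_and_confirm_transaction(&tx)?;
    println!("transaction signature: {signature}");
    Ok(())
}
```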
diff --git a/docs/src/developing/programming-model/runtime.md b/docs/src/developing/programming-model/runtime.md deleted file mode 100644 index ab95bef8dd04e2..00000000000000 --- a/docs/src/developing/programming-model/runtime.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Runtime" ---- - -## Capability of Programs - -The runtime only permits the owner program to debit the account or modify its -data. The program then defines additional rules for whether the client can -modify accounts it owns. In the case of the System program, it allows users to -transfer lamports by recognizing transaction signatures. If it sees the client -signed the transaction using the keypair's _private key_, it knows the client -authorized the token transfer. - -In other words, the entire set of accounts owned by a given program can be -regarded as a key-value store, where a key is the account address and the value is -program-specific arbitrary binary data. A program author can decide how to -manage the program's whole state, possibly spread across many accounts. - -After the runtime executes each of the transaction's instructions, it uses the -account metadata to verify that the access policy was not violated. If a program -violates the policy, the runtime discards all account changes made by all -instructions in the transaction, and marks the transaction as failed. - -### Policy - -After a program has processed an instruction, the runtime verifies that the -program only performed operations it was permitted to, and that the results -adhere to the runtime policy. - -The policy is as follows: - -- Only the owner of the account may change owner. - - And only if the account is writable. - - And only if the account is not executable. - - And only if the data is zero-initialized or empty. -- An account not assigned to the program cannot have its balance decrease. -- The balance of read-only and executable accounts may not change. -- Only the owner may change account size and data. - - And if the account is writable. - - And if the account is not executable. -- Executable is one-way (false->true) and only the account owner may set it. -- No one can make modifications to the `rent_epoch` associated with this account. - -## Balancing the balances - -Before and after each instruction, the sum of all account balances must stay the same. -E.g. if one account's balance is increased, another's must be decreased by the same amount. -Because the runtime cannot see changes to accounts which were not passed to it, -all accounts for which the balances were modified must be passed, -even if they are not needed in the called instruction. - -## Compute Budget - -To prevent abuse of computational resources, each transaction is allocated a -compute budget. The budget specifies a maximum number of compute units that a -transaction can consume, the costs associated with different types of operations -the transaction may perform, and operational bounds the transaction must adhere -to. - -As the transaction is processed, compute units are consumed by its -instructions' programs performing operations such as executing SBF instructions, -calling syscalls, etc. When the transaction consumes its entire budget, or -exceeds a bound, such as attempting a call stack that is too deep or loading -more account data than its limit permits, the runtime halts the transaction -processing and returns an error.
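To reduce the chance of running out of budget mid-transaction, a transaction can explicitly request its compute unit limit up front via the `SetComputeUnitLimit` instruction discussed under prioritization fees below. A minimal sketch (the `payer` keypair, recipient address, and `blockhash` are assumed to come from the surrounding client code):

```rust
use solana_sdk::{
    compute_budget::ComputeBudgetInstruction,
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

fn transfer_with_cu_limit(payer: &Keypair, to: &Pubkey, blockhash: Hash) -> Transaction {
    // Explicitly request 200k compute units for this transaction
    // instead of relying on the default allocation.
    let cu_limit_ix = ComputeBudgetInstruction::set_compute_unit_limit(200_000);
    let transfer_ix = system_instruction::transfer(&payer.pubkey(), to, 1_000);

    let message = Message::new(&[cu_limit_ix, transfer_ix], Some(&payer.pubkey()));
    Transaction::new(&[payer], message, blockhash)
}
```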
- -The following operations incur a compute cost: - -- Executing SBF instructions -- Passing data between programs -- Calling system calls - - logging - - creating program addresses - - cross-program invocations - - ... - -For cross-program invocations, the instructions invoked inherit the budget of -their parent. If an invoked instruction consumes the transaction's remaining -budget, or exceeds a bound, the entire invocation chain and the top level -transaction processing are halted. - -The current [compute -budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) can be found in the Solana Program Runtime. - -### Example Compute Budget - -For example, if the compute budget set in the Solana runtime is: - -```rust -max_units: 1,400,000, -log_u64_units: 100, -create_program_address_units: 1500, -invoke_units: 1000, -max_invoke_stack_height: 5, -max_instruction_trace_length: 64, -max_call_depth: 64, -stack_frame_size: 4096, -log_pubkey_units: 100, -... -``` - -Then any transaction: - -- Could execute 1,400,000 SBF instructions, if it did nothing else. -- Cannot exceed 4k of stack usage. -- Cannot exceed an SBF call depth of 64. -- Cannot exceed invoke stack height of 5 (4 levels of cross-program invocations). - -> **NOTE:** Since the compute budget is consumed incrementally as the transaction executes, -> the total budget consumption will be a combination of the various costs of the -> operations it performs. - -At runtime, a program may log how much of the compute budget remains. See -[debugging](developing/on-chain-programs/debugging.md#monitoring-compute-budget-consumption) -for more information. - -### Prioritization fees - -As part of the Compute Budget, the runtime supports transactions that include an -**optional** fee, known as a -[prioritization fee](./../../transaction_fees.md#prioritization-fee), to prioritize -themselves against others. - -This _prioritization fee_ is calculated by multiplying the number -of _compute units_ by the _compute unit price_ (measured in micro-lamports). -These values may be set via the Compute Budget instructions `SetComputeUnitLimit` -and `SetComputeUnitPrice` once per transaction. - -:::info -You can learn more of the specifics of _how_ and _when_ to set a prioritization fee -on the [transaction fees](./../../transaction_fees.md#prioritization-fee) page. -::: - -### Accounts data size limit - -A transaction should request the maximum bytes of accounts data it is -allowed to load by including a `SetLoadedAccountsDataSizeLimit` instruction; the requested -limit is capped by `MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES`. If no -`SetLoadedAccountsDataSizeLimit` is provided, the transaction defaults to -a limit of `MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES`. - -The `ComputeBudgetInstruction::set_loaded_accounts_data_size_limit` function can be used -to create this instruction: - -```rust -let instruction = ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(100_000); -``` - -## New Features - -As Solana evolves, new features or patches may be introduced that change the -behavior of the cluster and how programs run. Changes in behavior must be -coordinated between the various nodes of the cluster. If nodes do not -coordinate, then these changes can result in a breakdown of consensus. Solana -supports a mechanism called runtime features to facilitate the smooth adoption -of changes.
- -Runtime features are epoch-coordinated events where one or more behavior changes -to the cluster will occur. New changes to Solana that will change behavior are -wrapped with feature gates and disabled by default. The Solana tools are then -used to activate a feature, which marks it pending; once marked pending, the -feature will be activated at the next epoch. - -To determine which features are activated, use the [Solana command-line -tools](cli/install-solana-cli-tools.md): - -```bash -solana feature status -``` - -If you encounter problems, first ensure that the Solana tools version you are -using matches the version returned by `solana cluster-version`. If they do not -match, [install the correct tool suite](cli/install-solana-cli-tools.md). diff --git a/docs/src/developing/programming-model/transactions.md b/docs/src/developing/programming-model/transactions.md deleted file mode 100644 index 1f88d4117b6e1e..00000000000000 --- a/docs/src/developing/programming-model/transactions.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -title: "Transactions" -description: "A Solana transaction consists of one or more instructions, an array of accounts to read and write data from, and one or more signatures." ---- - -On the Solana blockchain, program execution begins with a [transaction](./../../terminology.md#transaction) being submitted to the cluster. With each transaction consisting of one or many [instructions](./../../terminology.md#instruction), the runtime will process each of the instructions contained within the transaction, in order, and atomically. If any part of an instruction fails, then the entire transaction will fail. - -## Overview of a Transaction - -On Solana, clients update the runtime (for example, debiting an account) by submitting a transaction to the cluster. - -This transaction consists of three parts: - -- one or more instructions -- an array of accounts to read or write from -- one or more signatures - -An [instruction](./../../terminology.md#instruction) is the smallest execution logic on Solana. Instructions are essentially calls to update the global Solana state. Instructions invoke programs that make calls to the Solana runtime to update the state (for example, calling the token program to transfer tokens from your account to another account). - -[Programs](./../intro/programs.md) on Solana don’t store data/state; rather, data/state is stored in accounts. - -[Signatures](./../../terminology.md#signature) verify that we have the authority to read or write data to the accounts that we list. - -## Anatomy of a Transaction - -This section covers the binary format of a transaction. - -### Transaction Format - -A transaction contains a [compact-array](#compact-array-format) of signatures, -followed by a [message](#message-format). Each item in the signatures array is -a [digital signature](#signature-format) of the given message. The Solana -runtime verifies that the number of signatures matches the number in the first -8 bits of the [message header](#message-header-format). It also verifies that -each signature was signed by the private key corresponding to the public key at -the same index in the message's account addresses array. - -#### Signature Format - -Each digital signature is in the ed25519 binary format and consumes 64 bytes.
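A small Rust sketch (illustrative only) that checks these sizes with the Solana SDK:

```rust
use solana_sdk::signature::{Keypair, Signer};

fn main() {
    let keypair = Keypair::new();
    let message = b"example message";

    // Sign with the ed25519 private key; a Solana `Signature` is 64 bytes.
    let signature = keypair.sign_message(message);
    assert_eq!(signature.as_ref().len(), 64);

    // The matching public key (an account address) is 32 bytes.
    assert_eq!(keypair.pubkey().as_ref().len(), 32);

    // Anyone holding the public key can verify the signature.
    assert!(signature.verify(keypair.pubkey().as_ref(), message));
}
```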
- -### Message Format - -A message contains a [header](#message-header-format), followed by a -compact-array of [account addresses](#account-addresses-format), followed by a -recent [blockhash](#blockhash-format), followed by a compact-array of -[instructions](#instruction-format). - -#### Message Header Format - -The message header contains three unsigned 8-bit values. The first value is the -number of required signatures in the containing transaction. The second value -is the number of those corresponding account addresses that are read-only. The -third value in the message header is the number of read-only account addresses -not requiring signatures. - -#### Account Addresses Format - -The addresses that require signatures appear at the beginning of the account -address array, with addresses requesting read-write access first, and read-only -accounts following. The addresses that do not require signatures follow the -addresses that do, again with read-write accounts first and read-only accounts -following. - -#### Blockhash Format - -A blockhash contains a 32-byte SHA-256 hash. It is used to indicate when a -client last observed the ledger. Validators will reject transactions when the -blockhash is too old. - -### Instruction Format - -An instruction contains a program id index, followed by a compact-array of -account address indexes, followed by a compact-array of opaque 8-bit data. The -program id index is used to identify an on-chain program that can interpret the -opaque data. The program id index is an unsigned 8-bit index into the message's -array of account addresses. The account address -indexes are each an unsigned 8-bit index into that same array. - -### Compact-Array Format - -A compact-array is serialized as the array length, followed by each array item. -The array length is a special multi-byte encoding called compact-u16. - -#### Compact-u16 Format - -A compact-u16 is a multi-byte encoding of 16 bits. The first byte contains the -lower 7 bits of the value in its lower 7 bits. If the value is above 0x7f, the -high bit is set and the next 7 bits of the value are placed into the lower 7 -bits of a second byte. If the value is above 0x3fff, the high bit is set and -the remaining 2 bits of the value are placed into the lower 2 bits of a third -byte. - -### Account Address Format - -An account address is 32 bytes of arbitrary data. When the address requires a -digital signature, the runtime interprets it as the public key of an ed25519 -keypair. - -## Instructions - -Each [instruction](terminology.md#instruction) specifies a single program, a -subset of the transaction's accounts that should be passed to the program, and a -data byte array that is passed to the program. The program interprets the data -array and operates on the accounts specified by the instruction. The program -can return successfully or with an error code. An error return causes the -entire transaction to fail immediately. - -Programs typically provide helper functions to construct instructions they -support.
For example, the system program provides the following Rust helper to -construct a -[`SystemInstruction::CreateAccount`](https://github.com/solana-labs/solana/blob/6606590b8132e56dab9e60b3f7d20ba7412a736c/sdk/program/src/system_instruction.rs#L63) -instruction: - -```rust -pub fn create_account( - from_pubkey: &Pubkey, - to_pubkey: &Pubkey, - lamports: u64, - space: u64, - owner: &Pubkey, -) -> Instruction { - let account_metas = vec![ - AccountMeta::new(*from_pubkey, true), - AccountMeta::new(*to_pubkey, true), - ]; - Instruction::new_with_bincode( - system_program::id(), - &SystemInstruction::CreateAccount { - lamports, - space, - owner: *owner, - }, - account_metas, - ) -} -``` - -The full implementation can be found here: - -https://github.com/solana-labs/solana/blob/6606590b8132e56dab9e60b3f7d20ba7412a736c/sdk/program/src/system_instruction.rs#L220 - -### Program Id - -The instruction's [program id](./../../terminology.md#program-id) specifies which -program will process this instruction. The program's account's owner specifies -which loader should be used to load and execute the program, and the data -contains information about how the runtime should execute the program. - -In the case of [on-chain SBF programs](./../on-chain-programs/overview.md), -the owner is the SBF Loader and the account data holds the SBF bytecode. Program -accounts are permanently marked as executable by the loader once they are -successfully deployed. The runtime will reject transactions that specify programs -that are not executable. - -Unlike on-chain programs, [Native Programs](../runtime-facilities/programs.md) -are handled differently in that they are built directly into the Solana runtime. - -### Accounts - -The accounts referenced by an instruction represent on-chain state and serve as -both the inputs and outputs of a program. More information about accounts can be -found in the [Accounts](./accounts.md) section. - -### Instruction data - -Each instruction carries a general purpose byte array that is passed to the -program along with the accounts. The contents of the instruction data are program -specific and typically used to convey what operations the program should -perform, and any additional information those operations may need above and -beyond what the accounts contain. - -Programs are free to specify how information is encoded into the instruction -data byte array. The choice of how data is encoded should consider the -overhead of decoding, since that step is performed by the program on-chain. It's -been observed that some common encodings (Rust's bincode for example) are very -inefficient. - -The [Solana Program Library's Token -program](https://github.com/solana-labs/solana-program-library/tree/master/token) -gives one example of how instruction data can be encoded efficiently, but note -that this method only supports fixed-size types. Token utilizes the -[Pack](https://github.com/solana-labs/solana/blob/master/sdk/program/src/program_pack.rs) -trait to encode/decode instruction data for both token instructions as well as -token account states. - -### Multiple instructions in a single transaction - -A transaction can contain instructions in any order. This means a malicious -user could craft transactions that present instructions in an order that the -program has not been protected against. Programs should be hardened to properly -and safely handle any possible instruction sequence. - -One not-so-obvious example is account deinitialization.
Some programs may -attempt to deinitialize an account by setting its lamports to zero, with the -assumption that the runtime will delete the account. This assumption may be -valid between transactions, but it is not between instructions or cross-program -invocations. To harden against this, the program should also explicitly zero out the -account's data. - -An example of where this could be a problem is if a token program, upon -transferring the token out of an account, sets the account's lamports to zero, -assuming it will be deleted by the runtime. If the program does not zero out the -account's data, a malicious user could follow this instruction with another that -transfers the tokens a second time. - -## Signatures - -Each transaction explicitly lists all account public keys referenced by the -transaction's instructions. A subset of those public keys are each accompanied -by a transaction signature. Those signatures signal on-chain programs that the -account holder has authorized the transaction. Typically, the program uses the -authorization to permit debiting the account or modifying its data. More -information about how the authorization is communicated to a program can be -found in [Accounts](./accounts.md#signers). - -## Recent Blockhash - -A transaction includes a recent [blockhash](../../terminology.md#blockhash) to prevent -duplication and to give transactions lifetimes. Any transaction that is -completely identical to a previous one is rejected, so adding a newer blockhash -allows multiple transactions to repeat the exact same action. Transactions also -have lifetimes that are defined by the blockhash, as any transaction whose -blockhash is too old will be rejected. diff --git a/docs/src/developing/transaction_confirmation.md b/docs/src/developing/transaction_confirmation.md deleted file mode 100644 index caed1005d7043c..00000000000000 --- a/docs/src/developing/transaction_confirmation.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: "Transaction Confirmation" ---- - -Problems relating to [transaction confirmation](./../terminology.md#transaction-confirmations) are common among newer developers building applications. This article aims to boost the overall understanding of the confirmation mechanism used on the Solana blockchain, including some recommended best practices. - -## Brief background on transactions - -Let’s first make sure we’re all on the same page and thinking about the same things... - -### What is a transaction? - -Transactions consist of two components: a [message](./../terminology.md#message) and a [list of signatures](./../terminology.md#signature). The transaction message is where the magic happens and at a high level it consists of three components: - -- a **list of instructions** to invoke, -- a **list of accounts** to load, and -- a **“recent blockhash.”** - -In this article, we’re going to be focusing a lot on a transaction’s [recent blockhash](./../terminology.md#blockhash) because it plays a big role in transaction confirmation. - -### Transaction lifecycle refresher - -Below is a high level view of the lifecycle of a transaction. This article will touch on everything except steps 1 and 4. - -1. Create a list of instructions along with the list of accounts that instructions need to read and write -2. Fetch a recent blockhash and use it to prepare a transaction message -3. Simulate the transaction to ensure it behaves as expected -4. Prompt user to sign the prepared transaction message with their private key -5. 
Send the transaction to an RPC node which attempts to forward it to the current block producer -6. Hope that a block producer validates and commits the transaction into their produced block -7. Confirm the transaction has either been included in a block or detect when it has expired - -## What is a Blockhash? - -A [“blockhash”](./../terminology.md#blockhash) refers to the last Proof of History (PoH) hash for a [“slot”](./../terminology.md#slot) (description below). Since Solana uses PoH as a trusted clock, a transaction’s recent blockhash can be thought of as a **timestamp**. - -### Proof of History refresher - -Solana’s Proof of History mechanism uses a very long chain of recursive SHA-256 hashes to build a trusted clock. The “history” part of the name comes from the fact that block producers hash transaction ids into the stream to record which transactions were processed in their block. - -[PoH hash calculation](https://github.com/solana-labs/solana/blob/9488a73f5252ad0d7ea830a0b456d9aa4bfbb7c1/entry/src/poh.rs#L82): `next_hash = hash(prev_hash, hash(transaction_ids))` - -PoH can be used as a trusted clock because each hash must be produced sequentially. Each produced block contains a blockhash and a list of hash checkpoints called “ticks” so that validators can verify the full chain of hashes in parallel and prove that some amount of time has actually passed. The stream of hashes can be broken up into the following time units: individual hashes, “ticks” (a fixed number of hashes), [“slots”](./../terminology.md#slot) (a fixed number of ticks, each lasting roughly 400ms), and epochs (432,000 slots). - -## Transaction Expiration - -By default, all Solana transactions will expire if not committed to a block in a certain amount of time. The **vast majority** of transaction confirmation issues are related to how RPC nodes and validators detect and handle **expired** transactions. A solid understanding of how transaction expiration works should help you diagnose the bulk of your transaction confirmation issues. - -## How does transaction expiration work? - -Each transaction includes a “recent blockhash” which is used as a PoH clock timestamp and expires when that blockhash is no longer “recent” enough. More concretely, Solana validators look up the corresponding slot number for each transaction’s blockhash that they wish to process in a block. If the validator [can’t find a slot number for the blockhash](https://github.com/solana-labs/solana/blob/9488a73f5252ad0d7ea830a0b456d9aa4bfbb7c1/runtime/src/bank.rs#L3687) or if the looked-up slot number is more than 151 slots lower than the slot number of the block being processed, the transaction will be rejected. - -Slots are configured to last about [400ms](https://github.com/solana-labs/solana/blob/47b938e617b77eb3fc171f19aae62222503098d7/sdk/program/src/clock.rs#L12) but often fluctuate between 400ms and 600ms, so a given blockhash can only be used by transactions for about 60 to 90 seconds. - -Pseudocode for an expired transaction: `currentBankSlot > slotForTxRecentBlockhash + 151` - -Pseudocode for a transaction that has not expired: `currentBankSlot - slotForTxRecentBlockhash < 152` - -### Example of transaction expiration - -Let’s walk through a quick example: - -1. A validator is producing a new block for slot #1000 -2. The validator receives a transaction with recent blockhash `1234...` from a user -3. The validator checks the `1234...` blockhash against the list of recent blockhashes leading up to its new block and discovers that it was the blockhash for slot #849 -4. Since slot #849 is exactly 151 slots lower than slot #1000, the transaction hasn’t expired yet and can still be processed! -5. 
But wait, before actually processing the transaction, the validator finishes the block for slot #1000 and starts producing the block for slot #1001 (validators get to produce blocks for 4 consecutive slots). -6. The validator checks the same transaction again and finds that it’s now too old and drops it because it’s now 152 slots lower than the current slot :( - -## Why do transactions expire? - -There’s actually a very good reason for this: it helps validators avoid processing the same transaction twice. - -A naive brute force approach to prevent double processing could be to check every new transaction against the blockchain’s entire transaction history. But by having transactions expire after a short amount of time, validators only need to check if a new transaction is in a relatively small set of _recently_ processed transactions. - -### Other blockchains - -Solana’s approach to preventing double processing is quite different from other blockchains. For example, Ethereum tracks a counter (nonce) for each transaction sender and will only process transactions that use the next valid nonce. - -Ethereum’s approach is simple for validators to implement, but it can be problematic for users. Many people have encountered situations when their Ethereum transactions got stuck in a _pending_ state for a long time and all the later transactions, which used higher nonce values, were blocked from processing. - -### Advantages on Solana - -There are a few advantages to Solana’s approach: - -1. A single fee payer can submit multiple transactions at the same time that are allowed to be processed in any order. This might happen if you’re using multiple applications at the same time. -2. If a transaction doesn’t get committed to a block and expires, users can try again knowing that their previous transaction won’t ever be processed. - -By not using counters, the Solana wallet experience may be easier for users to understand because they can get to success, failure, or expiration states quickly and avoid annoying pending states. - -### Disadvantages on Solana - -Of course there are some disadvantages too: - -1. Validators have to actively track a set of all processed transaction ids to prevent double processing. -2. If the expiration time period is too short, users might not be able to submit their transaction before it expires. - -These disadvantages highlight a tradeoff in how transaction expiration is configured. If the expiration time of a transaction is increased, validators need to use more memory to track more transactions. If the expiration time is decreased, users may not have enough time to submit their transaction. - -Currently, Solana clusters require that transactions use blockhashes that are no more than [151 slots](https://github.com/solana-labs/solana/blob/9488a73f5252ad0d7ea830a0b456d9aa4bfbb7c1/sdk/program/src/clock.rs#L65) old. - -> This [Github issue](https://github.com/solana-labs/solana/issues/23582) contains some calculations that estimate that mainnet-beta validators need about 150MB of memory to track transactions. -> This could be slimmed down in the future if necessary without decreasing expiration time as I’ve detailed in that issue. - -## Transaction confirmation tips - -As mentioned before, blockhashes expire after a time period of only 151 slots, which can pass as quickly as **one minute** when slots are processed within the target time of 400ms.
- -One minute is not a lot of time considering that a client needs to fetch a recent blockhash, wait for the user to sign, and finally hope that the broadcasted transaction reaches a leader that is willing to accept it. Let’s go through some tips to help avoid confirmation failures due to transaction expiration! - -### Fetch blockhashes with the appropriate commitment level - -Given the short expiration time frame, it’s imperative that clients help users create transactions with a blockhash that is as recent as possible. - -When fetching blockhashes, the current recommended RPC API is called [`getLatestBlockhash`](/api/http#getlatestblockhash). By default, this API uses the `"finalized"` commitment level to return the most recently finalized block’s blockhash. However, you can override this behavior by [setting the `commitment` parameter](/api/http#configuring-state-commitment) to a different commitment level. - -**Recommendation** - -The `"confirmed"` commitment level should almost always be used for RPC requests because it’s usually only a few slots behind the `"processed"` commitment and has a very low chance of belonging to a dropped [fork](./../cluster/fork-generation.md). - -But feel free to consider the other options: - -- Choosing `"processed"` will let you fetch the most recent blockhash compared to other commitment levels and therefore gives you the most time to prepare and process a transaction. But due to the prevalence of forking in the Solana protocol, roughly 5% of blocks don’t end up being finalized by the cluster, so there’s a real chance that your transaction uses a blockhash that belongs to a dropped fork. Transactions that use blockhashes for abandoned blocks won’t ever be considered recent by any blocks that are in the finalized blockchain. -- Using the default commitment level `"finalized"` will eliminate any risk that the blockhash you choose will belong to a dropped fork. The tradeoff is that there is typically at least a 32-slot difference between the most recent confirmed block and the most recent finalized block. This tradeoff is pretty severe and effectively reduces the expiration of your transactions by about 13 seconds, but this could be even more during unstable cluster conditions. - -### Use an appropriate preflight commitment level - -If your transaction uses a blockhash that was fetched from one RPC node, and you then send or simulate that transaction with a different RPC node, you could run into issues due to one node lagging behind the other. - -When RPC nodes receive a `sendTransaction` request, they will attempt to determine the expiration block of your transaction using the most recent finalized block or with the block selected by the `preflightCommitment` parameter. A **VERY** common issue is that a received transaction’s blockhash was produced after the block used to calculate the expiration for that transaction. If an RPC node can’t determine when your transaction expires, it will only forward your transaction **one time** and then will **drop** the transaction. - -Similarly, when RPC nodes receive a `simulateTransaction` request, they will simulate your transaction using the most recent finalized block or with the block selected by the `preflightCommitment` parameter. If the block chosen for simulation is older than the block used for your transaction’s blockhash, the simulation will fail with the dreaded “blockhash not found” error.
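For illustration, here is a sketch in Rust that keeps the blockhash fetch and the preflight check on the same commitment level. `RpcSendTransactionConfig` and the client methods come from the `solana_client` crate; the already-signed `tx` is assumed to exist in the surrounding code:

```rust
use solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig};
use solana_sdk::{
    commitment_config::{CommitmentConfig, CommitmentLevel},
    signature::Signature,
    transaction::Transaction,
};

fn send_with_matching_commitment(
    client: &RpcClient,
    tx: &Transaction,
) -> Result<Signature, Box<dyn std::error::Error>> {
    // In a real client, this "confirmed" blockhash is the one placed into
    // `tx` before signing.
    let (_blockhash, _last_valid_block_height) =
        client.get_latest_blockhash_with_commitment(CommitmentConfig::confirmed())?;

    // Run the preflight simulation at the same commitment level so the node
    // can locate the blockhash and compute the transaction's expiration.
    let config = RpcSendTransactionConfig {
        preflight_commitment: Some(CommitmentLevel::Confirmed),
        ..RpcSendTransactionConfig::default()
    };
    let signature = client.send_transaction_with_config(tx, config)?;
    Ok(signature)
}
```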
- -**Recommendation** - -Even if you use `skipPreflight`, **ALWAYS** set the `preflightCommitment` parameter to the same commitment level used to fetch your transaction’s blockhash for both `sendTransaction` and `simulateTransaction` requests. - -### Be wary of lagging RPC nodes when sending transactions - -When your application uses an RPC pool service or when the RPC endpoint differs between creating a transaction and sending a transaction, you need to be wary of situations where one RPC node is lagging behind the other. For example, if you fetch a transaction blockhash from one RPC node then you send that transaction to a second RPC node for forwarding or simulation, the second RPC node might be lagging behind the first. - -**Recommendation** - -For `sendTransaction` requests, clients should keep resending a transaction to an RPC node on a frequent interval so that if an RPC node is slightly lagging behind the cluster, it will eventually catch up and detect your transaction’s expiration properly. - -For `simulateTransaction` requests, clients should use the [`replaceRecentBlockhash`](/api/http#simulatetransaction) parameter to tell the RPC node to replace the simulated transaction’s blockhash with a blockhash that will always be valid for simulation. - -### Avoid reusing stale blockhashes - -Even if your application has fetched a very recent blockhash, be sure that you’re not reusing that blockhash in transactions for too long. The ideal scenario is that a recent blockhash is fetched right before a user signs their transaction. - -**Recommendation for applications** - -Poll for new recent blockhashes on a frequent basis to ensure that whenever a user triggers an action that creates a transaction, your application already has a fresh blockhash that’s ready to go. - -**Recommendation for wallets** - -Poll for new recent blockhashes on a frequent basis and replace a transaction’s recent blockhash right before the user signs the transaction to ensure the blockhash is as fresh as possible. - -### Use healthy RPC nodes when fetching blockhashes - -When you fetch the latest blockhash from an RPC node with the `"confirmed"` commitment level, it will respond with the blockhash for the latest confirmed block that it’s aware of. Solana’s block propagation protocol prioritizes sending blocks to staked nodes so RPC nodes naturally lag about a block behind the rest of the cluster. They also have to do more work to handle application requests and can lag a lot more under heavy user traffic. - -Lagging RPC nodes can therefore respond to blockhash requests with blockhashes that were confirmed by the cluster quite a while ago. By default, a lagging RPC node that detects it is more than 150 slots behind the cluster will stop responding to requests, but just before hitting that threshold they can still return a blockhash that is just about to expire. - -**Recommendation** - -Monitor the health of your RPC nodes to ensure that they have an up-to-date view of the cluster state with one of the following methods: - -1. Fetch your RPC node’s highest processed slot by using the [`getSlot`](/api/http#getslot) RPC API with the `"processed"` commitment level and then call the [`getMaxShredInsertSlot`](/api/http#getmaxshredinsertslot) RPC API to get the highest slot that your RPC node has received a “shred” of a block for. If the difference between these responses is very large, the cluster is producing blocks far ahead of what the RPC node has processed. -2. 
Call the `getLatestBlockhash` RPC API with the `"confirmed"` commitment level on a few different RPC API nodes and use the blockhash from the node that returns the highest slot for its [context slot](/api/http#rpcresponse-structure). - -### Wait long enough for expiration - -**Recommendation** - -When calling the [`getLatestBlockhash`](/api/http#getlatestblockhash) RPC API to get a recent blockhash for your transaction, take note of the `"lastValidBlockHeight"` in the response. - -Then, poll the [`getBlockHeight`](/api/http#getblockheight) RPC API with the “confirmed” commitment level until it returns a block height greater than the previously returned last valid block height. - -### Consider using “durable” transactions - -Sometimes transaction expiration issues are really hard to avoid (e.g. offline signing, cluster instability). If the previous tips are still not sufficient for your use-case, you can switch to using durable transactions (they just require a bit of setup). - -To start using durable transactions, a user first needs to submit a transaction that [invokes instructions that create a special on-chain “nonce” account](https://docs.rs/solana-program/latest/solana_program/system_instruction/fn.create_nonce_account.html) and stores a “durable blockhash” inside of it. At any point in the future (as long as the nonce account hasn’t been used yet), the user can create a durable transaction by following these 2 rules: - -1. The instruction list must start with an [“advance nonce” system instruction](https://docs.rs/solana-program/latest/solana_program/system_instruction/fn.advance_nonce_account.html) which loads their on-chain nonce account -2. The transaction’s blockhash must be equal to the durable blockhash stored by the on-chain nonce account - -Here’s how these transactions are processed by the Solana runtime: - -1. If the transaction’s blockhash is no longer “recent”, the runtime checks if the transaction’s instruction list begins with an “advance nonce” system instruction -2. If so, it then loads the nonce account specified by the “advance nonce” instruction -3. Then it checks that the stored durable blockhash matches the transaction’s blockhash -4. Lastly, it makes sure to advance the nonce account’s stored blockhash to the latest recent blockhash to ensure that the same transaction can never be processed again - -For more details about how these durable transactions work, you can read the [original proposal](./../implemented-proposals/durable-tx-nonces.md) and [check out an example](./clients/javascript-reference#nonceaccount) in the Solana docs. diff --git a/docs/src/developing/versioned-transactions.md b/docs/src/developing/versioned-transactions.md deleted file mode 100644 index 8d942814d7310d..00000000000000 --- a/docs/src/developing/versioned-transactions.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: Versioned Transactions -description: "" ---- - -[Versioned Transactions](./versioned-transactions.md) are the new transaction format that allows for additional functionality in the Solana runtime, including [Address Lookup Tables](./lookup-tables.md). - -While changes to [on-chain](./on-chain-programs/overview.md) programs are **NOT** required to support the new functionality of versioned transactions (or for backwards compatibility), developers **WILL** need to update their client-side code to prevent [errors due to different transaction versions](#max-supported-transaction-version).
- -## Current Transaction Versions - -The Solana runtime supports two transaction versions: - -- `legacy` - older transaction format with no additional benefit -- `0` - added support for [Address Lookup Tables](./lookup-tables.md) - -## Max supported transaction version - -All RPC requests that return a transaction **_should_** specify the highest version of transactions they will support in their application using the `maxSupportedTransactionVersion` option, including [`getBlock`](../api/http#getblock) and [`getTransaction`](../api/http#gettransaction). - -An RPC request will fail if it returns a [Versioned Transaction](./versioned-transactions.md) with a version higher than the set `maxSupportedTransactionVersion` (i.e. if a version `0` transaction is returned when `legacy` is selected). - -> WARNING: -> If no `maxSupportedTransactionVersion` value is set, then only `legacy` transactions will be allowed in the RPC response. Therefore, your RPC requests **WILL** fail if any version `0` transactions are returned. - -## How to set max supported version - -You can set the `maxSupportedTransactionVersion` using both the [`@solana/web3.js`](https://solana-labs.github.io/solana-web3.js/) library and JSON formatted requests directly to an RPC endpoint. - -### Using web3.js - -Using the [`@solana/web3.js`](https://solana-labs.github.io/solana-web3.js/) library, you can retrieve the most recent block or get a specific transaction: - -```js -// connect to the `devnet` cluster and get the current `slot` -const connection = new web3.Connection(web3.clusterApiUrl("devnet")); -const slot = await connection.getSlot(); - -// get the latest block (allowing for v0 transactions) -const block = await connection.getBlock(slot, { - maxSupportedTransactionVersion: 0, -}); - -// get a specific transaction (allowing for v0 transactions) -const getTx = await connection.getTransaction( - "3jpoANiFeVGisWRY5UP648xRXs3iQasCHABPWRWnoEjeA93nc79WrnGgpgazjq4K9m8g2NJoyKoWBV1Kx5VmtwHQ", - { - maxSupportedTransactionVersion: 0, - }, -); -``` - -### JSON requests to the RPC - -Using a standard JSON formatted POST request, you can set the `maxSupportedTransactionVersion` when retrieving a specific block: - -```bash -curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d \ -'{"jsonrpc": "2.0", "id":1, "method": "getBlock", "params": [430, { - "encoding":"json", - "maxSupportedTransactionVersion":0, - "transactionDetails":"full", - "rewards":false -}]}' -``` - -## How to create a Versioned Transaction - -Versioned transactions can be created similarly to the older method of creating transactions. There are differences in using certain libraries that should be noted. - -Below is an example of how to create a Versioned Transaction, using the `@solana/web3.js` library, to perform a SOL transfer between two accounts. - -#### Notes: - -- `payer` is a valid `Keypair` wallet, funded with SOL -- `toAccount` is a valid `Keypair` - -Firstly, import the web3.js library and create a `connection` to your desired cluster. 
- -We then define the recent `blockhash` and `minRent` we will need for our transaction and the account: - -```js -const web3 = require("@solana/web3.js"); - -// connect to the cluster and get the minimum rent for rent exempt status -const connection = new web3.Connection(web3.clusterApiUrl("devnet")); -let minRent = await connection.getMinimumBalanceForRentExemption(0); -let blockhash = await connection - .getLatestBlockhash() - .then((res) => res.blockhash); -``` - -Create an `array` of all the `instructions` you desire to send in your transaction. In the example below, we are creating a simple SOL transfer instruction: - -```js -// create an array with your desired `instructions` -const instructions = [ - web3.SystemProgram.transfer({ - fromPubkey: payer.publicKey, - toPubkey: toAccount.publicKey, - lamports: minRent, - }), -]; -``` - -Next, construct a `MessageV0` formatted transaction message with your desired `instructions`: - -```js -// create v0 compatible message -const messageV0 = new web3.TransactionMessage({ - payerKey: payer.publicKey, - recentBlockhash: blockhash, - instructions, -}).compileToV0Message(); -``` - -Then, create a new `VersionedTransaction`, passing in our v0 compatible message: - -```js -const transaction = new web3.VersionedTransaction(messageV0); - -// sign your transaction with the required `Signers` -transaction.sign([payer]); -``` - -You can sign the transaction by either: - -- passing an array of `signatures` into the `VersionedTransaction` method, or -- calling the `transaction.sign()` method, passing an array of the required `Signers` - -> NOTE: -> After calling the `transaction.sign()` method, all the previous transaction `signatures` will be fully replaced by new signatures created from the provided `Signers`. - -After your `VersionedTransaction` has been signed by all required accounts, you can send it to the cluster and `await` the response: - -```js -// send our v0 transaction to the cluster -const txid = await connection.sendTransaction(transaction); -console.log(`https://explorer.solana.com/tx/${txid}?cluster=devnet`); -``` - -> NOTE: -> Unlike `legacy` transactions, sending a `VersionedTransaction` via `sendTransaction` does **NOT** support transaction signing via passing in an array of `Signers` as the second parameter. You will need to sign the transaction before calling `connection.sendTransaction()`. - -## More Resources - -- using [Versioned Transactions for Address Lookup Tables](./lookup-tables.md#how-to-create-an-address-lookup-table) -- view an [example of a v0 transaction](https://explorer.solana.com/tx/3jpoANiFeVGisWRY5UP648xRXs3iQasCHABPWRWnoEjeA93nc79WrnGgpgazjq4K9m8g2NJoyKoWBV1Kx5VmtwHQ/?cluster=devnet) on Solana Explorer -- read the [accepted proposal](./../proposals/versioned-transactions.md) for Versioned Transaction and Address Lookup Tables diff --git a/docs/src/economics_overview.md b/docs/src/economics_overview.md deleted file mode 100644 index 51997f07cc94c4..00000000000000 --- a/docs/src/economics_overview.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Solana Economics Overview ---- - -**Subject to change.** - -Solana’s crypto-economic system is designed to promote a healthy, long term self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients. Their contributions to the network, state validation, and their requisite incentive mechanisms are discussed below. 
- -The main channels of participant remittances are referred to as -protocol-based rewards and transaction fees. Protocol-based rewards -are generated from inflationary issuances from a protocol-defined inflation schedule. These rewards will constitute the total reward delivered to validation clients, with the remainder sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on a predefined issuance schedule, will drive the majority of participant incentives in the network. - -These protocol-based rewards are calculated per epoch and distributed across the active -delegated stake and validator set (per validator commission). As discussed further below, the per annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with supply predictability which supports long term economic stability and security. - -Transaction fees are participant-to-participant transfers, attached to network interactions as a motivation and compensation for the inclusion and execution of a proposed transaction. A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below. - -First, an overview of the inflation design is presented. This section starts with defining and clarifying [Terminology](inflation/terminology.md) commonly used subsequently in the discussion of inflation and the related components. Following that, we outline Solana's proposed [Inflation Schedule](inflation/inflation_schedule.md), i.e. the specific parameters that uniquely parameterize the protocol-driven inflationary issuance over time. Next is a brief section on [Adjusted Staking Yield](inflation/adjusted_staking_yield.md), and how token dilution might influence staking behavior. - -An overview of [Transaction Fees](transaction_fees.md) on Solana is followed by a discussion of [Storage Rent Economics](storage_rent_economics.md) in which we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. diff --git a/docs/src/validator/faq.md b/docs/src/faq.md similarity index 72% rename from docs/src/validator/faq.md rename to docs/src/faq.md index 4ae38152aefc14..35a17467f8c668 100644 --- a/docs/src/validator/faq.md +++ b/docs/src/faq.md @@ -5,23 +5,23 @@ sidebar_label: Frequently Asked Questions ### What is a validator? -A validator is a computer that runs a software program to verify transactions that are added to the Solana blockchain. A validator can be a voting validator or a non voting validator. To learn more, see [what is a validator](./overview/what-is-a-validator.md). +A validator is a computer that runs a software program to verify transactions that are added to the Solana blockchain. A validator can be a voting validator or a non-voting validator. To learn more, see [what is a validator](./what-is-a-validator.md). ### What is an RPC node? -An RPC node is also a computer that runs the validator software. Typically, an RPC node does not vote on the network. Instead the RPC node's job is to respond to API requests. See [what is an rpc node](./overview/what-is-an-rpc-node.md) for more information. +An RPC node is also a computer that runs the validator software. Typically, an RPC node does not vote on the network. Instead, the RPC node's job is to respond to API requests. See [what is an RPC node](./what-is-an-rpc-node.md) for more information. ### What is a cluster? 
-For a definition and an overview of the topic, see [what is a cluster?](../cluster/overview.md). Solana maintains several clusters. For details on each, see [Solana clusters](../clusters.md). +For a definition and an overview of the topic, see [what is a cluster?](./clusters/index.md). Solana maintains several clusters. For details on each, see [Solana clusters](./clusters/available.md). ### What is Proof of Stake? -Proof of Stake (PoS) is a blockchain architecture. Solana is a Proof of Stake blockchain. To read more, see [Proof of Stake](./overview/what-is-a-validator.md#proof-of-stake). +Proof of Stake (PoS) is a blockchain architecture. Solana is a Proof of Stake blockchain. To read more, see [Proof of Stake](./what-is-a-validator.md#proof-of-stake). ### What is Proof of Work? Is running a Solana validator the same as mining? -No, a Solana validator uses Proof of Stake. It does not use Proof of Work (often called mining). See [Proof of Work: For Contrast](./overview/what-is-a-validator.md#proof-of-stake). +No, a Solana validator uses Proof of Stake. It does not use Proof of Work (often called mining). See [Proof of Work: For Contrast](./what-is-a-validator.md#proof-of-stake). ### Who can operate a validator? @@ -33,18 +33,18 @@ No, all Solana clusters are permissionless. There is no limit to the number of ### What are the hardware requirements for running a validator? -See [validator requirements](../running-validator/validator-reqs.md). +See [validator requirements](./operations/requirements.md). ### Can I run my validator at home? Anyone can join the cluster, including home users. You must make sure that your system can perform well and keep up with the cluster. Many home internet connections are not suitable to run a Solana validator. Most operators choose to operate their validator in a data center either by using a server provider or by supplying their own hardware at a colocation data center. -See the [validator requirements](../running-validator/validator-reqs.md) for more information. +See the [validator requirements](./operations/requirements.md) for more information. ### What skills does a Solana validator operator need? -See [Solana validator prerequisites](./overview/validator-prerequisites.md). +See [Solana validator prerequisites](./operations/prerequisites.md). ### What are the economics of running a validator? -See [economics of running a validator](./overview/running-validator-or-rpc-node.md#economics-of-running-a-consensus-validator). \ No newline at end of file +See [economics of running a validator](./operations/validator-or-rpc-node.md#economics-of-running-a-consensus-validator). \ No newline at end of file diff --git a/docs/src/getstarted/hello-world.md deleted file mode 100644 index beb8eacf5fc3e6..00000000000000 --- a/docs/src/getstarted/hello-world.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -title: "Hello World Quickstart Guide" -description: 'This "hello world" quickstart guide will demonstrate how to set up, build, and deploy your first Solana program in your browser with Solana Playground.' -keywords: - - playground - - solana pg - - on chain - - rust - - native program - - tutorial - - intro to solana development - - blockchain developer - - blockchain tutorial - - web3 developer ---- - -For this "hello world" quickstart guide, we will use [Solana Playground](https://beta.solpg.io), a browser-based IDE to develop and deploy our Solana program. To use it, you do **NOT** have to install any software on your computer. 
Simply open Solana Playground in your browser of choice, and you are ready to write and deploy Solana programs. - -## What you will learn - -- How to get started with Solana Playground -- How to create a Solana wallet on Playground -- How to write a basic Solana program in Rust -- How to build and deploy a Solana Rust program -- How to interact with your on chain program using JavaScript - -## Using Solana Playground - -[Solana Playground](https://beta.solpg.io) is a browser-based application that lets you write, build, and deploy on chain Solana programs. All from your browser. No installation needed. - -It is a great developer resource for getting started with Solana development, especially on Windows. - -### Import our example project - -In a new tab in your browser, open our example "_Hello World_" project on Solana Playground: https://beta.solpg.io/6314a69688a7fca897ad7d1d - -Next, import the project into your local workspace by clicking the "**Import**" icon and naming your project `hello_world`. - -![Import the get started Solana program on Solana Playground](/img/quickstarts/solana-get-started-import-on-playground.png) - -> If you do **not** import the program into **your** Solana Playground, then you will **not** be able to make changes to the code. But you **will** still be able to build and deploy the code to a Solana cluster. - -### Create a Playground wallet - -Normally with [local development](./local.md), you will need to create a file system wallet for use with the Solana CLI. But with the Solana Playground, you only need to click a few buttons to create a browser-based wallet. - -:::caution -Your _Playground Wallet_ will be saved in your browser's local storage. Clearing your browser cache will remove your saved wallet. When creating a new wallet, you will have the option to save a local copy of your wallet's keypair file. -::: - -Click on the red status indicator button at the bottom left of the screen, (optionally) save your wallet's keypair file to your computer for backup, then click "**Continue**". - -After your Playground Wallet is created, you will notice the bottom of the window now states your wallet's address, your SOL balance, and the Solana cluster you are connected to (Devnet is usually the default/recommended, but a "localhost" [test validator](./local.md) is also acceptable). - -## Create a Solana program - -The code for your Rust based Solana program will live in your `src/lib.rs` file. Inside `src/lib.rs` you will be able to import your Rust crates and define your logic. Open your `src/lib.rs` file within Solana Playground. - -### Import the `solana_program` crate - -At the top of `lib.rs`, we import the `solana-program` crate and bring our needed items into the local namespace: - -```rust -use solana_program::{ - account_info::AccountInfo, - entrypoint, - entrypoint::ProgramResult, - pubkey::Pubkey, - msg, -}; -``` - -### Write your program logic - -Every Solana program must define an `entrypoint` that tells the Solana runtime where to start executing your on chain code. 
Your program's [entrypoint](../developing/on-chain-programs/developing-rust#program-entrypoint) should provide a public function named `process_instruction`: - -```rust -// declare and export the program's entrypoint -entrypoint!(process_instruction); - -// program entrypoint's implementation -pub fn process_instruction( - program_id: &Pubkey, - accounts: &[AccountInfo], - instruction_data: &[u8] -) -> ProgramResult { - // log a message to the blockchain - msg!("Hello, world!"); - - // gracefully exit the program - Ok(()) -} -``` - -Every on chain program should return the `Ok` [result enum](https://doc.rust-lang.org/std/result/) with a value of `()`. This tells the Solana runtime that your program executed successfully without errors. - -Our program above will simply [log a message](../developing/on-chain-programs/debugging#logging) of "_Hello, world!_" to the blockchain cluster, then gracefully exit with `Ok(())`. - -### Build your program - -On the left sidebar, select the "**Build & Deploy**" tab. Next, click the "Build" button. - -If you look at the Playground's terminal, you should see your Solana program begin to compile. Once complete, you will see a success message. - -![Viewing a successful build of your Rust based program](/img/quickstarts/solana-get-started-successful-build.png) - -:::caution -You may receive _warnings_ when your program is compiled due to unused variables. Don't worry, these warnings will not affect your build. They are due to our very simple program not using all the variables we declared in the `process_instruction` function. -::: - -### Deploy your program - -You can click the "Deploy" button to deploy your first program to the Solana blockchain, specifically to your selected cluster (e.g. Devnet, Testnet, etc.). - -After each deployment, you will see your Playground Wallet balance change. By default, Solana Playground will automatically request SOL airdrops on your behalf to ensure your wallet has enough SOL to cover the cost of deployment. - -> Note: -> If you need more SOL, you can airdrop more by typing the airdrop command in the playground terminal: - -```sh -solana airdrop 2 -``` - -![Build and deploy your Solana program to the blockchain](/img/quickstarts/solana-get-started-build-and-deploy.png) - -### Find your program id - -When executing a program using [web3.js](../developing/clients/javascript-reference.md) or from [another Solana program](../developing/programming-model/calling-between-programs.md), you will need to provide the `program id` (aka public address of your program). - -Inside Solana Playground's **Build & Deploy** sidebar, you can find your `program id` under the **Program Credentials** dropdown. - -#### Congratulations! - -You have successfully set up, built, and deployed a Solana program using the Rust language directly in your browser. Next, we will demonstrate how to interact with your on chain program. - -## Interact with your on chain program - -Once you have successfully deployed a Solana program to the blockchain, you will want to be able to interact with that program. - -Like most developers creating dApps and websites, we will interact with our on chain program using JavaScript. Specifically, we will use the open source [NPM package](https://www.npmjs.com/package/@solana/web3.js) `@solana/web3.js` to aid in our client application. - -:::info -This web3.js package is an abstraction layer on top of the [JSON RPC API](/api) that reduces the need for rewriting common boilerplate, helping to simplify your client side application code. 
-::: - -### Initialize client - -We will be using Solana Playground for the client generation. Create a client folder by running `run` command in the playground terminal: - -```bash -run -``` - -We have created `client` folder and a default `client.ts`. This is where we will work for the rest of our `hello world` program. - -### Playground globals - -In playground, there are many utilities that are globally available for us to use without installing or setting up anything. Most important ones for our `hello world` program are `web3` for `@solana/web3.js` and `pg` for Solana Playground utilities. - -:::info -You can go over all of the available globals by pressing `CTRL+SPACE` (or `CMD+SPACE` on macOS) inside the editor. -::: - -### Call the program - -To execute your on chain program, you must send a [transaction](../developing/programming-model/transactions.md) to it. Each transaction submitted to the Solana blockchain contains a listing of instructions (and the program's that instruction will interact with). - -Here we create a new transaction and add a single `instruction` to it: - -```js -// create an empty transaction -const transaction = new web3.Transaction(); - -// add a hello world program instruction to the transaction -transaction.add( - new web3.TransactionInstruction({ - keys: [], - programId: new web3.PublicKey(pg.PROGRAM_ID), - }), -); -``` - -Each `instruction` must include all the keys involved in the operation and the program ID we want to execute. In this example `keys` is empty because our program only logs `hello world` and doesn't need any accounts. - -With our transaction created, we can submit it to the cluster: - -```js -// send the transaction to the Solana cluster -console.log("Sending transaction..."); -const txHash = await web3.sendAndConfirmTransaction( - pg.connection, - transaction, - [pg.wallet.keypair], -); -console.log("Transaction sent with hash:", txHash); -``` - -:::info -The first signer in the signers array is the transaction fee payer by default. We are signing with our keypair `pg.wallet.keypair`. -::: - -### Run the application - -With the client application written, you can run the code via the same `run` command. - -Once your application completes, you will see output similar to this: - -```sh -Running client... - client.ts: - My address: GkxZRRNPfaUfL9XdYVfKF3rWjMcj5md6b6mpRoWpURwP - My balance: 5.7254472 SOL - Sending transaction... - Transaction sent with hash: 2Ra7D9JoqeNsax9HmNq6MB4qWtKPGcLwoqQ27mPYsPFh3h8wignvKB2mWZVvdzCyTnp7CEZhfg2cEpbavib9mCcq -``` - -### Get transaction logs - -We will be using `solana-cli` directly in playground to get the information about any transaction: - -```sh -solana confirm -v -``` - -Change `` with the hash you received from calling `hello world` program. - -You should see `Hello, world!` in the **Log Messages** section of the output. 🎉 - -#### Congratulations!!! - -You have now written a client application for your on chain program. You are now a Solana developer! - -PS: Try to update your program's message then re-build, re-deploy, and re-execute your program. 
- -## Next steps - -See the links below to learn more about writing Solana programs: - -- [Set up your local development environment](./local.md) -- [Overview of writing Solana programs](../developing/on-chain-programs/overview) -- [Learn more about developing Solana programs with Rust](../developing/on-chain-programs/developing-rust) -- [Debugging on chain programs](../developing/on-chain-programs/debugging) diff --git a/docs/src/getstarted/local.md deleted file mode 100644 index 3a4358cac20843..00000000000000 --- a/docs/src/getstarted/local.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: "Local Development Quickstart" -description: "This quickstart guide will demonstrate how to quickly install and set up your local Solana development environment." -keywords: - - rust - - cargo - - toml - - program - - tutorial - - intro to solana development - - blockchain developer - - blockchain tutorial - - web3 developer ---- - -This quickstart guide will demonstrate how to quickly install and set up your local development environment, getting you ready to start developing and deploying Solana programs to the blockchain. - -## What you will learn - -- How to install the Solana CLI locally -- How to set up a localhost Solana cluster/validator -- How to create a Solana wallet for developing -- How to airdrop SOL tokens for your wallet - -## Install the Solana CLI - -To interact with the Solana network from your terminal, you will need to install the [Solana CLI tool suite](./../cli/install-solana-cli-tools) on your local system. - 
-<details> -<summary>macOS / Linux / Windows Subsystem for Linux (WSL)</summary> -Open your favorite terminal application and install the CLI by running: - -```bash -sh -c "$(curl -sSfL https://release.solana.com/stable/install)" -``` - -Depending on your system, the end of the installer output may prompt you to update your `PATH`, with a message like: - -```bash -Please update your PATH environment variable to include the solana programs: -``` - -If you get the above message, copy and paste the recommended command below it to update `PATH`. - -Confirm you have the desired version of `solana` installed by running: - -```bash -solana --version -``` - -After a successful install, `solana-install update` may be used to easily update the Solana software to a newer version at any time. - 
-</details> - 
-<details> -<summary>Windows</summary> - -:::caution -[WSL](https://learn.microsoft.com/en-us/windows/wsl/install) is the recommended environment for Windows users. -::: - -- Open a Command Prompt (`cmd.exe`) as an Administrator - - - Search for Command Prompt in the Windows search bar. When the Command - Prompt app appears, right-click and select “Open as Administrator”. - If you are prompted by a pop-up window asking “Do you want to allow this app to - make changes to your device?”, click Yes. - -- Copy and paste the following command, then press Enter to download the Solana - installer into a temporary directory: - -```bash -cmd /c "curl https://release.solana.com/stable/solana-install-init-x86_64-pc-windows-msvc.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs" -``` - -- Copy and paste the following command, then press Enter to install the latest - version of Solana. If you see a security pop-up by your system, please allow the program to run. - -```bash -C:\solana-install-tmp\solana-install-init.exe stable -``` - -- When the installer is finished, press Enter. - -- Close the command prompt window and re-open a new command prompt window as a - normal user -- Confirm you have the desired version of `solana` installed by entering: - -```bash -solana --version -``` - -After a successful install, `solana-install update` may be used to easily update the Solana software to a newer version at any time. 
-</details> - - -## Set up a localhost blockchain cluster - -The Solana CLI comes with the [test validator](./../developing/test-validator.md) built in. This command line tool will allow you to run a full blockchain cluster on your machine. - -```bash -solana-test-validator -``` - -> **PRO TIP:** -> Run the Solana test validator in a new/separate terminal window that will remain open. The command line program must remain running for your localhost cluster to remain online and ready for action. - -Configure your Solana CLI to use your localhost validator for all your future terminal commands: - -```bash -solana config set --url localhost -``` - -At any time, you can view your current Solana CLI configuration settings: - -```bash -solana config get -``` - -## Create a file system wallet - -To deploy a program with Solana CLI, you will need a Solana wallet with SOL tokens to pay for the cost of transactions. - -Let's create a simple file system wallet for testing: - -```bash -solana-keygen new -``` - -By default, the `solana-keygen` command will create a new file system wallet located at `~/.config/solana/id.json`. You can manually specify the output file location using the `--outfile /path` option. - -> **NOTE:** -> If you already have a file system wallet saved at the default location, this command will **NOT** overwrite it (unless you explicitly force an overwrite using the `--force` flag). - -### Set your new wallet as default - -With your new file system wallet created, you must tell the Solana CLI to use this wallet to deploy and take ownership of your on chain program: - -```bash -solana config set -k ~/.config/solana/id.json -``` - -## Airdrop SOL tokens to your wallet - -Once your new wallet is set as the default, you can request a free airdrop of SOL tokens to it: - -```bash -solana airdrop 2 -``` - -> **NOTE:** -> The `solana airdrop` command has a limit of how many SOL tokens can be requested _per airdrop_ for each cluster (localhost, testnet, or devnet). If your airdrop transaction fails, lower your airdrop request quantity and try again. - -You can check your current wallet's SOL balance any time: - -```bash -solana balance -``` - -## Next steps - -See the links below to learn more about writing Rust based Solana programs: - -- [Create and deploy a Solana Rust program](./rust.md) -- [Overview of writing Solana programs](../developing/on-chain-programs/overview) diff --git a/docs/src/getstarted/overview.md deleted file mode 100644 index ddc0aa94fa6eed..00000000000000 --- a/docs/src/getstarted/overview.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: "Introduction to Solana Development" -description: - "Learn about the basic development concepts of the Solana blockchain." -keywords: - - accounts - - transactions - - nft - - solana basics - - tutorial - - intro to solana development - - blockchain developer - - blockchain tutorial - - web3 developer ---- - -Welcome to the Solana developer docs! - -This guide contains step-by-step instructions on how to get started. 
Before we -get into the hands on part of the guide, we'll cover basic concepts that all -developers need to be familiar with to build on Solana: - -- Transactions -- Accounts -- Programs - -## What you will learn - -- What the developer workflows look like -- What transactions, accounts, and programs are -- Test networks and other tools - -## An overview of Solana developer workflows - -The Solana network can be thought of as one massive global computer where anyone -can store and execute code for a fee. Deployed code is called a program, often -referred to as a "smart contract" on other blockchains. To interact with a -program, you need to send a transaction on the blockchain from a client. - -Here's a high level representation of this. It’s important to note that this is -an oversimplification of the Solana network for the purposes of learning in an -easy-to-understand way. - -![Solana developer workflows program-client model](/img/quickstarts/solana-overview-client-program.png) - -### Program development - -The first development workflow allows you to create and deploy custom Rust, C -and C++ programs directly to the blockchain. Once these programs are deployed, -anyone who knows how to communicate with them can use them. - -You can communicate with these programs by writing dApps with any of the -available client SDKs (or the [CLI](../cli.md)), all of which use the -[JSON RPC API](../api) under the hood. - -### Client development - -The second development workflow is the dApp side where you can write dApps that -communicate with deployed programs. Your apps can submit transactions with -instructions to these programs via a client SDK to create a wide variety of -applications such as wallets, exchanges and more. The most popular apps are -browser extension wallets and web apps, but you can build mobile/desktop apps or -anything that can communicate with the JSON RPC API. - -These two pieces work together to create a network of dApps and programs that -can communicate with each other to update the state and query the blockchain. - -## Wallets - -A wallet is a pair of public and private keys that are used to verify actions on -the blockchain. The public key is used to identify the account and the private -key is used to sign transactions. - -## Transactions - -A transaction is the fundamental unit of activity on the Solana blockchain: it's -a signed data structure that contains instructions for the network to perform a -particular operation like transferring tokens. - -You need a transaction to create, update or delete data on-chain. You can read -data without a transaction. - -All transactions interact with programs on the network - these can be system -programs or user built programs. Transactions tell the program what they want to -do with a bunch of instructions, and if they're valid, the program will execute -them and update the state of the blockchain. Think of it like a write command -that can be rejected if certain conditions aren't met. - -Here's a visual representation of what a transaction contains: -![Visual layout of a transaction](/img/transaction.svg) - -- Signatures: An array of digital signatures from the transaction's signers. -- Message: The actual instructions that the transaction is issuing to the - network. - - Message header: 3 `uint8s` describing how many accounts will sign the - payload, how many won’t, and how many are read-only. - - Account addresses: an array of addresses of the accounts that will be used - in the transaction. 
- - Recent blockhash: a unique value that identifies a recent block - this - ensures the transaction is not too old and is not re-processed. - - Instructions: which program to call, which accounts to use, and any - additional data needed for the program to execute the instruction. - -Transactions can be created and signed using clients via SDKs, or even on-chain -programs. - -You can learn more about transactions -[here](../developing/programming-model/transactions.md). - -### Instructions - -Instructions are the most basic operational unit on Solana. A transaction can -contain one or more instructions. Instructions are executed sequentially in the -order they are provided in the transaction by programs on the blockchain. If any -part of an instruction fails, the entire transaction will fail. - -Here's what an instruction looks like: - -| Item | Description | | ------------ | -------------------------------------------------------------------------------------------------------- | | `Program ID` | The ID of the program being called | | `Accounts` | The accounts that the instruction wants to read or modify | | `Data` | Input data provided to the program as additional information or parameters in the format of a byte array | - -You can read more about instructions -[here](../developing/programming-model/transactions#instructions). - -### Transaction Fees - -Every time you submit a transaction, somebody on the network is providing space -and processing power to make it happen. To facilitate this, transactions on -Solana require a fee to be paid in Lamports, which are the smallest units of SOL -(like cents to a dollar or paise to a rupee). One SOL is equal to 1,000,000,000 -Lamports, and one Lamport has a value of 0.000000001 SOL. This fee is paid to -the validators who process the transaction. - -Transaction fees are calculated based on two main parts: - -- a statically set base fee per signature, and -- the computational resources used during the transaction, measured in - "[_compute units_](../terminology.md#compute-units)" - -The more work a transaction requires, the more compute units it will use, and -the more it will cost. - -You can read more about transaction fees [here](../transaction_fees.md). - -## Accounts - -Accounts on Solana are storage spaces that can hold arbitrary data up to 10MB. -They're used to store data, user programs, and native system programs. - -If a program needs to store state between transactions, it does so using -accounts. This means that all programs on Solana are stateless - they don't -store any state data, only code. If an account stores program code, it's marked -"executable" and can process instructions. - -The easiest way to think of an account is like a file. Users can have many -different files. Developers can write programs that can "talk" to these files. -In the same way that a Linux user uses a path to look up a file, a Solana client -uses an address to look up an account. The address is a 256-bit public key. Also -like a file, an account includes metadata that tells the runtime who is allowed -to access the data and how. This prevents unauthorized changes to the data in -the account. - -Unlike a file, the account also includes metadata about its own lifetime. -Solana accounts have a unique lifecycle. When an account is created, it needs to -be assigned some space, and tokens are required to rent this space. If an -account doesn't have enough tokens to cover the rent, it will be removed. 
-However, if the account does hold enough tokens to cover the rent for two years, -it's considered "rent-exempt" and won't be deleted. - -You can read more about accounts -[here](../developing/programming-model/accounts.md). - -## Programs - -Programs are the foundation of the Solana blockchain. They're responsible for -everything that happens on the network: creating accounts, processing -transactions, collecting fees, and more. - -Programs process instructions from both end users and other programs. All -programs are stateless: any data they interact with is stored in separate -accounts that are passed in via instructions. - -There are two sets of programs that are maintained by the Solana Labs team: -[Native Programs](../developing/runtime-facilities/programs.md) and the -[Solana Program Library (SPL)](https://spl.solana.com/). These serve as core -building blocks for on-chain interactions. Native programs are used for core -blockchain functionality like creating new accounts, assigning ownership, -transferring SOL, and more. SPL programs are used for creating, swapping, and -lending tokens, as well as generating stake pools and maintaining an on-chain -name service. - -You can interact with both native programs and SPL programs easily using the -Solana CLI and the SDKs, allowing you to create complete dApps without writing -Rust. You can also build on top of any user programs that have been deployed to -the network - all you need is the program's address and how it works: the -account structures, instructions, and error codes. - -Developers most commonly write programs in Rust using frameworks such as Anchor. -However, programs can be written in any language that compiles to BPF, including -C++ and Move. - -You can learn more about programs [here](../developing/intro/programs.md). - -## Testing and developing environments - -When developing on Solana you have a few options for environments. - -The easiest and quickest way to get started is the -[Solana Playground](https://beta.solpg.io) - a browser based IDE that allows you -to write, deploy, and test programs. - -The most popular setup is [local development](local.md) with a local validator -that you run on your machine - this allows you to test your programs locally -before deploying them to any network. - -In each environment, you'll be using one of three networks: - -- Mainnet Beta - the "production" network where all the action happens. - Transactions cost real money here. -- Testnet - used for stress testing recent releases. Focused on network - performance, stability, and validator behavior. -- Devnet - the primary network for development. Most closely resembles Mainnet - Beta, but tokens are not real. - -Devnet has a faucet that allows you to get free SOL to test with. It costs $0 to -do development on Solana. - -Check out the [clusters page](../clusters.md) for more information on these. - -## Next steps - -You're now ready to get started building on Solana! 
- -- [Deploy your first Solana program in the browser](./hello-world.md) -- [Set up your local development environment](./local.md) -- [Get started building programs locally with Rust](./rust.md) -- [Overview of writing Solana programs](../developing/on-chain-programs/overview) diff --git a/docs/src/getstarted/rust.md deleted file mode 100644 index 661806928c01b1..00000000000000 --- a/docs/src/getstarted/rust.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: "Rust Program Quickstart" -description: "This quickstart guide will demonstrate how to quickly set up, build, and deploy your first Rust based Solana program to the blockchain." -keywords: - - rust - - cargo - - toml - - program - - tutorial - - intro to solana development - - blockchain developer - - blockchain tutorial - - web3 developer ---- - -Rust is the most common programming language to write Solana programs with. This quickstart guide will demonstrate how to quickly set up, build, and deploy your first Rust based Solana program to the blockchain. - -> **NOTE:** -> This guide uses the Solana CLI and assumes you have set up your local development environment. Check out our [local development quickstart guide](./local.md) to quickly get set up. - -## What you will learn - -- How to install the Rust language locally -- How to initialize a new Solana Rust program -- How to code a basic Solana program in Rust -- How to build and deploy your Rust program - -## Install Rust and Cargo - -To be able to compile Rust based Solana programs, install the Rust language and Cargo (the Rust package manager) using [Rustup](https://rustup.rs/): - -```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -``` - -## Run your localhost validator - -The Solana CLI comes with the [test validator](../developing/test-validator.md) built in. This command line tool will allow you to run a full blockchain cluster on your machine. - -```bash -solana-test-validator -``` - -> **PRO TIP:** -> Run the Solana test validator in a new/separate terminal window that will remain open. This command line program must remain running for your localhost validator to remain online and ready for action. - -Configure your Solana CLI to use your localhost validator for all your future terminal commands and Solana program deployment: - -```bash -solana config set --url localhost -``` - -## Create a new Rust library with Cargo - -Solana programs written in Rust are _libraries_ which are compiled to [BPF bytecode](../developing/on-chain-programs/faq.md#berkeley-packet-filter-bpf) and saved in the `.so` format. - -Initialize a new Rust library named `hello_world` via the Cargo command line: - -```bash -cargo init hello_world --lib -cd hello_world -``` - -Add the `solana-program` crate to your new Rust library: - -```bash -cargo add solana-program -``` - -Open your `Cargo.toml` file and add these required Rust library configuration settings, updating your project name as appropriate: - -```toml -[lib] -name = "hello_world" -crate-type = ["cdylib", "lib"] -``` - -## Create your first Solana program - -The code for your Rust based Solana program will live in your `src/lib.rs` file. Inside `src/lib.rs` you will be able to import your Rust crates and define your logic. Open your `src/lib.rs` file in your favorite editor. 
- -At the top of `lib.rs`, import the `solana-program` crate and bring our needed items into the local namespace: - -```rust -use solana_program::{ - account_info::AccountInfo, - entrypoint, - entrypoint::ProgramResult, - pubkey::Pubkey, - msg, -}; -``` - -Every Solana program must define an `entrypoint` that tells the Solana runtime where to start executing your on chain code. Your program's [entrypoint](../developing/on-chain-programs/developing-rust#program-entrypoint) should provide a public function named `process_instruction`: - -```rust -// declare and export the program's entrypoint -entrypoint!(process_instruction); - -// program entrypoint's implementation -pub fn process_instruction( - program_id: &Pubkey, - accounts: &[AccountInfo], - instruction_data: &[u8] -) -> ProgramResult { - // log a message to the blockchain - msg!("Hello, world!"); - - // gracefully exit the program - Ok(()) -} -``` - -Every on chain program should return the `Ok` [result enum](https://doc.rust-lang.org/std/result/) with a value of `()`. This tells the Solana runtime that your program executed successfully without errors. - -The program above will simply [log a message](../developing/on-chain-programs/debugging#logging) of "_Hello, world!_" to the blockchain cluster, then gracefully exit with `Ok(())`. - -## Build your Rust program - -Inside a terminal window, you can build your Solana Rust program by running the following command in the root of your project (i.e. the directory with your `Cargo.toml` file): - -```bash -cargo build-bpf -``` - -> **NOTE:** -> After each time you build your Solana program, the above command will output the build path of your compiled program's `.so` file and the default keyfile that will be used for the program's address. -> `cargo build-bpf` installs the toolchain from the currently installed Solana CLI tools. You may need to upgrade those tools if you encounter any version incompatibilities. - -## Deploy your Solana program - -Using the Solana CLI, you can deploy your program to your currently selected cluster: - -```bash -solana program deploy ./target/deploy/hello_world.so -``` - -Once your Solana program has been deployed (and the transaction [finalized](../cluster/commitments.md)), the above command will output your program's public address (aka its "program id"). - -```bash -# example output -Program Id: EFH95fWg49vkFNbAdw9vy75tM7sWZ2hQbTTUmuACGip3 -``` - -#### Congratulations! - -You have successfully set up, built, and deployed a Solana program using the Rust language. - -> PS: Check your Solana wallet's balance again after you deploy. See how much SOL it cost to deploy your simple program? - -## Next steps - -See the links below to learn more about writing Rust based Solana programs: - -- [Overview of writing Solana programs](../developing/on-chain-programs/overview) -- [Learn more about developing Solana programs with Rust](../developing/on-chain-programs/developing-rust) -- [Debugging on chain programs](../developing/on-chain-programs/debugging) diff --git a/docs/src/history.md deleted file mode 100644 index a3fd7a7c114e5c..00000000000000 --- a/docs/src/history.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: History ---- - -In November of 2017, Anatoly Yakovenko published a whitepaper describing Proof -of History, a technique for keeping time between computers that do not trust -one another. 
From Anatoly's previous experience designing distributed systems -at Qualcomm, Mesosphere and Dropbox, he knew that a reliable clock makes -network synchronization very simple. When synchronization is simple the -resulting network can be blazing fast, bound only by network bandwidth. - -Anatoly watched as blockchain systems without clocks, such as Bitcoin and -Ethereum, struggled to scale beyond 15 transactions per second worldwide when -centralized payment systems such as Visa required peaks of 65,000 tps. Without -a clock, it was clear they'd never graduate to being the global payment system -or global supercomputer most had dreamed them to be. When Anatoly solved the -problem of getting computers that don’t trust each other to agree on time, he -knew he had the key to bring 40 years of distributed systems research to the -world of blockchain. The resulting cluster wouldn't be just 10 times faster, or -a 100 times, or a 1,000 times, but 10,000 times faster, right out of the gate! - -Anatoly's implementation began in a private codebase and was implemented in the -C programming language. Greg Fitzgerald, who had previously worked with Anatoly -at semiconductor giant Qualcomm Incorporated, encouraged him to reimplement the -project in the Rust programming language. Greg had worked on the LLVM compiler -infrastructure, which underlies both the Clang C/C++ compiler as well as the -Rust compiler. Greg claimed that the language's safety guarantees would improve -software productivity and that its lack of a garbage collector would allow -programs to perform as well as those written in C. Anatoly gave it a shot and -just two weeks later, had migrated his entire codebase to Rust. Sold. With -plans to weave all the world's transactions together on a single, scalable -blockchain, Anatoly called the project Loom. - -On February 13th of 2018, Greg began prototyping the first open source -implementation of Anatoly's whitepaper. The project was published to GitHub -under the name Silk in the loomprotocol organization. On February 28th, Greg -made his first release, demonstrating 10 thousand signed transactions could be -verified and processed in just over half a second. Shortly after, another -former Qualcomm cohort, Stephen Akridge, demonstrated throughput could be -massively improved by offloading signature verification to graphics processors. -Anatoly recruited Greg, Stephen and three others to co-found a company, then -called Loom. - -Around the same time, Ethereum-based project Loom Network sprung up and many -people were confused about whether they were the same project. The Loom team -decided it would rebrand. They chose the name Solana, a nod to a small beach -town North of San Diego called Solana Beach, where Anatoly, Greg and Stephen -lived and surfed for three years when they worked for Qualcomm. On March 28th, -the team created the Solana GitHub organization and renamed Greg's prototype -Silk to Solana. - -In June of 2018, the team scaled up the technology to run on cloud-based -networks and on July 19th, published a 50-node, permissioned, public testnet -consistently supporting bursts of 250,000 transactions per second. In a later -release in December, called v0.10 Pillbox, the team published a permissioned -testnet running 150 nodes on a gigabit network and demonstrated soak tests -processing an _average_ of 200 thousand transactions per second with bursts -over 500 thousand. 
The project was also extended to support on-chain programs -written in the C programming language and run concurrently in a safe execution -environment called SBF. diff --git a/docs/src/implemented-proposals/abi-management.md index 74505087dbab12..88e27c9a1a9066 100644 --- a/docs/src/implemented-proposals/abi-management.md +++ b/docs/src/implemented-proposals/abi-management.md @@ -130,7 +130,7 @@ name suggests, there is no need to implement `AbiEnumVisitor` for other types. To summarize this interplay, `serde` handles the recursive serialization control flow in tandem with `AbiDigester`. The initial entry point in tests and child `AbiDigester`s use `AbiExample` recursively to create an example object -hierarchal graph. And `AbiDigester` uses `AbiEnumVisitor` to inquiry the actual +hierarchical graph. And `AbiDigester` uses `AbiEnumVisitor` to query the actual ABI information using the constructed sample. `Default` isn't enough for `AbiExample`. Various collection's `::default()` is @@ -142,7 +142,7 @@ On the other hand, ABI digesting can't be done only with `AbiExample`, either. `AbiEnumVisitor` is required because all variants of an `enum` cannot be traversed just with a single variant of it as an ABI example. -Digestable information: +Digestible information: - rust's type name - `serde`'s data type name @@ -152,7 +152,7 @@ Digestible information: - `enum`: normal variants and `struct`- and `tuple`- styles. - attributes: `serde(serialize_with=...)` and `serde(skip)` -Not digestable information: +Not digestible information: - Any custom serialize code path not touched by the sample provided by `AbiExample`. (technically not possible) diff --git a/docs/src/implemented-proposals/commitment.md index 3297dd9fe74bef..43b6f8a7fd861c 100644 --- a/docs/src/implemented-proposals/commitment.md +++ b/docs/src/implemented-proposals/commitment.md @@ -4,7 +4,7 @@ title: Commitment The commitment metric aims to give clients a measure of the network confirmation and stake levels on a particular block. Clients can then use this information to -derive their own [measures of commitment](../cluster/commitments.md). +derive their own [measures of commitment](../consensus/commitments.md). # Calculation RPC diff --git a/docs/src/implemented-proposals/durable-tx-nonces.md index 72aa701f391f64..d2de019db5c793 100644 --- a/docs/src/implemented-proposals/durable-tx-nonces.md +++ b/docs/src/implemented-proposals/durable-tx-nonces.md @@ -71,8 +71,8 @@ stored hash, and thus unusable. To initialize a newly created account, an `InitializeNonceAccount` instruction must be issued. This instruction takes one parameter, the `Pubkey` of the account's -[authority](../offline-signing/durable-nonce.md#nonce-authority). Nonce accounts -must be [rent-exempt](rent.md#two-tiered-rent-regime) to meet the data-persistence +[authority](../cli/examples/durable-nonce.md#nonce-authority). Nonce accounts +must be [rent-exempt](./rent.md#two-tiered-rent-regime) to meet the data-persistence requirements of the feature, and as such, require that sufficient lamports be deposited before they can be initialized. Upon successful initialization, the cluster's most recent blockhash is stored along with specified nonce authority @@ -83,7 +83,7 @@ value. It stores the cluster's most recent blockhash in the account's state data, failing if that matches the value already stored there. 
This check prevents replaying transactions within the same block. -Due to nonce accounts' [rent-exempt](rent.md#two-tiered-rent-regime) requirement, +Due to nonce accounts' [rent-exempt](./rent.md#two-tiered-rent-regime) requirement, a custom withdraw instruction is used to move funds out of the account. The `WithdrawNonceAccount` instruction takes a single argument, lamports to withdraw, and enforces rent-exemption by preventing the account's balance from falling @@ -92,12 +92,12 @@ would be zero lamports, which makes the account eligible for deletion. This account closure detail has an additional requirement that the stored nonce value must not match the cluster's most recent blockhash, as per `AdvanceNonceAccount`. -The account's [nonce authority](../offline-signing/durable-nonce.md#nonce-authority) +The account's [nonce authority](../cli/examples/durable-nonce.md#nonce-authority) can be changed using the `AuthorizeNonceAccount` instruction. It takes one parameter, the `Pubkey` of the new authority. Executing this instruction grants full control over the account and its balance to the new authority. -> `AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current [nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the account to sign the transaction. +> `AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current [nonce authority](../cli/examples/durable-nonce.md#nonce-authority) for the account to sign the transaction. ### Runtime Support diff --git a/docs/src/implemented-proposals/implemented-proposals.md b/docs/src/implemented-proposals/index.md similarity index 100% rename from docs/src/implemented-proposals/implemented-proposals.md rename to docs/src/implemented-proposals/index.md diff --git a/docs/src/implemented-proposals/leader-validator-transition.md index 3793cb890b5b28..e3281252f136e4 100644 --- a/docs/src/implemented-proposals/leader-validator-transition.md +++ b/docs/src/implemented-proposals/leader-validator-transition.md @@ -2,15 +2,23 @@ title: Leader-to-Validator Transition --- -A validator typically spends its time validating blocks. If, however, a staker delegates its stake to a validator, it will occasionally be selected as a _slot leader_. As a slot leader, the validator is responsible for producing blocks during an assigned _slot_. A slot has a duration of some number of preconfigured _ticks_. The duration of those ticks are estimated with a _PoH Recorder_ described later in this document. +A validator typically spends its time validating blocks. If, however, a staker +delegates its stake to a validator, it will occasionally be selected as a _slot +leader_. As a slot leader, the validator is responsible for producing blocks +during an assigned _slot_. A slot has a duration of some number of preconfigured +_ticks_. The duration of those ticks is estimated with a _PoH Recorder_ +described later in this document. ## BankFork -BankFork tracks changes to the bank state over a specific slot. Once the final tick has been registered the state is frozen. Any attempts to write to are rejected. +BankFork tracks changes to the bank state over a specific slot. Once the final +tick has been registered the state is frozen. Any attempts to write to it are +rejected. ## Validator -A validator operates on many different concurrent forks of the bank state until it generates a PoH hash with a height within its leader slot. 
+A validator operates on many different concurrent forks of the bank state until +it generates a PoH hash with a height within its leader slot. ## Slot Leader @@ -18,35 +26,58 @@ A slot leader builds blocks on top of only one fork, the one it last voted on. ## PoH Recorder -Slot leaders and validators use a PoH Recorder for both estimating slot height and for recording transactions. +Slot leaders and validators use a PoH Recorder for both estimating slot height +and for recording transactions. ### PoH Recorder when Validating -The PoH Recorder acts as a simple VDF when validating. It tells the validator when it needs to switch to the slot leader role. Every time the validator votes on a fork, it should use the fork's latest [blockhash](../terminology.md#blockhash) to re-seed the VDF. Re-seeding solves two problems. First, it synchronizes its VDF to the leader's, allowing it to more accurately determine when its leader slot begins. Second, if the previous leader goes down, all wallclock time is accounted for in the next leader's PoH stream. For example, if one block is missing when the leader starts, the block it produces should have a PoH duration of two blocks. The longer duration ensures the following leader isn't attempting to snip all the transactions from the previous leader's slot. +The PoH Recorder acts as a simple VDF when validating. It tells the validator +when it needs to switch to the slot leader role. Every time the validator votes +on a fork, it should use the fork's latest +[blockhash](https://solana.com/docs/terminology#blockhash) to re-seed the VDF. +Re-seeding solves two problems. First, it synchronizes its VDF to the leader's, +allowing it to more accurately determine when its leader slot begins. Second, if +the previous leader goes down, all wallclock time is accounted for in the next +leader's PoH stream. For example, if one block is missing when the leader +starts, the block it produces should have a PoH duration of two blocks. The +longer duration ensures the following leader isn't attempting to snip all the +transactions from the previous leader's slot. ### PoH Recorder when Leading -A slot leader use the PoH Recorder to record transactions, locking their positions in time. The PoH hash must be derived from a previous leader's last block. If it isn't, its block will fail PoH verification and be rejected by the cluster. +A slot leader uses the PoH Recorder to record transactions, locking their +positions in time. The PoH hash must be derived from a previous leader's last +block. If it isn't, its block will fail PoH verification and be rejected by the +cluster. -The PoH Recorder also serves to inform the slot leader when its slot is over. The leader needs to take care not to modify its bank if recording the transaction would generate a PoH height outside its designated slot. The leader, therefore, should not commit account changes until after it generates the entry's PoH hash. When the PoH height falls outside its slot any transactions in its pipeline may be dropped or forwarded to the next leader. Forwarding is preferred, as it would minimize network congestion, allowing the cluster to advertise higher TPS capacity. +The PoH Recorder also serves to inform the slot leader when its slot is over. +The leader needs to take care not to modify its bank if recording the +transaction would generate a PoH height outside its designated slot. The leader, +therefore, should not commit account changes until after it generates the +entry's PoH hash.
When the PoH height falls outside its slot any transactions in +its pipeline may be dropped or forwarded to the next leader. Forwarding is +preferred, as it would minimize network congestion, allowing the cluster to +advertise higher TPS capacity. ## Validator Loop -The PoH Recorder manages the transition between modes. Once a ledger is replayed, the validator can run until the recorder indicates it should be the slot leader. As a slot leader, the node can then execute and record transactions. +The PoH Recorder manages the transition between modes. Once a ledger is +replayed, the validator can run until the recorder indicates it should be the +slot leader. As a slot leader, the node can then execute and record +transactions. -The loop is synchronized to PoH and does a synchronous start and stop of the slot leader functionality. After stopping, the validator's TVU should find itself in the same state as if a different leader had sent it the same block. The following is pseudocode for the loop: +The loop is synchronized to PoH and does a synchronous start and stop of the +slot leader functionality. After stopping, the validator's TVU should find +itself in the same state as if a different leader had sent it the same block. +The following is pseudocode for the loop: 1. Query the LeaderScheduler for the next assigned slot. -2. Run the TVU over all the forks. 1. TVU will send votes to what it believes is the "best" fork. 2. After each vote, restart the PoH Recorder to run until the next assigned - - slot. - +2. Run the TVU over all the forks. +   1. TVU will send votes to what it believes is the "best" fork. +   2. After each vote, restart the PoH Recorder to run until the next assigned +      slot. 3. When time to be a slot leader, start the TPU. Point it to the last fork the - TVU voted on. - -4. Produce entries until the end of the slot. 1. For the duration of the slot, the TVU must not vote on other forks. 2. After the slot ends, the TPU freezes its BankFork. After freezing, - - the TVU may resume voting. - +4. Produce entries until the end of the slot. +   1. For the duration of the slot, the TVU must not vote on other forks. +   2. After the slot ends, the TPU freezes its BankFork. After freezing, the TVU +      may resume voting. 5. Goto 1. diff --git a/docs/src/implemented-proposals/reliable-vote-transmission.md b/docs/src/implemented-proposals/reliable-vote-transmission.md index e29c6c6fd2117c..c4632be4a97fea 100644 --- a/docs/src/implemented-proposals/reliable-vote-transmission.md +++ b/docs/src/implemented-proposals/reliable-vote-transmission.md @@ -8,7 +8,7 @@ Validator votes are messages that have a critical function for consensus and con 1. Leader rotation is triggered by PoH, which is a clock with high drift. So many nodes are likely to have an incorrect view of whether the next leader is active in realtime or not. 2. The next leader may easily be flooded. Thus a DDOS would not only prevent delivery of regular transactions, but also consensus messages. -3. UDP is unreliable, and our asynchronous protocol requires any message that is transmitted to be retransmitted until it is observed in the ledger.
Retransmission could potentially cause an unintentional _thundering herd_ against the leader with a large number of validators. Worst case flood would be `(num_nodes * num_retransmits)`. 4. Tracking if the vote has been transmitted or not via the ledger does not guarantee it will appear in a confirmed block. The current observed block may be unrolled. Validators would need to maintain state for each vote and fork. ## Design @@ -20,7 +20,7 @@ Validator votes are messages that have a critical function for consensus and con Each vote transaction should maintain a `wallclock` in its data. The merge strategy for Votes will keep the last N set of votes as configured by the local client. For push/pull the vector is traversed recursively and each Transaction is treated as an individual CrdsValue with its own local wallclock and signature. -Gossip is designed for efficient propagation of state. Messages that are sent through gossip-push are batched and propagated with a minimum spanning tree to the rest of the network. Any partial failures in the tree are actively repaired with the gossip-pull protocol while minimizing the amount of data transfered between any nodes. +Gossip is designed for efficient propagation of state. Messages that are sent through gossip-push are batched and propagated with a minimum spanning tree to the rest of the network. Any partial failures in the tree are actively repaired with the gossip-pull protocol while minimizing the amount of data transferred between any nodes. ## How this design solves the Challenges diff --git a/docs/src/implemented-proposals/repair-service.md b/docs/src/implemented-proposals/repair-service.md index 2de8105fb87406..ea51a33bc4e85c 100644 --- a/docs/src/implemented-proposals/repair-service.md +++ b/docs/src/implemented-proposals/repair-service.md @@ -54,7 +54,7 @@ The different protocol strategies to address the above challenges: Blockstore tracks the latest root slot. RepairService will then periodically iterate every fork in blockstore starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` - repair reqeusts per iteration. Shred repair should prioritize repairing + repair requests per iteration. Shred repair should prioritize repairing forks based on the leader's fork weight. Validators should only send repair requests to validators who have marked that slot as completed in their EpochSlots. Validators should prioritize repairing shreds in each slot diff --git a/docs/src/implemented-proposals/rpc-transaction-history.md b/docs/src/implemented-proposals/rpc-transaction-history.md index c8eb878eae45ce..54288ad9659bd7 100644 --- a/docs/src/implemented-proposals/rpc-transaction-history.md +++ b/docs/src/implemented-proposals/rpc-transaction-history.md @@ -4,19 +4,19 @@ There's a need for RPC to serve at least 6 months of transaction history. The current history, on the order of days, is insufficient for downstream users. 6 months of transaction data cannot be stored practically in a validator's -rocksdb ledger so an external data store is necessary. The validator's -rocksdb ledger will continue to serve as the primary data source, and then will -fall back to the external data store. +rocksdb ledger so an external data store is necessary. The validator's rocksdb +ledger will continue to serve as the primary data source, and then will fall +back to the external data store. 
The affected RPC endpoints are: -- [getFirstAvailableBlock](../api/http#getfirstavailableblock) -- [getConfirmedBlock](../api/http#getconfirmedblock) -- [getConfirmedBlocks](../api/http#getconfirmedblocks) -- [getConfirmedSignaturesForAddress](../api/http#getconfirmedsignaturesforaddress) -- [getConfirmedTransaction](../api/http#getconfirmedtransaction) -- [getSignatureStatuses](../api/http#getsignaturestatuses) -- [getBlockTime](../api/http#getblocktime) +- [getFirstAvailableBlock](https://solana.com/docs/rpc/http/getfirstavailableblock) +- [getConfirmedBlock](https://solana.com/docs/rpc/deprecated/getconfirmedblock) +- [getConfirmedBlocks](https://solana.com/docs/rpc/deprecated/getconfirmedblocks) +- [getConfirmedSignaturesForAddress](https://solana.com/docs/rpc/http/getconfirmedsignaturesforaddress) +- [getConfirmedTransaction](https://solana.com/docs/rpc/deprecated/getconfirmedtransaction) +- [getSignatureStatuses](https://solana.com/docs/rpc/http/getsignaturestatuses) +- [getBlockTime](https://solana.com/docs/rpc/http/getblocktime) Some system design constraints: @@ -40,19 +40,19 @@ store. A BigTable instance is used to hold all transaction data, broken up into different tables for quick searching. -New data may be copied into the instance at anytime without affecting the existing -data, and all data is immutable. Generally the expectation is that new data -will be uploaded once an current epoch completes but there is no limitation on -the frequency of data dumps. +New data may be copied into the instance at any time without affecting the +existing data, and all data is immutable. Generally the expectation is that new +data will be uploaded once the current epoch completes but there is no limitation +on the frequency of data dumps. Cleanup of old data is automatic by configuring the data retention policy of the -instance tables appropriately, it just disappears. Therefore the order of when data is -added becomes important. For example if data from epoch N-1 is added after data -from epoch N, the older epoch data will outlive the newer data. However beyond -producing _holes_ in query results, this kind of unordered deletion will -have no ill effect. Note that this method of cleanup effectively allows for an -unlimited amount of transaction data to be stored, restricted only by the -monetary costs of doing so. +instance tables appropriately; expired data just disappears. Therefore the order of when +data is added becomes important. For example if data from epoch N-1 is added +after data from epoch N, the older epoch data will outlive the newer data. +However beyond producing _holes_ in query results, this kind of unordered +deletion will have no ill effect. Note that this method of cleanup effectively +allows for an unlimited amount of transaction data to be stored, restricted only +by the monetary costs of doing so. The table layout supports the existing RPC endpoints only. New RPC endpoints in the future may require additions to the schema and potentially iterating over @@ -61,15 +61,15 @@ all transactions to build up the necessary metadata. ## Accessing BigTable BigTable has a gRPC endpoint that can be accessed using the -[tonic](https://crates.io/crates/crate)] and the raw protobuf API, as currently no -higher-level Rust crate for BigTable exists. Practically this makes parsing the -results of BigTable queries more complicated but is not a significant issue. +[tonic](https://crates.io/crates/tonic) crate and the raw protobuf API, as currently +no higher-level Rust crate for BigTable exists.
Practically this makes parsing +the results of BigTable queries more complicated but is not a significant issue. ## Data Population -The ongoing population of instance data will occur on an epoch cadence through the -use of a new `solana-ledger-tool` command that will convert rocksdb data for a -given slot range into the instance schema. +The ongoing population of instance data will occur on an epoch cadence through +the use of a new `solana-ledger-tool` command that will convert rocksdb data for +a given slot range into the instance schema. The same process will be run once, manually, to backfill the existing ledger data. @@ -80,8 +80,8 @@ This table contains the compressed block data for a given slot. The row key is generated by taking the 16 digit lower case hexadecimal representation of the slot, to ensure that the oldest slot with a confirmed -block will always be first when the rows are listed. eg, The row key for slot -42 would be 000000000000002a. +block will always be first when the rows are listed. e.g. the row key for slot 42 +would be 000000000000002a. The row data is a compressed `StoredConfirmedBlock` struct. @@ -89,19 +89,33 @@ The row data is a compressed `TransactionByAddrInfo` struct. This table contains the transactions that affect a given address. -The row key is `/`. The row -data is a compressed `TransactionByAddrInfo` struct. +The row key is +`/`. +The row data is a compressed `TransactionByAddrInfo` struct. Taking the one's complement of the slot ensures that -the newest slot with transactions that affect an address will always -be listed first. +the newest slot with transactions that affect an address will always be listed +first. -Sysvar addresses are not indexed. However frequently used programs such as -Vote or System are, and will likely have a row for every confirmed slot. +Sysvar addresses are not indexed. However frequently used programs such as Vote +or System are, and will likely have a row for every confirmed slot. ### Transaction Signature Lookup Table: `tx` -This table maps a transaction signature to its confirmed block, and index within that block. +This table maps a transaction signature to its confirmed block, and index within +that block. The row key is the base58-encoded transaction signature. The row data is a compressed `TransactionInfo` struct. + +### Entries Table: `entries` + +> Support for the `entries` table was added in v1.18.0. + +This table contains data about the entries in a slot. + +The row key is the same as a `block` row key. + +The row data is a compressed `Entries` struct, which is a list of entry-summary +data, including hash, number of hashes since previous entry, number of +transactions, and starting transaction index. diff --git a/docs/src/implemented-proposals/staking-rewards.md b/docs/src/implemented-proposals/staking-rewards.md index a85a23e1fec9fe..fb9d437f85fb84 100644 --- a/docs/src/implemented-proposals/staking-rewards.md +++ b/docs/src/implemented-proposals/staking-rewards.md @@ -30,4 +30,4 @@ Solana's trustless sense of time and ordering provided by its PoH data structure As discussed in the [Economic Design](ed_overview/ed_overview.md) section, annual validator interest rates are to be specified as a function of total percentage of circulating supply that has been staked. The cluster rewards validators who are online and actively participating in the validation process throughout the entirety of their _validation period_.
For validators that go offline/fail to validate transactions during this period, their annual reward is effectively reduced. -Similarly, we may consider an algorithmic reduction in a validator's active amount staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered ‘active’ \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a supermajority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the ‘active’ amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set. +Similarly, we may consider an algorithmic reduction in a validator's active staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered ‘active’ \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a supermajority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the ‘active’ amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set. diff --git a/docs/src/implemented-proposals/testing-programs.md b/docs/src/implemented-proposals/testing-programs.md index 03651293ab72ea..b4054c1fecc98e 100644 --- a/docs/src/implemented-proposals/testing-programs.md +++ b/docs/src/implemented-proposals/testing-programs.md @@ -32,7 +32,7 @@ trait SyncClient { } ``` -Users send transactions and asynchrounously and synchrounously await results. +Users send transactions and await results asynchronously or synchronously. ### ThinClient for Clusters diff --git a/docs/src/implemented-proposals/tower-bft.md b/docs/src/implemented-proposals/tower-bft.md index cdc936af394267..c8a7819bbfec03 100644 --- a/docs/src/implemented-proposals/tower-bft.md +++ b/docs/src/implemented-proposals/tower-bft.md @@ -14,14 +14,14 @@ For brevity this design assumes that a single voter with a stake is deployed as ## Time -The Solana cluster generates a source of time via a Verifiable Delay Function we are calling [Proof of History](../cluster/synchronization.md). +The Solana cluster generates a source of time via a Verifiable Delay Function we are calling [Proof of History](../consensus/synchronization.md). The unit of time is called a "slot". Each slot has a designated leader that can produce a block `B`. The `slot` of block `B` is designated `slot(B)`. A leader does not necessarily need to generate a block for its slot, in which case there may not be blocks for some slots. -For more details, see [fork generation](../cluster/fork-generation.md) and [leader rotation](../cluster/leader-rotation.md). +For more details, see [fork generation](../consensus/fork-generation.md) and [leader rotation](../consensus/leader-rotation.md). ## Votes @@ -154,7 +154,7 @@ ancestors.
Each validator maintains a vote tower `T` which follows the rules described above in [Vote Tower](#vote-tower), which is a sequence of blocks it has voted for (initially empty). The variable `l` records the length of the stack. For each entry in the tower, denoted by `B = T(x)` for `x < l` where `B` is the `xth` entry in the tower, we record also a value `confcount(B)`. Define the lock expiration slot `lockexp(B) := slot(B) + 2 ^ confcount(B)`. -The validator `i` runs a voting loop as as follows. Let `B` be the heaviest +The validator `i` runs a voting loop as follows. Let `B` be the heaviest block returned by the fork choice rule above [Fork Choice](#fork-choice). If `i` has not voted for `B` before, then `i` votes for `B` so long as the following conditions are satisfied: 1. Respecting lockouts: For any block `B′` in the tower that is not an ancestor of `B`, `lockexp(B′) ≤ slot(B)`. diff --git a/docs/src/implemented-proposals/transaction-fees.md b/docs/src/implemented-proposals/transaction-fees.md index 042db21a50af76..1163c3fcaed434 100644 --- a/docs/src/implemented-proposals/transaction-fees.md +++ b/docs/src/implemented-proposals/transaction-fees.md @@ -6,7 +6,7 @@ title: Deterministic Transaction Fees Before sending a transaction to the cluster, a client may query the network to determine what the transaction's fee will be via the rpc request -[getFeeForMessage](../api/http#getfeeformessage). +[getFeeForMessage](https://solana.com/docs/rpc/http/getfeeformessage). ## Fee Parameters diff --git a/docs/src/index.mdx b/docs/src/index.mdx index 2d24609c4438e2..422404b0a7379b 100644 --- a/docs/src/index.mdx +++ b/docs/src/index.mdx @@ -9,56 +9,49 @@ description: "Solana is a high performance network that is utilized for a range # displayed_sidebar: introductionSidebar --- -# Solana Documentation +# Solana Validator Documentation Solana is a blockchain built for mass adoption. It's a high performance network that is utilized for a range of use cases, including finance, NFTs, payments, and gaming. Solana operates as a single global state machine, and is open, interoperable and decentralized. -## Getting started +## Command Line Interface and Tool Suite -Dive right into Solana to start building or setup your tooling. +To get started using the Solana Command Line (CLI) tools: -- [Setup local environment](/cli) - Install the Solana CLI to get your local - development environment setup -- [Hello World in your browser](getstarted/hello-world) - Build and deploy your - first on-chain Solana program, directly in your browser using Solana - Playground +- [Install the Solana CLI Tool Suite](./cli/install.md) - Quickly get set up + locally with the CLI, optionally build from source +- [Introduction to the CLI conventions](./cli/intro.md) - Understand the common + conventions used within the CLI tool suite +- [Choose a cluster](./cli/examples/choose-a-cluster.md) - Select a Solana + network cluster to connect to (e.g. `devnet`, `testnet`, `mainnet-beta`) +- [Create a wallet](./cli/wallets/index.md) - Create a command line wallet for + use within the CLI and beyond -## Start learning - -Build a strong understanding of the core concepts that make Solana different -from other blockchains.
- -- [Transactions](./developing/programming-model/transactions) - Collection of - instructions for the blockchain to execute -- [Accounts](./developing/programming-model/accounts) - Data and state storage - mechanism for Solana -- [Programs](./developing/intro/programs) - The executable code used to perform - actions on the blockchain -- [Cross-Program Invocation](./developing/programming-model/calling-between-programs) - - Core of the "composability" of Solana, this is how programs can "call" each - other. - -## Understanding the architecture +## Understanding the Architecture Get to know the underlying architecture of how the proof-of-stake blockchain -works. +works: -- [Validators](./validator/anatomy) - the individual nodes that are the backbone - of the network -- [Clusters](./cluster/overview) - a collection of validators that work together - for consensus +- [Clusters](./clusters/index.md) - a collection of validators that work + together for consensus +- [Validators](./validator/anatomy.md) - the individual nodes that are the + backbone of the network +- [Runtime](./runtime/programs.md) - the native programs that are core to the + validator and the blockchain -## Running a validator +## Running a Validator Explore what it takes to operate a Solana validator and help secure the network. -- [System requirements](./running-validator/validator-reqs) - Recommended - hardware requirements and expected SOL needed to operate a validator -- [Quick start guide](./validator/get-started/setup-a-validator) - Setup a - validator and get connected to a cluster for the first time +- [Validator vs RPC node](./operations/validator-or-rpc-node.md) - Understand + the important differences between voting and non-voting validators on the + network +- [System requirements](./operations/requirements.md) - Recommended hardware + requirements and expected SOL needed to operate a validator +- [Quick start guide](./operations/setup-a-validator.md) - Set up a validator and + get connected to a cluster for the first time ## Learn more diff --git a/docs/src/inflation/adjusted_staking_yield.md b/docs/src/inflation/adjusted_staking_yield.md deleted file mode 100644 index ba1974b4f158e8..00000000000000 --- a/docs/src/inflation/adjusted_staking_yield.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Adjusted Staking Yield ---- - -### Token Dilution - -Similarly we can look at the expected _Staked Dilution_ (i.e. _Adjusted Staking Yield_) and _Un-staked Dilution_ as previously defined. Again, _dilution_ in this context is defined as the change in fractional representation (i.e. ownership) of a set of tokens within a larger set. In this sense, dilution can be a positive value: an increase in fractional ownership (staked dilution / _Adjusted Staking Yield_), or a negative value: a decrease in fractional ownership (un-staked dilution). - -We are interested in the relative change in ownership of staked vs un-staked tokens as the overall token pool increases with inflation issuance. As discussed, this issuance is distributed only to staked token holders, increasing the staked token fractional representation of the _Total Current Supply_. - -Continuing with the same _Inflation Schedule_ parameters as above, we see the fraction of staked supply grow as shown below. - -![](/img/p_ex_staked_supply_w_range_initial_stake.png) - -Due to this relative change in representation, the proportion of stake of any token holder will also change as a function of the _Inflation Schedule_ and the proportion of all tokens that are staked.
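To get a concrete sense of the magnitudes involved before the derivations that follow, consider a purely illustrative year with inflation rate $I = 8\%$ (the proposed initial rate) in which all issuance goes to stakers. Applying the un-staked dilution formula derived below:

$$
D_{us} = -\frac{I}{I + 1} = -\frac{0.08}{1.08} \approx -7.4\%
$$

That is, an un-staked holder's fractional ownership shrinks by roughly $7.4\%$ over that year, independent of how much of the total supply is staked.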
- -Of initial interest, however, is the _dilution of **un-staked** tokens_, or $D_{us}$. In the case of un-staked tokens, token dilution is only a function of the _Inflation Schedule_ because the amount of un-staked tokens doesn't change over time. - -This can be seen by explicitly calculating un-staked dilution as $D_{us}$. The un-staked proportion of the token pool at time $t$ is $P_{us}(t_{N})$ and $I_{t}$ is the incremental inflation rate applied between any two consecutive time points. $SOL_{us}(t)$ and $SOL_{total}(t)$ is the amount of un-staked and total SOL on the network, respectively, at time $t$. Therefore $P_{us}(t) = SOL_{us}(t)/SOL_{total}(t)$. - -$$ -\begin{aligned} - D_{us} &= \left( \frac{P_{us}(t_{1}) - P_{us}(t_{0})}{P_{us}(t_{0})} \right)\\ - &= \left( \frac{ \left( \frac{SOL_{us}(t_{2})}{SOL_{total}(t_{2})} \right) - \left( \frac{SOL_{us}(t_{1})}{SOL_{total}(t_{1})} \right)}{ \left( \frac{SOL_{us}(t_{1})}{SOL_{total}(t_{1})} \right) } \right)\\ - -\end{aligned} -$$ - -However, because inflation issuance only increases the total amount and the un-staked supply doesn't change: - -$$ -\begin{aligned} - SOL_{us}(t_2) &= SOL_{us}(t_1)\\ - SOL_{total}(t_2) &= SOL_{total}(t_1)\times (1 + I_{t_1})\\ -\end{aligned} -$$ - -So $D_{us}$ becomes: - -$$ -\begin{aligned} - D_{us} &= \left( \frac{ \left( \frac{SOL_{us}(t_{1})}{SOL_{total}(t_{1})\times (1 + I_{1})} \right) - \left( \frac{SOL_{us}(t_{1})}{SOL_{total}(t_{1})} \right)}{ \left( \frac{SOL_{us}(t_{1})}{SOL_{total}(t_{1})} \right) } \right)\\ - D_{us} &= \frac{1}{(1 + I_{1})} - 1\\ -\end{aligned} -$$ - -Or generally, dilution for un-staked tokens over any time frame undergoing inflation $I$: - -$$ -D_{us} = -\frac{I}{I + 1} \\ -$$ - -So as guessed, this dilution is independent of the total proportion of staked tokens and only depends on inflation rate. This can be seen with our example _Inflation Schedule_ here: - -![p_ex_unstaked_dilution](/img/p_ex_unstaked_dilution.png) - -### Estimated Adjusted Staked Yield - -We can do a similar calculation to determine the _dilution_ of staked token holders, or as we've defined here as the **_Adjusted Staked Yield_**, keeping in mind that dilution in this context is an _increase_ in proportional ownership over time. We'll use the terminology _Adjusted Staked Yield_ to avoid confusion going forward. - -To see the functional form, we calculate, $Y_{adj}$, or the _Adjusted Staked Yield_ (to be compared to _D\_{us}_ the dilution of un-staked tokens above), where $P_{s}(t)$ is the staked proportion of token pool at time $t$ and $I_{t}$ is the incremental inflation rate applied between any two consecutive time points. The definition of $Y_{adj}$ is therefore: - -$$ - Y_{adj} = \frac{P_s(t_2) - P_s(t_1)}{P_s(t_1)}\\ -$$ - -As seen in the plot above, the proportion of staked tokens increases with inflation issuance. Letting $SOL_s(t)$ and $SOL_{\text{total}}(t)$ represent the amount of staked and total SOL at time $t$ respectively: - -$$ - P_s(t_2) = \frac{SOL_s(t_1) + SOL_{\text{total}}(t_1)\times I(t_1)}{SOL_{\text{total}}(t_1)\times (1 + I(t_1))}\\ -$$ - -Where $SOL_{\text{total}}(t_1)\times I(t_1)$ is the additional inflation issuance added to the staked token pool. 
Now we can write $Y_{adj}$ in common terms $t_1 = t$: - -$$ -\begin{aligned} -Y_{adj} &= \frac{\frac{SOL_s(t) + SOL_{\text{total}}(t)\times I(t)}{SOL_{\text{total}}(t)\times (1 + I(t))} - \frac{SOL_s(t)}{SOL_{\text{total}}(t)} }{ \frac{SOL_s(t)}{SOL_{\text{total}}(t)} } \\ - &= \frac{ SOL_{\text{total}}(t)\times (SOL_s(t) + SOL_{\text{total}}(t)\times I(t)) }{ SOL_s(t)\times SOL_{\text{total}}\times (1 + I(t)) } -1 \\ -\end{aligned} -$$ - -which simplifies to: - -$$ -Y_{adj} = \frac{ 1 + I(t)/P_s(t) }{ 1 + I(t) } - 1\\ -$$ - -So we see that the _Adjusted Staked Yield_ a function of the inflation rate and the percent of staked tokens on the network. We can see this plotted for various staking fractions here: - -![p_ex_adjusted_staked_yields](/img/p_ex_adjusted_staked_yields.png) - -It is also clear that in all cases, dilution of un-staked tokens $>$ adjusted staked yield (i.e. dilution of staked tokens). Explicitly we can look at the _relative dilution of un-staked tokens to staked tokens:_ $D_{us}/Y_{adj}$. Here the relationship to inflation drops out and the relative dilution, i.e. the impact of staking tokens vs not staking tokens, is purely a function of the % of the total token supply staked. From above - -$$ -\begin{aligned} -Y_{adj} &= \frac{ 1 + I/P_s }{ 1 + I } - 1,~\text{and}\\ -D_{us} &= -\frac{I}{I + 1},~\text{so} \\ -\frac{D_{us}}{Y_{adj}} &= \frac{ \frac{I}{I + 1} }{ \frac{ 1 + I/P_s }{ 1 + I } - 1 } \\ -\end{aligned} -$$ - -which simplifies as, - -$$ - \begin{aligned} - \frac{D_{us}}{Y_{adj}} &= \frac{ I }{ 1 + \frac{I}{P_s} - (1 + I)}\\ - &= \frac{ I }{ \frac{I}{P_s} - I}\\ - \frac{D_{us}}{Y_{adj}}&= \frac{ P_s }{ 1 - P_s}\\ - \end{aligned} -$$ - -Where we can see a primary dependence of the relative dilution of un-staked tokens to staked tokens is on the function of the proportion of total tokens staked. As shown above, the proportion of total tokens staked changes over time (i.e. $P_s = P_s(t)$ due to the re-staking of inflation issuance thus we see relative dilution grow over time as: - -![p_ex_relative_dilution](/img/p_ex_relative_dilution.png) - -As might be intuitive, as the total fraction of staked tokens increases the relative dilution of un-staked tokens grows dramatically. E.g. with $80\%$ of the network tokens staked, an un-staked token holder will experience ~$400\%$ more dilution than a staked holder. - -Again, this represents the change in fractional change in ownership of staked tokens and illustrates the built-in incentive for token holder to stake their tokens to earn _Staked Yield_ and avoid _Un-staked Dilution_. diff --git a/docs/src/inflation/inflation_schedule.md b/docs/src/inflation/inflation_schedule.md deleted file mode 100644 index 0b83a75f4e843a..00000000000000 --- a/docs/src/inflation/inflation_schedule.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Solana's Proposed Inflation Schedule ---- - -As mentioned above, the network's _Inflation Schedule_ is uniquely described by three parameters: _Initial Inflation Rate_, _Disinflation Rate_ and _Long-term Inflation Rate_. When considering these numbers, there are many factors to take into account: - -- A large portion of the SOL issued via inflation will be distributed to stake-holders in proportion to the SOL they have staked. We want to ensure that the _Inflation Schedule_ design results in reasonable _Staking Yields_ for token holders who delegate SOL and for validation service providers (via commissions taken from _Staking Yields_). 
-- The primary driver of _Staked Yield_ is the amount of SOL staked divided by the total amount of SOL (% of total SOL staked). Therefore the distribution and delegation of tokens across validators are important factors to understand when determining initial inflation parameters. -- Yield throttling is a current area of research that would impact _staking-yields_. This is not taken into consideration in the discussion here or the modeling below. -- Overall token issuance - i.e. what do we expect the Current Total Supply to be in 10 years, or 20 years? -- Long-term, steady-state inflation is an important consideration not only for sustainable support for the validator ecosystem and the Solana Foundation grant programs, but also should be tuned in consideration with expected token losses and burning over time. -- The rate at which we expect network usage to grow, as a consideration to the disinflationary rate. Over time, we plan for inflation to drop and expect that usage will grow. - -Based on these considerations and the community discussions following the initial design, the Solana Foundation proposes the following Inflation Schedule parameters: - -- Initial Inflation Rate: $8\%$ -- Disinflation Rate: $-15\%$ -- Long-term Inflation Rate: $1.5\%$ - -These parameters define the proposed _Inflation Schedule_. Below we show implications of these parameters. These plots only show the impact of inflation issuances given the Inflation Schedule as parameterized above. They _do not account_ for other factors that may impact the Total Supply such as fee/rent burning, slashing or other unforeseen future token destruction events. Therefore, what is presented here is an **upper limit** on the amount of SOL issued via inflation. - -![](/img/p_inflation_schedule.png) - -In the above graph we see the annual inflation rate [$\%$] over time, given the inflation parameters proposed above. - -![](/img/p_total_supply.png) - -Similarly, here we see the _Total Current Supply_ of SOL [MM] over time, assuming an initial _Total Current Supply_ of `488,587,349 SOL` (i.e. for this example, taking the _Total Current Supply_ as of `2020-01-25` and simulating inflation starting from that day). - -Setting aside validator uptime and commissions, the expected Staking Yield and Adjusted Staking Yield metrics are then primarily a function of the % of total SOL staked on the network. Therefore we can we can model _Staking Yield_, if we introduce an additional parameter _% of Staked SOL_: - -$$ -\%~\text{SOL Staked} = \frac{\text{Total SOL Staked}}{\text{Total Current Supply}} -$$ - -This parameter must be estimated because it is a dynamic property of the token holders and staking incentives. The values of _% of Staked SOL_ presented here range from $60\% - 90\%$, which we feel covers the likely range we expect to observe, based on feedback from the investor and validator communities as well as what is observed on comparable Proof-of-Stake protocols. - -![](/img/p_ex_staked_yields.png) - -Again, the above shows an example _Staked Yield_ that a staker might expect over time on the Solana network with the _Inflation Schedule_ as specified. This is an idealized _Staked Yield_ as it neglects validator uptime impact on rewards, validator commissions, potential yield throttling and potential slashing incidents. 
It additionally ignores that _% of Staked SOL_ is dynamic by design - the economic incentives set up by this _Inflation Schedule_ are more clearly seen when _Token Dilution_ is taken into account (see the **Adjusted Staking Yield** section below). diff --git a/docs/src/inflation/terminology.md b/docs/src/inflation/terminology.md deleted file mode 100644 index e9de2b8656ea74..00000000000000 --- a/docs/src/inflation/terminology.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Terminology ---- - -Many terms are thrown around when discussing inflation and the related components (e.g. rewards/yield/interest), we try to define and clarify some commonly used concept here: - -### Total Current Supply [SOL] - -The total amount of tokens (locked or unlocked) that have been generated (via genesis block or protocol inflation) minus any tokens that have been burnt (via transaction fees or other mechanism) or slashed. At network launch, 500,000,000 SOL were instantiated in the genesis block. Since then the Total Current Supply has been reduced by the burning of transaction fees and a planned token reduction event. Solana’s _Total Current Supply_ can be found at https://explorer.solana.com/supply - -### Inflation Rate [%] - -The Solana protocol will automatically create new tokens on a predetermined inflation schedule (discussed below). The _Inflation Rate [%]_ is the annualized growth rate of the _Total Current Supply_ at any point in time. - -### Inflation Schedule - -A deterministic description of token issuance over time. The Solana Foundation is proposing a disinflationary _Inflation Schedule_. I.e. Inflation starts at its highest value, the rate reduces over time until stabilizing at a predetermined long-term inflation rate (see discussion below). This schedule is completely and uniquely parameterized by three numbers: - -- **Initial Inflation Rate [%]**: The starting _Inflation Rate_ for when inflation is first enabled. Token issuance rate can only decrease from this point. -- **Disinflation Rate [%]**: The rate at which the _Inflation Rate_ is reduced. -- **Long-term Inflation Rate [%]**: The stable, long-term _Inflation Rate_ to be expected. - -### Effective Inflation Rate [%] - -The inflation rate actually observed on the Solana network after accounting for other factors that might decrease the _Total Current Supply_. Note that it is not possible for tokens to be created outside of what is described by the _Inflation Schedule_. - -- While the _Inflation Schedule_ determines how the protocol issues SOL, this neglects the concurrent elimination of tokens in the ecosystem due to various factors. The primary token burning mechanism is the burning of a portion of each transaction fee. $50\%$ of each transaction fee is burned, with the remaining fee retained by the validator that processes the transaction. -- Additional factors such as loss of private keys and slashing events should also be considered in a holistic analysis of the _Effective Inflation Rate_. For example, it’s estimated that $10-20\%$ of all BTC have been lost and are unrecoverable and that networks may experience similar yearly losses at the rate of $1-2\%$. - -### Staking Yield [%] - -The rate of return (aka _interest_) earned on SOL staked on the network. It is often quoted as an annualized rate (e.g. "the network _staking yield_ is currently $10\%$ per year"). 
- -- _Staking yield_ is of great interest to validators and token holders who wish to delegate their tokens to avoid token dilution due to inflation (the extent of which is discussed below). -- $100\%$ of inflationary issuances are to be distributed to staked token-holders in proportion to their staked SOL and to validators who charge a commission on the rewards earned by their delegated SOL. - - There may be future consideration for an additional split of inflation issuance with the introduction of _Archivers_ into the economy. _Archivers_ are network participants who provide a decentralized storage service and should also be incentivized with token distribution from inflation issuances for this service. - Similarly, early designs specified a fixed percentage of inflationary issuance to be delivered to the Foundation treasury for operational expenses and future grants. However, inflation will be launching without any portion allocated to the Foundation. -- _Staking yield_ can be calculated from the _Inflation Schedule_ along with the fraction of the _Total Current Supply_ that is staked at any given time. The explicit relationship is given by: - -$$ -\begin{aligned} -\text{Staking Yield} =~&\text{Inflation Rate}\times\text{Validator Uptime}~\times \\ -&\left( 1 - \text{Validator Fee} \right) \times \left( \frac{1}{\%~\text{SOL Staked}} \right) \\ -\text{where:}\\ -\%~\text{SOL Staked} &= \frac{\text{Total SOL Staked}}{\text{Total Current Supply}} -\end{aligned} -$$ - -### Token Dilution [%] - -Dilution is defined here as the change in proportional representation of a set of tokens within a larger set due to the introduction of new tokens. In practical terms, we discuss the dilution of staked or un-staked tokens due to the introduction and distribution of inflation issuance across the network. As will be shown below, while dilution impacts every token holder, the _relative_ dilution between staked and un-staked tokens should be the primary concern to un-staked token holders. Staking tokens, which will receive their proportional distribution of inflation issuance, should assuage any dilution concerns for staked token holders. I.e. dilution from 'inflation' is offset by the distribution of new tokens to staked token holders, nullifying the 'dilutive' effects of the inflation for that group. - -### Adjusted Staking Yield [%] - -A complete appraisal of earning potential from staking tokens should take into account staked _Token Dilution_ and its impact on the _Staking Yield_. For this, we define the _Adjusted Staking Yield_ as the change in fractional token supply ownership of staked tokens due to the distribution of inflation issuance. I.e. the positive dilutive effects of inflation. diff --git a/docs/src/integrations/exchange.md b/docs/src/integrations/exchange.md deleted file mode 100644 index 2e8d70f9cc614f..00000000000000 --- a/docs/src/integrations/exchange.md +++ /dev/null @@ -1,924 +0,0 @@ ---- -title: Add Solana to Your Exchange ---- - -This guide describes how to add Solana's native token SOL to your cryptocurrency -exchange. - -## Node Setup - -We highly recommend setting up at least two nodes on high-grade computers/cloud -instances, upgrading to newer versions promptly, and keeping an eye on service -operations with a bundled monitoring tool. 
- -This setup enables you: - -- to have a self-administered gateway to the Solana mainnet-beta cluster to get - data and submit withdrawal transactions -- to have full control over how much historical block data is retained -- to maintain your service availability even if one node fails - -Solana nodes demand relatively high computing power to handle our fast blocks -and high TPS. For specific requirements, please see -[hardware recommendations](../running-validator/validator-reqs.md). - -To run an api node: - -1. [Install the Solana command-line tool suite](../cli/install-solana-cli-tools.md) -2. Start the validator with at least the following parameters: - -```bash -solana-validator \ - --ledger \ - --identity \ - --entrypoint \ - --expected-genesis-hash \ - --rpc-port 8899 \ - --no-voting \ - --enable-rpc-transaction-history \ - --limit-ledger-size \ - --known-validator \ - --only-known-rpc -``` - -Customize `--ledger` to your desired ledger storage location, and `--rpc-port` to the port you want to expose. - -The `--entrypoint` and `--expected-genesis-hash` parameters are all specific to the cluster you are joining. -[Current parameters for Mainnet Beta](../clusters.md#example-solana-validator-command-line-2) - -The `--limit-ledger-size` parameter allows you to specify how many ledger -[shreds](../terminology.md#shred) your node retains on disk. If you do not -include this parameter, the validator will keep the entire ledger until it runs -out of disk space. The default value attempts to keep the ledger disk usage -under 500GB. More or less disk usage may be requested by adding an argument to -`--limit-ledger-size` if desired. Check `solana-validator --help` for the -default limit value used by `--limit-ledger-size`. More information about -selecting a custom limit value is [available -here](https://github.com/solana-labs/solana/blob/583cec922b6107e0f85c7e14cb5e642bc7dfb340/core/src/ledger_cleanup_service.rs#L15-L26). - -Specifying one or more `--known-validator` parameters can protect you from booting from a malicious snapshot. [More on the value of booting with known validators](../running-validator/validator-start.md#known-validators) - -Optional parameters to consider: - -- `--private-rpc` prevents your RPC port from being published for use by other nodes -- `--rpc-bind-address` allows you to specify a different IP address to bind the RPC port - -### Automatic Restarts and Monitoring - -We recommend configuring each of your nodes to restart automatically on exit, to -ensure you miss as little data as possible. Running the solana software as a -systemd service is one great option. - -For monitoring, we provide -[`solana-watchtower`](https://github.com/solana-labs/solana/blob/master/watchtower/README.md), -which can monitor your validator and detect with the `solana-validator` process -is unhealthy. It can directly be configured to alert you via Slack, Telegram, -Discord, or Twillio. For details, run `solana-watchtower --help`. - -```bash -solana-watchtower --validator-identity -``` - -> You can find more information about the [best practices for Solana Watchtower](../validator/best-practices/monitoring.md#solana-watchtower) here in the docs. - -#### New Software Release Announcements - -We release new software frequently (around 1 release / week). -Sometimes newer versions include incompatible protocol changes, which -necessitate timely software update to avoid errors in processing blocks. 
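As one possible way to script an update, assuming the node was originally installed with `solana-install` and runs under a systemd unit (the unit name below is hypothetical; adapt to your deployment tooling):

```bash
# Check the release currently installed on this node
solana --version

# Pull the latest release for the configured channel, then restart the
# validator under your process supervisor so the new binary takes effect
solana-install update
sudo systemctl restart solana-validator.service
```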
- -Our official release announcements for all kinds of releases (normal and -security) are communicated via a [discord](https://solana.com/discord) channel called -`#mb-announcement` -(`mb` stands for `mainnet-beta`). - -Like staked validators, we expect any exchange-operated validators to be updated -at your earliest convenience within a business day or two after a normal release -announcement. For security-related releases, more urgent action may be needed. - -### Ledger Continuity - -By default, each of your nodes will boot from a snapshot provided by one of your -known validators. This snapshot reflects the current state of the chain, but -does not contain the complete historical ledger. If one of your node exits and -boots from a new snapshot, there may be a gap in the ledger on that node. In -order to prevent this issue, add the `--no-snapshot-fetch` parameter to your -`solana-validator` command to receive historical ledger data instead of a -snapshot. - -Do not pass the `--no-snapshot-fetch` parameter on your initial boot as it's not -possible to boot the node all the way from the genesis block. Instead boot from -a snapshot first and then add the `--no-snapshot-fetch` parameter for reboots. - -It is important to note that the amount of historical ledger available to your -nodes from the rest of the network is limited at any point in time. Once -operational if your validators experience significant downtime they may not be -able to catch up to the network and will need to download a new snapshot from a -known validator. In doing so your validators will now have a gap in its -historical ledger data that cannot be filled. - -### Minimizing Validator Port Exposure - -The validator requires that various UDP and TCP ports be open for inbound -traffic from all other Solana validators. While this is the most efficient mode of -operation, and is strongly recommended, it is possible to restrict the -validator to only require inbound traffic from one other Solana validator. - -First add the `--restricted-repair-only-mode` argument. This will cause the -validator to operate in a restricted mode where it will not receive pushes from -the rest of the validators, and instead will need to continually poll other -validators for blocks. The validator will only transmit UDP packets to other -validators using the _Gossip_ and _ServeR_ ("serve repair") ports, and only -receive UDP packets on its _Gossip_ and _Repair_ ports. - -The _Gossip_ port is bi-directional and allows your validator to remain in -contact with the rest of the cluster. Your validator transmits on the _ServeR_ -to make repair requests to obtaining new blocks from the rest of the network, -since Turbine is now disabled. Your validator will then receive repair -responses on the _Repair_ port from other validators. - -To further restrict the validator to only requesting blocks from one or more -validators, first determine the identity pubkey for that validator and add the -`--gossip-pull-validator PUBKEY --repair-validator PUBKEY` arguments for each -PUBKEY. This will cause your validator to be a resource drain on each validator -that you add, so please do this sparingly and only after consulting with the -target validator. - -Your validator should now only be communicating with the explicitly listed -validators and only on the _Gossip_, _Repair_ and _ServeR_ ports. - -## Setting up Deposit Accounts - -Solana accounts do not require any on-chain initialization; once they contain -some SOL, they exist. 
To set up a deposit account for your exchange, simply -generate a Solana keypair using any of our [wallet tools](../wallet-guide/cli.md). - -We recommend using a unique deposit account for each of your users. - -Solana accounts must be made rent-exempt by containing 2-years worth of -[rent](developing/programming-model/accounts.md#rent) in SOL. In order to find -the minimum rent-exempt balance for your deposit accounts, query the -[`getMinimumBalanceForRentExemption` endpoint](../api/http#getminimumbalanceforrentexemption): - -```bash -curl localhost:8899 -X POST -H "Content-Type: application/json" -d '{ - "jsonrpc": "2.0", - "id": 1, - "method": "getMinimumBalanceForRentExemption", - "params":[0] -}' - -# Result -{"jsonrpc":"2.0","result":890880,"id":1} -``` - -### Offline Accounts - -You may wish to keep the keys for one or more collection accounts offline for -greater security. If so, you will need to move SOL to hot accounts using our -[offline methods](../offline-signing.md). - -## Listening for Deposits - -When a user wants to deposit SOL into your exchange, instruct them to send a -transfer to the appropriate deposit address. - -### Versioned Transaction Migration - -When the Mainnet Beta network starts processing versioned transactions, exchanges -**MUST** make changes. If no changes are made, deposit detection will no longer -work properly because fetching a versioned transaction or a block containing -versioned transactions will return an error. - -- `{"maxSupportedTransactionVersion": 0}` - - The `maxSupportedTransactionVersion` parameter must be added to `getBlock` and - `getTransaction` requests to avoid disruption to deposit detection. The latest - transaction version is `0` and should be specified as the max supported - transaction version value. - -It's important to understand that versioned transactions allow users to create -transactions that use another set of account keys loaded from on-chain address -lookup tables. - -- `{"encoding": "jsonParsed"}` - - When fetching blocks and transactions, it's now recommended to use the - `"jsonParsed"` encoding because it includes all transaction account keys - (including those from lookup tables) in the message `"accountKeys"` list. - This makes it straightforward to resolve balance changes detailed in - `preBalances` / `postBalances` and `preTokenBalances` / `postTokenBalances`. - - If the `"json"` encoding is used instead, entries in `preBalances` / - `postBalances` and `preTokenBalances` / `postTokenBalances` may refer to - account keys that are **NOT** in the `"accountKeys"` list and need to be - resolved using `"loadedAddresses"` entries in the transaction metadata. - -### Poll for Blocks - -To track all the deposit accounts for your exchange, poll for each confirmed -block and inspect for addresses of interest, using the JSON-RPC service of your -Solana API node. - -- To identify which blocks are available, send a [`getBlocks`](../api/http#getblocks) request, - passing the last block you have already processed as the start-slot parameter: - -```bash -curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ - "jsonrpc": "2.0", - "id": 1, - "method": "getBlocks", - "params": [160017005, 160017015] -}' - -# Result -{"jsonrpc":"2.0","result":[160017005,160017006,160017007,160017012,160017013,160017014,160017015],"id":1} -``` - -Not every slot produces a block, so there may be gaps in the sequence of integers. 
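Putting the pieces together, a deposit-tracking daemon might drive its polling with a loop along the following lines. This is a minimal sketch only: it assumes `jq` is available, uses an arbitrary starting slot, and leaves error handling to your infrastructure.

```bash
#!/usr/bin/env bash
# Illustrative sketch: poll for newly confirmed blocks starting after the
# last slot already processed, tolerating gaps in the slot sequence.
LAST_SLOT=160017005  # placeholder; persist this value between runs
while true; do
  SLOTS=$(curl -s https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d "{
    \"jsonrpc\": \"2.0\",
    \"id\": 1,
    \"method\": \"getBlocks\",
    \"params\": [$((LAST_SLOT + 1))]
  }" | jq -r '.result[]?')
  for SLOT in $SLOTS; do
    # Fetch and inspect this block for deposit addresses of interest,
    # using the getBlock parameters described below.
    LAST_SLOT=$SLOT
  done
  sleep 5
done
```

In production you would persist the last processed slot durably, so that a restart neither skips nor double-processes a block.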
- -- For each block, request its contents with a [`getBlock`](../api/http#getblock) request: - -### Block Fetching Tips - -- `{"rewards": false}` - -By default, fetched blocks will return information about validator fees on each -block and staking rewards on epoch boundaries. If you don't need this -information, disable it with the "rewards" parameter. - -- `{"transactionDetails": "accounts"}` - -By default, fetched blocks will return a lot of transaction info and metadata -that isn't necessary for tracking account balances. Set the "transactionDetails" -parameter to speed up block fetching. - -```bash -curl https://api.devnet.solana.com -X POST -H 'Content-Type: application/json' -d '{ - "jsonrpc": "2.0", - "id": 1, - "method": "getBlock", - "params": [ - 166974442, - { - "encoding": "jsonParsed", - "maxSupportedTransactionVersion": 0, - "transactionDetails": "accounts", - "rewards": false - } - ] -}' - -# Result -{ - "jsonrpc": "2.0", - "result": { - "blockHeight": 157201607, - "blockTime": 1665070281, - "blockhash": "HKhao674uvFc4wMK1Cm3UyuuGbKExdgPFjXQ5xtvsG3o", - "parentSlot": 166974441, - "previousBlockhash": "98CNLU4rsYa2HDUyp7PubU4DhwYJJhSX9v6pvE7SWsAo", - "transactions": [ - ... (omit) - { - "meta": { - "err": null, - "fee": 5000, - "postBalances": [ - 1110663066, - 1, - 1040000000 - ], - "postTokenBalances": [], - "preBalances": [ - 1120668066, - 1, - 1030000000 - ], - "preTokenBalances": [], - "status": { - "Ok": null - } - }, - "transaction": { - "accountKeys": [ - { - "pubkey": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde", - "signer": true, - "source": "transaction", - "writable": true - }, - { - "pubkey": "11111111111111111111111111111111", - "signer": false, - "source": "transaction", - "writable": false - }, - { - "pubkey": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", - "signer": false, - "source": "lookupTable", - "writable": true - } - ], - "signatures": [ - "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM" - ] - }, - "version": 0 - }, - ... (omit) - ] - }, - "id": 1 -} -``` - -The `preBalances` and `postBalances` fields allow you to track the balance -changes in every account without having to parse the entire transaction. They -list the starting and ending balances of each account in -[lamports](../terminology.md#lamport), indexed to the `accountKeys` list. For -example, if the deposit address of interest is -`G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o`, this transaction represents a -transfer of 1040000000 - 1030000000 = 10,000,000 lamports = 0.01 SOL - -If you need more information about the transaction type or other specifics, you -can request the block from RPC in binary format, and parse it using either our -[Rust SDK](https://github.com/solana-labs/solana) or -[Javascript SDK](https://github.com/solana-labs/solana-web3.js). - -### Address History - -You can also query the transaction history of a specific address. This is -generally _not_ a viable method for tracking all your deposit addresses over all -slots, but may be useful for examining a few accounts for a specific period of -time. 
- -- Send a [`getSignaturesForAddress`](../api/http#getsignaturesforaddress) - request to the api node: - -```bash -curl localhost:8899 -X POST -H "Content-Type: application/json" -d '{ - "jsonrpc": "2.0", - "id": 1, - "method": "getSignaturesForAddress", - "params": [ - "3M2b3tLji7rvscqrLAHMukYxDK2nB96Q9hwfV6QkdzBN", - { - "limit": 3 - } - ] -}' - -# Result -{ - "jsonrpc": "2.0", - "result": [ - { - "blockTime": 1662064640, - "confirmationStatus": "finalized", - "err": null, - "memo": null, - "signature": "3EDRvnD5TbbMS2mCusop6oyHLD8CgnjncaYQd5RXpgnjYUXRCYwiNPmXb6ZG5KdTK4zAaygEhfdLoP7TDzwKBVQp", - "slot": 148697216 - }, - { - "blockTime": 1662064434, - "confirmationStatus": "finalized", - "err": null, - "memo": null, - "signature": "4rPQ5wthgSP1kLdLqcRgQnkYkPAZqjv5vm59LijrQDSKuL2HLmZHoHjdSLDXXWFwWdaKXUuryRBGwEvSxn3TQckY", - "slot": 148696843 - }, - { - "blockTime": 1662064341, - "confirmationStatus": "finalized", - "err": null, - "memo": null, - "signature": "36Q383JMiqiobuPV9qBqy41xjMsVnQBm9rdZSdpbrLTGhSQDTGZJnocM4TQTVfUGfV2vEX9ZB3sex6wUBUWzjEvs", - "slot": 148696677 - } - ], - "id": 1 -} -``` - -- For each signature returned, get the transaction details by sending a - [`getTransaction`](../api/http#gettransaction) request: - -```bash -curl https://api.devnet.solana.com -X POST -H 'Content-Type: application/json' -d '{ - "jsonrpc":"2.0", - "id":1, - "method":"getTransaction", - "params":[ - "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM", - { - "encoding":"jsonParsed", - "maxSupportedTransactionVersion":0 - } - ] -}' - -# Result -{ - "jsonrpc": "2.0", - "result": { - "blockTime": 1665070281, - "meta": { - "err": null, - "fee": 5000, - "innerInstructions": [], - "logMessages": [ - "Program 11111111111111111111111111111111 invoke [1]", - "Program 11111111111111111111111111111111 success" - ], - "postBalances": [ - 1110663066, - 1, - 1040000000 - ], - "postTokenBalances": [], - "preBalances": [ - 1120668066, - 1, - 1030000000 - ], - "preTokenBalances": [], - "rewards": [], - "status": { - "Ok": null - } - }, - "slot": 166974442, - "transaction": { - "message": { - "accountKeys": [ - { - "pubkey": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde", - "signer": true, - "source": "transaction", - "writable": true - }, - { - "pubkey": "11111111111111111111111111111111", - "signer": false, - "source": "transaction", - "writable": false - }, - { - "pubkey": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", - "signer": false, - "source": "lookupTable", - "writable": true - } - ], - "addressTableLookups": [ - { - "accountKey": "4syr5pBaboZy4cZyF6sys82uGD7jEvoAP2ZMaoich4fZ", - "readonlyIndexes": [], - "writableIndexes": [ - 3 - ] - } - ], - "instructions": [ - { - "parsed": { - "info": { - "destination": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", - "lamports": 10000000, - "source": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde" - }, - "type": "transfer" - }, - "program": "system", - "programId": "11111111111111111111111111111111" - } - ], - "recentBlockhash": "BhhivDNgoy4L5tLtHb1s3TP19uUXqKiy4FfUR34d93eT" - }, - "signatures": [ - "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM" - ] - }, - "version": 0 - }, - "id": 1 -} -``` - -## Sending Withdrawals - -To accommodate a user's request to withdraw SOL, you must generate a Solana -transfer transaction, and send it to the api node to be forwarded to your -cluster. 
-
-### Synchronous
-
-Sending a synchronous transfer to the Solana cluster allows you to easily ensure
-that a transfer is successful and finalized by the cluster.
-
-Solana's command-line tool offers a simple command, `solana transfer`, to
-generate, submit, and confirm transfer transactions. By default, this method
-will wait and track progress on stderr until the transaction has been finalized
-by the cluster. If the transaction fails, it will report any transaction errors.
-
-```bash
-solana transfer <USER_ADDRESS> <AMOUNT> --allow-unfunded-recipient --keypair <KEYPAIR> --url http://localhost:8899
-```
-
-The [Solana Javascript SDK](https://github.com/solana-labs/solana-web3.js)
-offers a similar approach for the JS ecosystem. Use the `SystemProgram` to build
-a transfer transaction, and submit it using the `sendAndConfirmTransaction`
-method.
-
-### Asynchronous
-
-For greater flexibility, you can submit withdrawal transfers asynchronously. In
-these cases, it is your responsibility to verify that the transaction succeeded
-and was finalized by the cluster.
-
-**Note:** Each transaction contains a [recent
-blockhash](developing/programming-model/transactions.md#blockhash-format) to
-indicate its liveness. It is **critical** to wait until this blockhash expires
-before retrying a withdrawal transfer that does not appear to have been
-confirmed or finalized by the cluster. Otherwise, you risk a double spend. See
-more on [blockhash expiration](#blockhash-expiration) below.
-
-First, get a recent blockhash using the [`getFees`](../api/http#getfees) endpoint or the CLI command:
-
-```bash
-solana fees --url http://localhost:8899
-```
-
-In the command-line tool, pass the `--no-wait` argument to send a transfer
-asynchronously, and include your recent blockhash with the `--blockhash` argument:
-
-```bash
-solana transfer <USER_ADDRESS> <AMOUNT> --no-wait --allow-unfunded-recipient --blockhash <RECENT_BLOCKHASH> --keypair <KEYPAIR> --url http://localhost:8899
-```
-
-You can also build, sign, and serialize the transaction manually, and fire it off to
-the cluster using the JSON-RPC [`sendTransaction`](../api/http#sendtransaction) endpoint.
-
-#### Transaction Confirmations & Finality
-
-Get the status of a batch of transactions using the
-[`getSignatureStatuses`](../api/http#getsignaturestatuses) JSON-RPC endpoint.
-The `confirmations` field reports how many
-[confirmed blocks](../terminology.md#confirmed-block) have elapsed since the
-transaction was processed. If `confirmations: null`, it is [finalized](../terminology.md#finality).
-
-```bash
-curl localhost:8899 -X POST -H "Content-Type: application/json" -d '{
-  "jsonrpc":"2.0",
-  "id":1,
-  "method":"getSignatureStatuses",
-  "params":[
-    [
-      "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW",
-      "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"
-    ]
-  ]
-}'
-
-# Result
-{
-  "jsonrpc": "2.0",
-  "result": {
-    "context": {
-      "slot": 82
-    },
-    "value": [
-      {
-        "slot": 72,
-        "confirmations": 10,
-        "err": null,
-        "status": {
-          "Ok": null
-        }
-      },
-      {
-        "slot": 48,
-        "confirmations": null,
-        "err": null,
-        "status": {
-          "Ok": null
-        }
-      }
-    ]
-  },
-  "id": 1
-}
-```
-
-#### Blockhash Expiration
-
-You can check whether a particular blockhash is still valid by sending a
-[`getFeeCalculatorForBlockhash`](../api/http#getfeecalculatorforblockhash)
-request with the blockhash as a parameter. If the response value is `null`, the
-blockhash is expired, and the withdrawal transaction using that blockhash should
-never succeed.
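-
-The same check is available from the Javascript SDK via the
-`getFeeCalculatorForBlockhash` method. A minimal sketch, where the blockhash is
-a placeholder for the one attached to your outstanding withdrawal transfer:
-
-```ts
-import { Connection } from "@solana/web3.js";
-
-(async () => {
-  const connection = new Connection("http://localhost:8899", "confirmed");
-  // Placeholder: the blockhash used by the unconfirmed withdrawal transaction.
-  const blockhash = "BhhivDNgoy4L5tLtHb1s3TP19uUXqKiy4FfUR34d93eT";
-
-  const { value } = await connection.getFeeCalculatorForBlockhash(blockhash);
-  if (value === null) {
-    // The blockhash has expired: the original transaction can never land,
-    // so it is now safe to sign and submit a replacement.
-  }
-})();
-```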
-
-### Validating User-supplied Account Addresses for Withdrawals
-
-As withdrawals are irreversible, it may be a good practice to validate a
-user-supplied account address before authorizing a withdrawal in order to
-prevent accidental loss of user funds.
-
-#### Basic verification
-
-Solana addresses are a 32-byte array, encoded with the bitcoin base58 alphabet.
-This results in an ASCII text string matching the following regular expression:
-
-```
-[1-9A-HJ-NP-Za-km-z]{32,44}
-```
-
-This check is insufficient on its own as Solana addresses are not checksummed, so
-typos cannot be detected. To further validate the user's input, the string can be
-decoded and the resulting byte array's length confirmed to be 32. However, there
-are some addresses that can decode to 32 bytes despite a typo such as a single
-missing character, reversed characters, or ignored case.
-
-#### Advanced verification
-
-Due to the vulnerability to typos described above, it is recommended that the
-balance be queried for candidate withdraw addresses and the user prompted to
-confirm their intentions if a non-zero balance is discovered.
-
-#### Valid ed25519 pubkey check
-
-The address of a normal account in Solana is a Base58-encoded string of a
-256-bit ed25519 public key. Not all bit patterns are valid public keys for the
-ed25519 curve, so it is possible to ensure user-supplied account addresses are
-at least correct ed25519 public keys.
-
-#### Java
-
-Here is a Java example of validating a user-supplied address as a valid ed25519
-public key:
-
-The following code sample assumes you're using Maven.
-
-`pom.xml`:
-
-```xml
-<repositories>
-  ...
-  <repository>
-    <id>spring</id>
-    <url>https://repo.spring.io/libs-release/</url>
-  </repository>
-</repositories>
-
-...
-
-<dependencies>
-  ...
-  <dependency>
-    <groupId>io.github.novacrypto</groupId>
-    <artifactId>Base58</artifactId>
-    <version>0.1.3</version>
-  </dependency>
-  <dependency>
-    <groupId>cafe.cryptography</groupId>
-    <artifactId>curve25519-elisabeth</artifactId>
-    <version>0.1.0</version>
-  </dependency>
-</dependencies>
-```
-
-```java
-import io.github.novacrypto.base58.Base58;
-import cafe.cryptography.curve25519.CompressedEdwardsY;
-
-public class PubkeyValidator
-{
-    public static boolean verifyPubkey(String userProvidedPubkey)
-    {
-        try {
-            return _verifyPubkeyInternal(userProvidedPubkey);
-        } catch (Exception e) {
-            return false;
-        }
-    }
-
-    public static boolean _verifyPubkeyInternal(String maybePubkey) throws Exception
-    {
-        byte[] bytes = Base58.base58Decode(maybePubkey);
-        return !(new CompressedEdwardsY(bytes)).decompress().isSmallOrder();
-    }
-}
-```
-
-## Minimum Deposit & Withdrawal Amounts
-
-Every deposit and withdrawal of SOL must be greater than or equal to the minimum
-rent-exempt balance for the account at the wallet address (a basic SOL account
-holding no data), currently: 0.000890880 SOL
-
-Similarly, every deposit account must contain at least this balance.
-
-```bash
-curl localhost:8899 -X POST -H "Content-Type: application/json" -d '{
-  "jsonrpc": "2.0",
-  "id": 1,
-  "method": "getMinimumBalanceForRentExemption",
-  "params": [0]
-}'
-
-# Result
-{"jsonrpc":"2.0","result":890880,"id":1}
-```
-
-## Supporting the SPL Token Standard
-
-[SPL Token](https://spl.solana.com/token) is the standard for wrapped/synthetic
-token creation and exchange on the Solana blockchain.
-
-The SPL Token workflow is similar to that of native SOL tokens, but there are a
-few differences which will be discussed in this section.
-
-### Token Mints
-
-Each _type_ of SPL Token is declared by creating a _mint_ account. This account
-stores metadata describing token features like the supply, number of decimals, and
-various authorities with control over the mint.
-Each SPL Token account references its associated mint and may only interact
-with SPL Tokens of that type.
-
-### Installing the `spl-token` CLI Tool
-
-SPL Token accounts are queried and modified using the `spl-token` command line
-utility. The examples provided in this section depend upon having it installed
-on the local system.
-
-`spl-token` is distributed from Rust [crates.io](https://crates.io/crates/spl-token)
-via the Rust `cargo` command line utility. The latest version of `cargo` can be
-installed using a handy one-liner for your platform at [rustup.rs](https://rustup.rs).
-Once `cargo` is installed, `spl-token` can be obtained with the following command:
-
-```
-cargo install spl-token-cli
-```
-
-You can then check the installed version to verify:
-
-```
-spl-token --version
-```
-
-Which should result in something like:
-
-```text
-spl-token-cli 2.0.1
-```
-
-### Account Creation
-
-SPL Token accounts carry additional requirements that native System Program
-accounts do not:
-
-1. SPL Token accounts must be created before an amount of tokens can be
-   deposited. Token accounts can be created explicitly with the
-   `spl-token create-account` command, or implicitly by the
-   `spl-token transfer --fund-recipient ...` command.
-1. SPL Token accounts must remain [rent-exempt](developing/programming-model/accounts.md#rent-exemption)
-   for the duration of their existence and therefore require a small amount of
-   native SOL tokens be deposited at account creation. For SPL Token v2 accounts,
-   this amount is 0.00203928 SOL (2,039,280 lamports).
-
-#### Command Line
-
-To create an SPL Token account with the following properties:
-
-1. Associated with the given mint
-1. Owned by the funding account's keypair
-
-```
-spl-token create-account <TOKEN_MINT_ADDRESS>
-```
-
-#### Example
-
-```
-$ spl-token create-account AkUFCWTXb3w9nY2n6SFJvBV6VwvFUCe4KBMCcgLsa2ir
-Creating account 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV
-Signature: 4JsqZEPra2eDTHtHpB4FMWSfk3UgcCVmkKkP7zESZeMrKmFFkDkNd91pKP3vPVVZZPiu5XxyJwS73Vi5WsZL88D7
-```
-
-Or to create an SPL Token account with a specific keypair:
-
-```
-$ solana-keygen new -o token-account.json
-$ spl-token create-account AkUFCWTXb3w9nY2n6SFJvBV6VwvFUCe4KBMCcgLsa2ir token-account.json
-Creating account 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV
-Signature: 4JsqZEPra2eDTHtHpB4FMWSfk3UgcCVmkKkP7zESZeMrKmFFkDkNd91pKP3vPVVZZPiu5XxyJwS73Vi5WsZL88D7
-```
-
-### Checking an Account's Balance
-
-#### Command Line
-
-```
-spl-token balance <TOKEN_ACCOUNT_ADDRESS>
-```
-
-#### Example
-
-```
-$ spl-token balance 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV
-0
-```
-
-### Token Transfers
-
-The source account for a transfer is the actual token account that contains the
-amount.
-
-The recipient address however can be a normal wallet account. If an associated
-token account for the given mint does not yet exist for that wallet, the
-transfer will create it provided that the `--fund-recipient` argument is
-provided.
-
-#### Command Line
-
-```
-spl-token transfer <SENDER_TOKEN_ACCOUNT_ADDRESS> <AMOUNT> <RECIPIENT_WALLET_ADDRESS> --fund-recipient
-```
-
-#### Example
-
-```
-$ spl-token transfer 6B199xxzw3PkAm25hGJpjj3Wj3WNYNHzDAnt1tEqg5BN 1 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV
-Transfer 1 tokens
-  Sender: 6B199xxzw3PkAm25hGJpjj3Wj3WNYNHzDAnt1tEqg5BN
-  Recipient: 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV
-Signature: 3R6tsog17QM8KfzbcbdP4aoMfwgo6hBggJDVy7dZPVmH2xbCWjEj31JKD53NzMrf25ChFjY7Uv2dfCDq4mGFFyAj
-```
-
-### Depositing
-
-Since each `(wallet, mint)` pair requires a separate account on chain, it is
-recommended that the addresses for these accounts be derived from SOL deposit
-wallets using the
-[Associated Token Account](https://spl.solana.com/associated-token-account) (ATA)
-scheme and that _only_ deposits from ATA addresses be accepted.
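-
-A minimal sketch of this derivation, assuming the `@solana/spl-token` package's
-`getAssociatedTokenAddress` helper (the wallet and mint addresses below are
-taken from the examples in this section):
-
-```ts
-import { PublicKey } from "@solana/web3.js";
-import { getAssociatedTokenAddress } from "@solana/spl-token";
-
-(async () => {
-  // The user's main SOL deposit wallet and the mint of the token to accept.
-  const userWallet = new PublicKey("9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde");
-  const mint = new PublicKey("AkUFCWTXb3w9nY2n6SFJvBV6VwvFUCe4KBMCcgLsa2ir");
-
-  // The ATA address is derived deterministically, so it can be computed
-  // off-chain before the token account is ever created.
-  const depositAddress = await getAssociatedTokenAddress(mint, userWallet);
-  console.log(depositAddress.toBase58());
-})();
-```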
-
-Monitoring for deposit transactions should follow the [block polling](#poll-for-blocks)
-method described above. Each new block should be scanned for successful
-transactions referencing user token-account derived addresses. The
-`preTokenBalance` and `postTokenBalance` fields from the transaction's metadata
-must then be used to determine the effective balance change. These fields will
-identify the token mint and account owner (main wallet address) of the affected
-account.
-
-Note that if a receiving account is created during the transaction, it will have no
-`preTokenBalance` entry as there is no existing account state. In this
-case, the initial balance can be assumed to be zero.
-
-### Withdrawing
-
-The withdrawal address a user provides must be that of their SOL wallet.
-
-Before executing a withdrawal [transfer](#token-transfers),
-the exchange should check the address as
-[described above](#validating-user-supplied-account-addresses-for-withdrawals).
-Additionally, this address must be owned by the System Program and have no
-account data. If the address has no SOL balance, user confirmation should be
-obtained before proceeding with the withdrawal. All other withdrawal addresses
-must be rejected.
-
-From the withdrawal address, the [Associated Token Account](https://spl.solana.com/associated-token-account)
-(ATA) for the correct mint is derived and the transfer issued to that account via a
-[TransferChecked](https://github.com/solana-labs/solana-program-library/blob/fc0d6a2db79bd6499f04b9be7ead0c400283845e/token/program/src/instruction.rs#L268)
-instruction. Note that it is possible that the ATA address does not yet exist, at which point the
-exchange should fund the account on behalf of the user. For SPL Token v2
-accounts, funding the withdrawal account will require 0.00203928 SOL (2,039,280
-lamports).
-
-Template `spl-token transfer` command for a withdrawal:
-
-```
-$ spl-token transfer --fund-recipient <EXCHANGE_TOKEN_ACCOUNT> <WITHDRAWAL_AMOUNT> <WITHDRAWAL_ADDRESS>
-```
-
-### Other Considerations
-
-#### Freeze Authority
-
-For regulatory compliance reasons, an SPL Token issuing entity may optionally
-choose to hold "Freeze Authority" over all accounts created in association with
-its mint. This allows them to [freeze](https://spl.solana.com/token#freezing-accounts)
-the assets in a given account at will, rendering the account unusable until thawed.
-If this feature is in use, the freeze authority's pubkey will be registered in
-the SPL Token's mint account.
-
-## Testing the Integration
-
-Be sure to test your complete workflow on Solana devnet and testnet
-[clusters](../clusters.md) before moving to production on mainnet-beta. Devnet
-is the most open and flexible, and ideal for initial development, while testnet
-offers more realistic cluster configuration. Both devnet and testnet support a
-faucet; run `solana airdrop 1` to obtain some devnet or testnet SOL for
-development and testing.
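-
-Test setups can request the same airdrop programmatically through the
-Javascript SDK; a minimal sketch, using a throwaway keypair and the devnet RPC
-URL as placeholders:
-
-```ts
-import { Connection, Keypair, LAMPORTS_PER_SOL } from "@solana/web3.js";
-
-(async () => {
-  const connection = new Connection("https://api.devnet.solana.com", "confirmed");
-  const testWallet = Keypair.generate();
-
-  // Request 1 devnet SOL for the test wallet and wait for it to land.
-  const signature = await connection.requestAirdrop(
-    testWallet.publicKey,
-    LAMPORTS_PER_SOL,
-  );
-  await connection.confirmTransaction(signature);
-})();
-```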
diff --git a/docs/src/integrations/retrying-transactions.md b/docs/src/integrations/retrying-transactions.md deleted file mode 100644 index c2d7ff24be2292..00000000000000 --- a/docs/src/integrations/retrying-transactions.md +++ /dev/null @@ -1,336 +0,0 @@ ---- -title: Retrying Transactions ---- - -# Retrying Transactions - -On some occasions, a seemingly valid transaction may be dropped before it is -included in a block. This most often occurs during periods of network -congestion, when an RPC node fails to rebroadcast the transaction to the -[leader](../terminology#leader). To an end-user, it may -appear as if their transaction disappears entirely. While RPC nodes are equipped -with a generic rebroadcasting algorithm, application developers are also capable -of developing their own custom rebroadcasting logic. - -## Facts - -:::note -Fact Sheet - -- RPC nodes will attempt to rebroadcast transactions using a generic algorithm -- Application developers can implement their own custom rebroadcasting logic -- Developers should take advantage of the `maxRetries` parameter on the - `sendTransaction` JSON-RPC method -- Developers should enable preflight checks to raise errors before transactions - are submitted -- Before re-signing any transaction, it is **very important** to ensure that the - initial transaction’s blockhash has expired - -::: - -## The Journey of a Transaction - -### How Clients Submit Transactions - -In Solana, there is no concept of a mempool. All transactions, whether they are -initiated programmatically or by an end-user, are efficiently routed to leaders -so that they can be processed into a block. There are two main ways in which a -transaction can be sent to leaders: - -1. By proxy via an RPC server and the - [sendTransaction](../api/http#sendtransaction) - JSON-RPC method -2. Directly to leaders via a - [TPU Client](https://docs.rs/solana-client/1.7.3/solana_client/tpu_client/index.html) - -The vast majority of end-users will submit transactions via an RPC server. When -a client submits a transaction, the receiving RPC node will in turn attempt to -broadcast the transaction to both the current and next leaders. Until the -transaction is processed by a leader, there is no record of the transaction -outside of what the client and the relaying RPC nodes are aware of. In the case -of a TPU client, rebroadcast and leader forwarding is handled entirely by the -client software. - -![Transaction Journey](../../static/img/rt-tx-journey.png) - - - -### How RPC Nodes Broadcast Transactions - -After an RPC node receives a transaction via `sendTransaction`, it will convert -the transaction into a -[UDP](https://en.wikipedia.org/wiki/User_Datagram_Protocol) packet before -forwarding it to the relevant leaders. UDP allows validators to quickly -communicate with one another, but does not provide any guarantees regarding -transaction delivery. - -Because Solana’s leader schedule is known in advance of every -[epoch](../terminology#epoch) (~2 days), an RPC node will -broadcast its transaction directly to the current and next leaders. This is in -contrast to other gossip protocols such as Ethereum that propagate transactions -randomly and broadly across the entire network. By default, RPC nodes will try -to forward transactions to leaders every two seconds until either the -transaction is finalized or the transaction’s blockhash expires (150 blocks or -~1 minute 19 seconds as of the time of this writing). 
If the outstanding -rebroadcast queue size is greater than -[10,000 transactions](https://github.com/solana-labs/solana/blob/bfbbc53dac93b3a5c6be9b4b65f679fdb13e41d9/send-transaction-service/src/send_transaction_service.rs#L20), -newly submitted transactions are dropped. There are command-line -[arguments](https://github.com/solana-labs/solana/blob/bfbbc53dac93b3a5c6be9b4b65f679fdb13e41d9/validator/src/main.rs#L1172) -that RPC operators can adjust to change the default behavior of this retry -logic. - -When an RPC node broadcasts a transaction, it will attempt to forward the -transaction to a leader’s -[Transaction Processing Unit (TPU)](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/validator.rs#L867). -The TPU processes transactions in five distinct phases: - -- [Fetch Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/fetch_stage.rs#L21) -- [SigVerify Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/tpu.rs#L91) -- [Banking Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/banking_stage.rs#L249) -- [Proof of History Service](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/poh/src/poh_service.rs) -- [Broadcast Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/tpu.rs#L136) - -![TPU Overview](../../static/img/rt-tpu-jito-labs.png) - -Of these five phases, the Fetch Stage is responsible for receiving transactions. -Within the Fetch Stage, validators will categorize incoming transactions -according to three ports: - -- [tpu](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/gossip/src/contact_info.rs#L27) - handles regular transactions such as token transfers, NFT mints, and program - instructions -- [tpu_vote](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/gossip/src/contact_info.rs#L31) - focuses exclusively on voting transactions -- [tpu_forwards](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/gossip/src/contact_info.rs#L29) - forwards unprocessed packets to the next leader if the current leader is - unable to process all transactions - -For more information on the TPU, please refer to -[this excellent writeup by Jito Labs](https://jito-labs.medium.com/solana-validator-101-transaction-processing-90bcdc271143). - -## How Transactions Get Dropped - -Throughout a transaction’s journey, there are a few scenarios in which the -transaction can be unintentionally dropped from the network. - -### Before a transaction is processed - -If the network drops a transaction, it will most likely do so before the -transaction is processed by a leader. UDP -[packet loss](https://en.wikipedia.org/wiki/Packet_loss) is the simplest reason -why this might occur. During times of intense network load, it’s also possible -for validators to become overwhelmed by the sheer number of transactions -required for processing. While validators are equipped to forward surplus -transactions via `tpu_forwards`, there is a limit to the amount of data that can -be -[forwarded](https://github.com/solana-labs/solana/blob/master/core/src/banking_stage.rs#L389). -Furthermore, each forward is limited to a single hop between validators. That -is, transactions received on the `tpu_forwards` port are not forwarded on to -other validators. 
-
-There are also two lesser-known reasons why a transaction may be dropped before
-it is processed. The first scenario involves transactions that are submitted via
-an RPC pool. Occasionally, part of the RPC pool can be sufficiently ahead of the
-rest of the pool. This can cause issues when nodes within the pool are required
-to work together. In this example, the transaction’s
-[recentBlockhash](../developing/programming-model/transactions#recent-blockhash)
-is queried from the advanced part of the pool (Backend A). When the transaction
-is submitted to the lagging part of the pool (Backend B), the nodes will not
-recognize the advanced blockhash and will drop the transaction. This can be
-detected upon transaction submission if developers enable
-[preflight checks](../api/http#sendtransaction)
-on `sendTransaction`.
-
-![Dropped via RPC Pool](../../static/img/rt-dropped-via-rpc-pool.png)
-
-Temporary network forks can also result in dropped transactions. If a
-validator is slow to replay its blocks within the Banking Stage, it may end up
-creating a minority fork. When a client builds a transaction, it’s possible for
-the transaction to reference a `recentBlockhash` that only exists on the
-minority fork. After the transaction is submitted, the cluster can then switch
-away from its minority fork before the transaction is processed. In this
-scenario, the transaction is dropped due to the blockhash not being found.
-
-![Dropped due to Minority Fork (Before Processed)](../../static/img/rt-dropped-minority-fork-pre-process.png)
-
-### After a transaction is processed and before it is finalized
-
-In the event a transaction references a `recentBlockhash` from a minority fork,
-it’s still possible for the transaction to be processed. In this case, however,
-it would be processed by the leader on the minority fork. When this leader
-attempts to share its processed transactions with the rest of the network, it
-would fail to reach consensus with the majority of validators that do not
-recognize the minority fork. At this time, the transaction would be dropped
-before it could be finalized.
-
-![Dropped due to Minority Fork (After Processed)](../../static/img/rt-dropped-minority-fork-post-process.png)
-
-## Handling Dropped Transactions
-
-While RPC nodes will attempt to rebroadcast transactions, the algorithm they
-employ is generic and often ill-suited for the needs of specific applications.
-To prepare for times of network congestion, application developers should
-customize their own rebroadcasting logic.
-
-### An In-Depth Look at sendTransaction
-
-When it comes to submitting transactions, the `sendTransaction` RPC method is
-the primary tool available to developers. `sendTransaction` is only responsible
-for relaying a transaction from a client to an RPC node. If the node receives
-the transaction, `sendTransaction` will return the transaction id that can be
-used to track the transaction. A successful response does not indicate whether
-the transaction will be processed or finalized by the cluster.
-
-:::note
-
-### Request Parameters
-
-- `transaction`: `string` - fully-signed Transaction, as encoded string
-- (optional) `configuration object`: `object`
-  - `skipPreflight`: `boolean` - if true, skip the preflight transaction checks
-    (default: false)
-  - (optional) `preflightCommitment`: `string` -
-    [Commitment](../api/http#configuring-state-commitment)
-    level to use for preflight simulations against the bank slot (default:
-    "finalized").
- - (optional) `encoding`: `string` - Encoding used for the transaction data. - Either "base58" (slow), or "base64". (default: "base58"). - - (optional) `maxRetries`: `usize` - Maximum number of times for the RPC node - to retry sending the transaction to the leader. If this parameter is not - provided, the RPC node will retry the transaction until it is finalized or - until the blockhash expires. - -Response - -- `transaction id`: `string` - First transaction signature embedded in the - transaction, as base-58 encoded string. This transaction id can be used with - [`getSignatureStatuses`](../api/http#getsignaturestatuses) - to poll for status updates. - -::: - -## Customizing Rebroadcast Logic - -In order to develop their own rebroadcasting logic, developers should take -advantage of `sendTransaction`’s `maxRetries` parameter. If provided, -`maxRetries` will override an RPC node’s default retry logic, allowing -developers to manually control the retry process -[within reasonable bounds](https://github.com/solana-labs/solana/blob/98707baec2385a4f7114d2167ef6dfb1406f954f/validator/src/main.rs#L1258-L1274). - -A common pattern for manually retrying transactions involves temporarily storing -the `lastValidBlockHeight` that comes from -[getLatestBlockhash](../api/http#getlatestblockhash). -Once stashed, an application can then -[poll the cluster’s blockheight](../api/http#getblockheight) -and manually retry the transaction at an appropriate interval. In times of -network congestion, it’s advantageous to set `maxRetries` to 0 and manually -rebroadcast via a custom algorithm. While some applications may employ an -[exponential backoff](https://en.wikipedia.org/wiki/Exponential_backoff) -algorithm, others such as [Mango](https://www.mango.markets/) opt to -[continuously resubmit](https://github.com/blockworks-foundation/mango-ui/blob/b6abfc6c13b71fc17ebbe766f50b8215fa1ec54f/src/utils/send.tsx#L713) -transactions at a constant interval until some timeout has occurred. 
- -```ts -import { - Keypair, - Connection, - LAMPORTS_PER_SOL, - SystemProgram, - Transaction, -} from "@solana/web3.js"; -import * as nacl from "tweetnacl"; - -const sleep = async (ms: number) => { - return new Promise((r) => setTimeout(r, ms)); -}; - -(async () => { - const payer = Keypair.generate(); - const toAccount = Keypair.generate().publicKey; - - const connection = new Connection("http://127.0.0.1:8899", "confirmed"); - - const airdropSignature = await connection.requestAirdrop( - payer.publicKey, - LAMPORTS_PER_SOL, - ); - - await connection.confirmTransaction({ signature: airdropSignature }); - - const blockhashResponse = await connection.getLatestBlockhashAndContext(); - const lastValidBlockHeight = blockhashResponse.context.slot + 150; - - const transaction = new Transaction({ - feePayer: payer.publicKey, - blockhash: blockhashResponse.value.blockhash, - lastValidBlockHeight: lastValidBlockHeight, - }).add( - SystemProgram.transfer({ - fromPubkey: payer.publicKey, - toPubkey: toAccount, - lamports: 1000000, - }), - ); - const message = transaction.serializeMessage(); - const signature = nacl.sign.detached(message, payer.secretKey); - transaction.addSignature(payer.publicKey, Buffer.from(signature)); - const rawTransaction = transaction.serialize(); - let blockheight = await connection.getBlockHeight(); - - while (blockheight < lastValidBlockHeight) { - connection.sendRawTransaction(rawTransaction, { - skipPreflight: true, - }); - await sleep(500); - blockheight = await connection.getBlockHeight(); - } -})(); -``` - -When polling via `getLatestBlockhash`, applications should specify their -intended -[commitment](../api/http#configuring-state-commitment) -level. By setting its commitment to `confirmed` (voted on) or `finalized` (~30 -blocks after `confirmed`), an application can avoid polling a blockhash from a -minority fork. - -If an application has access to RPC nodes behind a load balancer, it can also -choose to divide its workload amongst specific nodes. RPC nodes that serve -data-intensive requests such as -[getProgramAccounts](https://solanacookbook.com/guides/get-program-accounts.html) -may be prone to falling behind and can be ill-suited for also forwarding -transactions. For applications that handle time-sensitive transactions, it may -be prudent to have dedicated nodes that only handle `sendTransaction`. - -### The Cost of Skipping Preflight - -By default, `sendTransaction` will perform three preflight checks prior to -submitting a transaction. Specifically, `sendTransaction` will: - -- Verify that all signatures are valid -- Check that the referenced blockhash is within the last 150 blocks -- Simulate the transaction against the bank slot specified by the - `preflightCommitment` - -In the event that any of these three preflight checks fail, `sendTransaction` -will raise an error prior to submitting the transaction. Preflight checks can -often be the difference between losing a transaction and allowing a client to -gracefully handle an error. To ensure that these common errors are accounted -for, it is recommended that developers keep `skipPreflight` set to `false`. - -### When to Re-Sign Transactions - -Despite all attempts to rebroadcast, there may be times in which a client is -required to re-sign a transaction. Before re-signing any transaction, it is -**very important** to ensure that the initial transaction’s blockhash has -expired. If the initial blockhash is still valid, it is possible for both -transactions to be accepted by the network. 
To an end-user, this would appear as -if they unintentionally sent the same transaction twice. - -In Solana, a dropped transaction can be safely discarded once the blockhash it -references is older than the `lastValidBlockHeight` received from -`getLatestBlockhash`. Developers should keep track of this -`lastValidBlockHeight` by querying -[`getEpochInfo`](../api/http#getepochinfo) -and comparing with `blockHeight` in the response. Once a blockhash is -invalidated, clients may re-sign with a newly-queried blockhash. diff --git a/docs/src/introduction.md b/docs/src/introduction.md deleted file mode 100644 index 4a1e9c1a5e3045..00000000000000 --- a/docs/src/introduction.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Introduction ---- - -## What is Solana? - -Solana is an open source project implementing a new, high-performance, permissionless blockchain. The Solana Foundation is based in Geneva, Switzerland and maintains the open source project. - -## Why Solana? - -It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot rely upon one another. Once nodes can rely upon time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! - -> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078) - -Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. 
As a Bitcoin client, you would use block height instead of a timestamp if you don't rely upon the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second. - -## Documentation Overview - -The Solana docs describe the Solana open source project, a blockchain built from the ground up for scale. They cover why Solana is useful, how to use it, how it works, and why it will continue to work long after the company Solana closes its doors. The goal of the Solana architecture is to demonstrate there exists a set of software algorithms that when used in combination to implement a blockchain, removes software as a performance bottleneck, allowing transaction throughput to scale proportionally with network bandwidth. The architecture goes on to satisfy all three desirable properties of a proper blockchain: it is scalable, secure and decentralized. - -The architecture describes a theoretical upper bound of 710 thousand transactions per second \(tps\) on a standard gigabit network and 28.4 million tps on 40 gigabit. Furthermore, the architecture supports safe, concurrent execution of programs authored in general-purpose programming languages such as C or Rust. - -## What is a Solana Cluster? - -A cluster is a set of computers that work together and can be viewed from the outside as a single system. A Solana cluster is a set of independently owned computers working together \(and sometimes against each other\) to verify the output of untrusted, user-submitted programs. A Solana cluster can be utilized any time a user wants to preserve an immutable record of events in time or programmatic interpretations of those events. One use is to track which of the computers did meaningful work to keep the cluster running. Another use might be to track the possession of real-world assets. In each case, the cluster produces a record of events called the ledger. It will be preserved for the lifetime of the cluster. As long as someone somewhere in the world maintains a copy of the ledger, the output of its programs \(which may contain a record of who possesses what\) will forever be reproducible, independent of the organization that launched it. - -## What are SOLs? - -A SOL is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional SOLs, which are called _lamports_. They are named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of 0.000000001 SOL. - -## Disclaimer - -All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment. 
diff --git a/docs/src/learn/state-compression.md b/docs/src/learn/state-compression.md
deleted file mode 100644
index 993544944b3d06..00000000000000
--- a/docs/src/learn/state-compression.md
+++ /dev/null
@@ -1,334 +0,0 @@
----
-title: State Compression
-description:
-  'State Compression is the method of cheaply and securely storing
-  "fingerprints" of off-chain data in the Solana ledger, instead of expensive
-  accounts.'
----
-
-On Solana, [State Compression](./state-compression.md) is the method of creating
-a "fingerprint" (or hash) of off-chain data and storing this fingerprint
-on-chain for secure verification. This effectively uses the security of the
-Solana ledger to validate off-chain data, verifying it has not been tampered
-with.
-
-This method of "compression" allows Solana programs and dApps to use cheap
-blockchain [ledger](./../terminology.md#ledger) space, instead of the more
-expensive [account](./../terminology.md#account) space, to securely store data.
-
-This is accomplished by using a special binary tree structure, known as a
-[concurrent merkle tree](#what-is-a-concurrent-merkle-tree), to create a hash of
-each piece of data (called a `leaf`), hashing those together, and only storing
-this final hash on-chain.
-
-## What is State Compression?
-
-In simple terms, state compression uses "**_tree_**" structures to
-cryptographically hash off-chain data together, in a deterministic way, to
-compute a single final hash that gets stored on-chain.
-
-These _trees_ are created in this "_deterministic_" process by:
-
-- taking any piece of data
-- creating a hash of this data
-- storing this hash as a `leaf` at the bottom of the tree
-- each `leaf` pair is then hashed together, creating a `branch`
-- each `branch` is then hashed together
-- continually climbing the tree and hashing adjacent branches together
-- once at the top of the tree, a final `root hash` is produced
-
-This `root hash` is then stored on chain, as a verifiable **_proof_** of all of
-the data within every leaf. This allows anyone to cryptographically verify all
-the off-chain data within the tree, while only actually storing a **minimal**
-amount of data on-chain, significantly reducing the cost to store/prove large
-amounts of data due to this "state compression".
-
-## Merkle trees and concurrent merkle trees
-
-Solana's state compression uses a special type of
-[merkle tree](#what-is-a-merkle-tree) that allows for multiple changes to any
-given tree to happen, while still maintaining the integrity and validity of the
-tree.
-
-This special tree, known as a
-"[concurrent merkle tree](#what-is-a-concurrent-merkle-tree)", effectively
-retains a "changelog" of the tree on-chain, allowing for multiple rapid changes
-to the same tree (i.e. all in the same block) before a proof is invalidated.
-
-### What is a merkle tree?
-
-A [merkle tree](https://en.wikipedia.org/wiki/merkle_tree), sometimes called a
-"hash tree", is a hash-based binary tree structure where each `leaf` node is
-represented as a cryptographic hash of its inner data. And every node that is
-**not** a leaf, called a `branch`, is represented as a hash of its child leaf
-hashes.
-
-Each branch is then also hashed together, climbing the tree, until eventually
-only a single hash remains. This final hash, called the `root hash` or "root",
-can then be used in combination with a "proof path" to verify any piece of data
-stored within a leaf node.
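-
-To make the proof-path mechanics concrete, here is a minimal, generic sketch of
-leaf verification in TypeScript. It uses SHA-256 over simple byte concatenation
-for illustration only; the exact hashing scheme used by Solana's on-chain trees
-differs:
-
-```ts
-import { createHash } from "crypto";
-
-const hash = (...buffers: Buffer[]): Buffer =>
-  createHash("sha256").update(Buffer.concat(buffers)).digest();
-
-// Rehash from the leaf up to the root using the sibling hashes in `proof`.
-// `index` is the leaf's position, used to pick left/right ordering per level.
-function verifyLeaf(
-  leafData: Buffer,
-  proof: Buffer[],
-  index: number,
-  root: Buffer,
-): boolean {
-  let node = hash(leafData);
-  for (const sibling of proof) {
-    node = index % 2 === 0 ? hash(node, sibling) : hash(sibling, node);
-    index = Math.floor(index / 2);
-  }
-  return node.equals(root);
-}
-```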
-
-Once a final `root hash` has been computed, any piece of data stored within a
-`leaf` node can be verified by rehashing the specific leaf's data and the hash
-label of each adjacent branch climbing the tree (known as the `proof` or "proof
-path"). Comparing this "rehash" to the `root hash` is the verification of the
-underlying leaf data. If they match, the data is verified accurate. If they do
-not match, the leaf data was changed.
-
-Whenever desired, the original leaf data can be changed by simply hashing the
-**new leaf** data and recomputing the root hash in the same manner of the
-original root. This **new root hash** is then used to verify any of the data,
-and effectively invalidates the previous root hash and previous proof.
-Therefore, each change to these _traditional merkle trees_ is required to be
-performed in series.
-
-:::info
-
-This process of changing leaf data, and computing a new root hash can be a
-**very common** thing when using merkle trees! While it is one of the design
-points of the tree, it can result in one of the most notable drawbacks: rapid
-changes.
-
-:::
-
-### What is a Concurrent merkle tree?
-
-In high throughput applications, like within the
-[Solana runtime](/src/validator/runtime.md), requests to change an on-chain
-_traditional merkle tree_ could be received by validators in relatively rapid
-succession (e.g. within the same slot). Each leaf data change would still be
-required to be performed in series. This results in each subsequent change
-request failing, due to the root hash and proof being invalidated by the
-previous change request in the slot.
-
-Enter, Concurrent merkle trees.
-
-A **Concurrent merkle tree** stores a **secure changelog** of the most recent
-changes, their root hash, and the proof to derive it. This changelog "buffer" is
-stored on-chain in an account specific to each tree, with a maximum number of
-changelog "records" (aka `maxBufferSize`).
-
-When multiple leaf data change requests are received by validators in the same
-slot, the on-chain _concurrent merkle tree_ can use this "changelog buffer" as a
-source of truth for more acceptable proofs. This effectively allows for up to
-`maxBufferSize` changes to the same tree in the same slot, significantly
-boosting throughput.
-
-## Sizing a concurrent merkle tree
-
-When creating one of these on-chain trees, there are 3 values that will
-determine the size of your tree, the cost to create your tree, and the number of
-concurrent changes to your tree:
-
-1. max depth
-2. max buffer size
-3. canopy depth
-
-### Max depth
-
-The "max depth" of a tree is the **maximum number** of hops to get from any data
-`leaf` to the `root` of the tree.
-
-Since merkle trees are binary trees, every leaf is connected to **only one**
-other leaf; existing as a `leaf pair`.
-
-Therefore, the `maxDepth` of a tree is used to determine the maximum number of
-nodes (aka pieces of data or `leafs`) to store within the tree using a simple
-calculation:
-
-```
-nodes_count = 2 ^ maxDepth
-```
-
-Since a tree's depth must be set at tree creation, you must decide how many
-pieces of data you want your tree to store. Then using the simple calculation
-above, you can determine the lowest `maxDepth` to store your data.
-
-#### Example 1: minting 100 nfts
-
-If you wanted to create a tree to store 100 compressed nfts, you will need a
-minimum of "100 leafs" or "100 nodes".
-
-```
-// maxDepth=6 -> 64 nodes
-2^6 = 64
-
-// maxDepth=7 -> 128 nodes
-2^7 = 128
-```
-
-We must use a `maxDepth` of `7` to ensure we can store all of our data.
-
-#### Example 2: minting 15000 nfts
-
-If you wanted to create a tree to store 15000 compressed nfts, you will need a
-minimum of "15000 leafs" or "15000 nodes".
-
-```
-// maxDepth=13 -> 8192 nodes
-2^13 = 8192
-
-// maxDepth=14 -> 16384 nodes
-2^14 = 16384
-```
-
-We must use a `maxDepth` of `14` to ensure we can store all of our data.
-
-#### The higher the max depth, the higher the cost
-
-The `maxDepth` value will be one of the primary drivers of cost when creating a
-tree since you will pay this cost upfront at tree creation. The higher the max
-tree depth, the more data fingerprints (aka hashes) you can store, the higher
-the cost.
-
-### Max buffer size
-
-The "max buffer size" is effectively the maximum number of changes that can
-occur on a tree, with the `root hash` still being valid.
-
-Due to the root hash effectively being a single hash of all leaf data, changing
-any single leaf would invalidate the proof needed for all subsequent attempts to
-change any leaf of a regular tree.
-
-But with a [concurrent tree](#what-is-a-concurrent-merkle-tree), there is
-effectively a changelog of updates for these proofs. This changelog buffer is
-sized and set at tree creation via this `maxBufferSize` value.
-
-### Canopy depth
-
-The "canopy depth", sometimes called the canopy size, is the number of proof
-nodes that are cached/stored on-chain for any given proof path.
-
-When performing an update action on a `leaf`, like transferring ownership (e.g.
-selling a compressed NFT), the **complete** proof path must be used to verify
-original ownership of the leaf and therefore allow for the update action. This
-verification is performed using the **complete** proof path to correctly compute
-the current `root hash` (or any cached `root hash` via the on-chain "concurrent
-buffer").
-
-The larger a tree's max depth is, the more proof nodes are required to perform
-this verification. For example, if your max depth is `14`, there are `14` total
-proof nodes required to be used to verify. As a tree gets larger, the complete
-proof path gets larger.
-
-Normally, each of these proof nodes would be required to be included within each
-tree update transaction. Since each proof node value takes up `32 bytes` in a
-transaction (similar to providing a Public Key), larger trees would very quickly
-exceed the maximum transaction size limit.
-
-Enter the canopy. The canopy enables storing a set number of proof nodes on
-chain (for any given proof path), allowing fewer proof nodes to be included
-within each update transaction and therefore keeping the overall transaction
-size below the limit.
-
-For example, a tree with a max depth of `14` would require `14` total proof
-nodes. With a canopy of `10`, only `4` proof nodes are required to be submitted
-per update transaction.
-
-#### The larger the canopy depth value, the higher the cost
-
-The `canopyDepth` value is also a primary factor of cost when creating a tree
-since you will pay this cost upfront at tree creation. The higher the canopy
-depth, the more proof nodes are stored on chain, the higher the cost.
-
-#### Smaller canopy limits composability
-
-While a tree's creation costs are higher with a higher canopy, having a lower
-`canopyDepth` will require more proof nodes to be included within each update
-transaction.
-The more nodes required to be submitted, the larger the transaction size, and
-therefore the easier it is to exceed the transaction size limits.
-
-This will also be the case for any other Solana program or dApp that attempts to
-interact with your tree/leafs. If your tree requires too many proof nodes
-(because of a low canopy depth), then any other additional actions another
-on-chain program **could** offer will be **limited** by their specific
-instruction size plus your proof node list size, limiting composability and
-potential additional utility for your specific tree.
-
-For example, if your tree is being used for compressed NFTs and has a very low
-canopy depth, an NFT marketplace may only be able to support simple NFT
-transfers and not be able to support an on-chain bidding system.
-
-## Cost of creating a tree
-
-The cost of creating a concurrent merkle tree is based on the tree's size
-parameters: `maxDepth`, `maxBufferSize`, and `canopyDepth`. These values are all
-used to calculate the on-chain storage (in bytes) required for a tree to exist
-on chain.
-
-Once the required space (in bytes) has been calculated, use the
-[`getMinimumBalanceForRentExemption`](/api/http#getminimumbalanceforrentexemption)
-RPC method to request the cost (in lamports) to allocate this number of bytes
-on-chain.
-
-### Calculate tree cost in JavaScript
-
-Within the
-[`@solana/spl-account-compression`](https://www.npmjs.com/package/@solana/spl-account-compression)
-package, developers can use the
-[`getConcurrentMerkleTreeAccountSize`](https://solana-labs.github.io/solana-program-library/account-compression/sdk/docs/modules/index.html#getConcurrentMerkleTreeAccountSize)
-function to calculate the required space for given tree size parameters.
-
-Then use the
-[`getMinimumBalanceForRentExemption`](https://solana-labs.github.io/solana-web3.js/classes/Connection.html#getMinimumBalanceForRentExemption)
-function to get the final cost (in lamports) to allocate the required space for
-the tree on-chain, similar to any other account creation.
-
-```ts
-// calculate the space required for the tree
-const requiredSpace = getConcurrentMerkleTreeAccountSize(
-  maxDepth,
-  maxBufferSize,
-  canopyDepth,
-);
-
-// get the cost (in lamports) to store the tree on-chain
-const storageCost = await connection.getMinimumBalanceForRentExemption(
-  requiredSpace,
-);
-```
-
-### Example costs
-
-Listed below are several example costs, for different tree sizes, including how
-many leaf nodes are possible for each:
-
-**Example #1: 16,384 nodes costing 0.222 SOL**
-
-- max depth of `14` and max buffer size of `64`
-- maximum number of leaf nodes: `16,384`
-- canopy depth of `0` costs approximately `0.222 SOL` to create
-
-**Example #2: 16,384 nodes costing 1.134 SOL**
-
-- max depth of `14` and max buffer size of `64`
-- maximum number of leaf nodes: `16,384`
-- canopy depth of `11` costs approximately `1.134 SOL` to create
-
-**Example #3: 1,048,576 nodes costing 1.673 SOL**
-
-- max depth of `20` and max buffer size of `256`
-- maximum number of leaf nodes: `1,048,576`
-- canopy depth of `10` costs approximately `1.673 SOL` to create
-
-**Example #4: 1,048,576 nodes costing 15.814 SOL**
-
-- max depth of `20` and max buffer size of `256`
-- maximum number of leaf nodes: `1,048,576`
-- canopy depth of `15` costs approximately `15.814 SOL` to create
-
-## Compressed NFTs
-
-Compressed NFTs are one of the most popular use cases for State Compression on
-Solana. With compression, a one million NFT collection could be minted for
-`~50 SOL`, versus `~12,000 SOL` for its uncompressed equivalent collection.
-
-:::info Developer Guide
-
-Read our developer guide for
-[minting and transferring compressed NFTs](./../developing/guides/compressed-nfts).
-
-:::
diff --git a/docs/src/operations/_category_.json b/docs/src/operations/_category_.json
new file mode 100644
index 00000000000000..a32cdd91fe0f18
--- /dev/null
+++ b/docs/src/operations/_category_.json
@@ -0,0 +1,10 @@
+{
+  "position": 4,
+  "label": "Operating a Validator",
+  "collapsible": true,
+  "collapsed": true,
+  "link": {
+    "type": "doc",
+    "id": "operations/index"
+  }
+}
diff --git a/docs/src/operations/best-practices/_category_.json b/docs/src/operations/best-practices/_category_.json
new file mode 100644
index 00000000000000..caf50117dc33df
--- /dev/null
+++ b/docs/src/operations/best-practices/_category_.json
@@ -0,0 +1,7 @@
+{
+  "position": 7,
+  "label": "Best Practices",
+  "collapsible": true,
+  "collapsed": true,
+  "link": null
+}
diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md
new file mode 100644
index 00000000000000..3bd0f906f729a7
--- /dev/null
+++ b/docs/src/operations/best-practices/general.md
@@ -0,0 +1,236 @@
+---
+title: Validator Operations Best Practices
+sidebar_label: General Operations
+---
+
+After you have successfully set up and started a
+[validator on testnet](../setup-a-validator.md) (or another cluster
+of your choice), you will want to become familiar with how to operate your
+validator on a day-to-day basis. During daily operations, you will be
+[monitoring your server](./monitoring.md), updating software regularly (both the
+Solana validator software and operating system packages), and managing your vote
+account and identity account.
+
+All of these skills are critical to practice. Maximizing your validator uptime
+is an important part of being a good operator.
+
+## Educational Workshops
+
+The Solana validator community holds regular educational workshops.
+You can watch past workshops through the
+[Solana validator educational workshops playlist](https://www.youtube.com/watch?v=86zySQ5vGW8&list=PLilwLeBwGuK6jKrmn7KOkxRxS9tvbRa5p).
+
+## Help with the validator command line
+
+From within the Solana CLI, you can execute the `solana-validator` command with
+the `--help` flag to get a better understanding of the flags and subcommands
+available.
+
+```
+solana-validator --help
+```
+
+## Restarting your validator
+
+There are many operational reasons you may want to restart your validator. As a
+best practice, you should avoid a restart during a leader slot. A
+[leader slot](https://solana.com/docs/terminology#leader-schedule) is the time
+when your validator is expected to produce blocks. For the health of the cluster
+and also for your validator's ability to earn transaction fee rewards, you do
+not want your validator to be offline during an opportunity to produce blocks.
+
+To see the full leader schedule for an epoch, use the following command:
+
+```
+solana leader-schedule
+```
+
+Based on the current slot and the leader schedule, you can calculate open time
+windows where your validator is not expected to produce blocks.
+
+Assuming you are ready to restart, you may use the `solana-validator exit`
+command. The command exits your validator process when an appropriate idle time
+window is reached. Assuming that you have systemd implemented for your validator
+process, the validator should restart automatically after the exit. See the
+below help command for details:
+
+```
+solana-validator exit --help
+```
+
+## Upgrading
+
+There are many ways to upgrade the
+[Solana CLI software](../../cli/install.md). As an operator, you
+will need to upgrade often, so it is important to get comfortable with this
+process.
+
+> **Note** validator nodes do not need to be offline while the newest version is
+> being downloaded or built from source. All methods below can be done before
+> the validator process is restarted.
+
+### Building From Source
+
+It is a best practice to always build your Solana binaries from source. If you
+build from source, you are certain that the code you are building has not been
+tampered with before the binary was created. You may also be able to optimize
+your `solana-validator` binary to your specific hardware.
+
+If you build from source on the validator machine (or a machine with the same
+CPU), you can target your specific architecture using the `-march` flag. Refer
+to the following doc for
+[instructions on building from source](../../cli/install.md#build-from-source).
+
+### solana-install
+
+If you are not comfortable building from source, or you need to quickly install
+a new version to test something out, you could instead try using the
+`solana-install` command.
+
+Assuming you want to install Solana version `1.14.17`, you would execute the
+following:
+
+```
+solana-install init 1.14.17
+```
+
+This command downloads the executable for `1.14.17` and installs it into a
+`.local` directory. You can also look at `solana-install --help` for more
+options.
+
+> **Note** this command only works if you already have the Solana CLI installed.
+> If you do not have the CLI installed, refer to
+> [install solana cli tools](../../cli/install.md)
+
+### Restart
+
+For all install methods, the validator process will need to be restarted before
+the newly installed version is in use. Use `solana-validator exit` to restart
+your validator process.
+
+### Verifying version
+
+The best way to verify that your validator process has changed to the desired
+version is to grep the logs after a restart. The following grep command should
+show you the version that your validator restarted with:
+
+```
+grep -B1 'Starting validator with' <path/to/logfile>
+```
+
+## Snapshots
+
+Validator operators who have not experienced significant downtime (multiple
+hours of downtime) should avoid downloading snapshots. It is important for the
+health of the cluster as well as your validator history to maintain the local
+ledger. Therefore, you should not download a new snapshot any time your
+validator is offline or experiences an issue. Downloading a snapshot should be
+reserved only for occasions when you do not have local state. Prolonged downtime
+or the first install of a new validator are examples of times when you may not
+have state locally. In other cases such as restarts for upgrades, a snapshot
+download should be avoided.
+
+To avoid downloading a snapshot on restart, add the following flag to the
+`solana-validator` command:
+
+```
+--no-snapshot-fetch
+```
+
+If you use this flag with the `solana-validator` command, make sure that you run
+`solana catchup <validator-identity-pubkey>` after your validator starts to
+confirm that the validator is catching up in a reasonable time. After some time
+(potentially a few hours), if it appears that your validator continues to fall
+behind, then you may have to download a new snapshot.
+
+### Downloading Snapshots
+
+If you are starting a validator for the first time, or your validator has fallen
+too far behind after a restart, then you may have to download a snapshot.
+
+To download a snapshot, you must **_NOT_** use the `--no-snapshot-fetch` flag.
+Without the flag, your validator will automatically download a snapshot from
+the known validators that you specified with the `--known-validator` flag.
+
+If the download from one of the known validators is slow, you can try adding the
+`--minimal-snapshot-download-speed` flag to your validator. This flag will
+switch to another known validator if the initial download speed is below the
+threshold that you set.
+
+### Manually Downloading Snapshots
+
+If there are network troubles with one or more of your known validators, then
+you may have to download the snapshot manually. To manually download a snapshot
+from one of your known validators, first find the IP address of the validator
+using the `solana gossip` command. In the example below,
+`5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` is the pubkey of one of my known
+validators:
+
+```
+solana gossip | grep 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on
+```
+
+The IP address of the validator is `139.178.68.207` and the open port on this
+validator is `80`. You can see the IP address and port in the fifth column in
+the gossip output:
+
+```
+139.178.68.207 | 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on | 8001 | 8004 | 139.178.68.207:80 | 1.10.27 | 1425680972
+```
+
+Now that the IP and port are known, you can download a full snapshot or an
+incremental snapshot:
+
+```
+wget --trust-server-names http://139.178.68.207:80/snapshot.tar.bz2
+wget --trust-server-names http://139.178.68.207:80/incremental-snapshot.tar.bz2
+```
+
+Now move those files into your snapshot directory. If you have not specified a
+snapshot directory, then you should put the files in your ledger directory.
+
+Once you have a local snapshot, you can restart your validator with the
+`--no-snapshot-fetch` flag. 
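+
+After restarting with `--no-snapshot-fetch`, you can confirm from the validator
+host itself that the node is catching up to the cluster. A minimal sketch,
+assuming the validator's RPC service is listening on the default localhost
+port:
+
+```
+# compare the local node's slot height against the cluster tip
+solana catchup --our-localhost
+```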
+
+## Regularly Check Account Balances
+
+It is important that you do not accidentally run out of funds in your identity
+account, as your node will stop voting. It is also important to note that this
+account keypair is the most vulnerable of the three keypairs in a vote account
+because the keypair for the identity account is stored on your validator when
+running the `solana-validator` software. How much SOL you should store there is
+up to you. As a best practice, make sure to check the account regularly and
+refill or deduct from it as needed. To check the account balance, run:
+
+```
+solana balance validator-keypair.json
+```
+
+> **Note** `solana-watchtower` can monitor for a minimum validator identity
+> balance. See [monitoring best practices](./monitoring.md) for details.
+
+## Withdrawing From The Vote Account
+
+As a reminder, your withdrawer's keypair should **_NEVER_** be stored on your
+server. Storing it on a hardware wallet, paper wallet, or in a multisig
+mitigates the risk of hacking and theft of funds.
+
+To withdraw your funds from your vote account, you will need to run
+`solana withdraw-from-vote-account` on a trusted computer. For example, on a
+trusted computer, you could withdraw all of the funds from your vote account
+(excluding the rent exempt minimum). The example below assumes you have a
+separate keypair to store your funds called `person-keypair.json`.
+
+```
+solana withdraw-from-vote-account \
+  vote-account-keypair.json \
+  person-keypair.json ALL \
+  --authorized-withdrawer authorized-withdrawer-keypair.json
+```
+
+To get more information on the command, use
+`solana withdraw-from-vote-account --help`.
+
+For a more detailed explanation of the different keypairs and other related
+operations refer to
+[vote account management](../guides/vote-accounts.md).
diff --git a/docs/src/validator/best-practices/monitoring.md b/docs/src/operations/best-practices/monitoring.md
similarity index 100%
rename from docs/src/validator/best-practices/monitoring.md
rename to docs/src/operations/best-practices/monitoring.md
diff --git a/docs/src/validator/best-practices/security.md b/docs/src/operations/best-practices/security.md
similarity index 94%
rename from docs/src/validator/best-practices/security.md
rename to docs/src/operations/best-practices/security.md
index 1e533dc860ce32..d53491c115ae9f 100644
--- a/docs/src/validator/best-practices/security.md
+++ b/docs/src/operations/best-practices/security.md
@@ -26,7 +26,7 @@ To reiterate, the withdrawer keypair should never be stored on your validator at
 
 It may be easier to get started by running your application as root, but it is a bad practice.
 
-If there is an exploit in your system, a hacker could have full access if your Solana application is running as the `root` user. Instead, see the [setup instructions](../get-started/setup-a-validator.md#sol-user) for creating a user called `sol` and running the application as the `sol` user.
+If there is an exploit in your system, a hacker could have full access if your Solana application is running as the `root` user. Instead, see the [setup instructions](../setup-a-validator.md#sol-user) for creating a user called `sol` and running the application as the `sol` user. 
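+
+As a minimal sketch of that setup, assuming an Ubuntu system with the standard
+`adduser` tool:
+
+```
+# create an unprivileged user to run the validator software
+sudo adduser sol
+
+# switch to the sol user before starting the validator
+su - sol
+```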
## Close Ports That Are Not In Use diff --git a/docs/src/operations/guides/_category_.json b/docs/src/operations/guides/_category_.json new file mode 100644 index 00000000000000..133867d408828e --- /dev/null +++ b/docs/src/operations/guides/_category_.json @@ -0,0 +1,7 @@ +{ + "position": 8, + "label": "Validator Guides", + "collapsible": true, + "collapsed": true, + "link": null +} diff --git a/docs/src/running-validator/restart-cluster.md b/docs/src/operations/guides/restart-cluster.md similarity index 100% rename from docs/src/running-validator/restart-cluster.md rename to docs/src/operations/guides/restart-cluster.md diff --git a/docs/src/running-validator/validator-failover.md b/docs/src/operations/guides/validator-failover.md similarity index 100% rename from docs/src/running-validator/validator-failover.md rename to docs/src/operations/guides/validator-failover.md diff --git a/docs/src/running-validator/validator-info.md b/docs/src/operations/guides/validator-info.md similarity index 100% rename from docs/src/running-validator/validator-info.md rename to docs/src/operations/guides/validator-info.md diff --git a/docs/src/running-validator/validator-monitor.md b/docs/src/operations/guides/validator-monitor.md similarity index 100% rename from docs/src/running-validator/validator-monitor.md rename to docs/src/operations/guides/validator-monitor.md diff --git a/docs/src/running-validator/validator-stake.md b/docs/src/operations/guides/validator-stake.md similarity index 66% rename from docs/src/running-validator/validator-stake.md rename to docs/src/operations/guides/validator-stake.md index 71c9cd213016e8..85da5c3380316a 100644 --- a/docs/src/running-validator/validator-stake.md +++ b/docs/src/operations/guides/validator-stake.md @@ -46,7 +46,8 @@ and then delegating that stake to your validator: solana delegate-stake ~/validator-stake-keypair.json ~/vote-account-keypair.json ``` -> Don’t delegate your remaining SOL, as your validator will use those tokens to vote. +> Don’t delegate your remaining SOL, as your validator will use those tokens to +> vote. Stakes can be re-delegated to another node at any time with the same command, but only one re-delegation is permitted per epoch: @@ -57,23 +58,30 @@ solana delegate-stake ~/validator-stake-keypair.json ~/some-other-vote-account-k ## Validator Stake Warm-up -To combat various attacks on consensus, new stake delegations are subject to -a [warm-up](/staking/stake-accounts#delegation-warmup-and-cooldown) -period. +To combat various attacks on consensus, new stake delegations are subject to a +[warm-up](https://solana.com/docs/economics/staking/stake-accounts#delegation-warmup-and-cooldown) period. Monitor a validator's stake during warmup by: -- View your vote account:`solana vote-account ~/vote-account-keypair.json` This displays the current state of all the votes the validator has submitted to the network. -- View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json` -- `solana validators` displays the current active stake of all validators, including yours -- `solana stake-history` shows the history of stake warming up and cooling down over recent epochs -- Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] voted and reset PoH at tick height ####. 
My next leader slot is ####` -- Once your stake is warmed up, you will see a stake balance listed for your validator by running `solana validators` +- View your vote account:`solana vote-account ~/vote-account-keypair.json` This + displays the current state of all the votes the validator has submitted to the + network. +- View your stake account, the delegation preference and details of your + stake:`solana stake-account ~/validator-stake-keypair.json` +- `solana validators` displays the current active stake of all validators, + including yours +- `solana stake-history` shows the history of stake warming up and cooling down + over recent epochs +- Look for log messages on your validator indicating your next leader slot: + `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] voted and reset PoH at tick height ####. My next leader slot is ####` +- Once your stake is warmed up, you will see a stake balance listed for your + validator by running `solana validators` ## Validator Rewards Once your stake is warmed up, and assuming the node is voting, you will now be -generating validator rewards. Rewards are paid automatically on epoch boundaries. +generating validator rewards. Rewards are paid automatically on epoch +boundaries. The rewards lamports earned are split between your stake account and the vote account according to the commission rate set in the vote account. Rewards can @@ -88,17 +96,24 @@ account. This is a normal transaction so the standard transaction fee will apply. The transaction fee range is defined by the genesis block. The actual fee will fluctuate based on transaction load. You can determine the current fee via the -[RPC API “getRecentBlockhash”](../api/http#getrecentblockhash) -before submitting a transaction. +[RPC API “getRecentBlockhash”](https://solana.com/docs/rpc/deprecated/getrecentblockhash) before submitting +a transaction. -Learn more about [transaction fees here](../implemented-proposals/transaction-fees.md). +Learn more about +[transaction fees here](../../implemented-proposals/transaction-fees.md). ## Monitor Your Staked Validator -Confirm your validator becomes a [leader](../terminology.md#leader) +Confirm your validator becomes a +[leader](https://solana.com/docs/terminology#leader) -- After your validator is caught up, use the `solana balance` command to monitor the earnings as your validator is selected as leader and collects transaction fees -- Solana nodes offer a number of useful JSON-RPC methods to return information about the network and your validator's participation. Make a request by using curl \(or another http client of your choosing\), specifying the desired method in JSON-RPC-formatted data. For example: +- After your validator is caught up, use the `solana balance` command to monitor + the earnings as your validator is selected as leader and collects transaction + fees +- Solana nodes offer a number of useful JSON-RPC methods to return information + about the network and your validator's participation. Make a request by using + curl \(or another http client of your choosing\), specifying the desired + method in JSON-RPC-formatted data. For example: ```bash // Request @@ -110,9 +125,21 @@ Confirm your validator becomes a [leader](../terminology.md#leader) Helpful JSON-RPC methods: -- `getEpochInfo`[An epoch](../terminology.md#epoch) is the time, i.e. number of [slots](../terminology.md#slot), for which a [leader schedule](../terminology.md#leader-schedule) is valid. 
This will tell you what the current epoch is and how far into it the cluster is. -- `getVoteAccounts` This will tell you how much active stake your validator currently has. A % of the validator's stake is activated on an epoch boundary. You can learn more about staking on Solana [here](../cluster/stake-delegation-and-rewards.md). -- `getLeaderSchedule` At any given moment, the network expects only one validator to produce ledger entries. The [validator currently selected to produce ledger entries](../cluster/leader-rotation.md#leader-rotation) is called the “leader”. This will return the complete leader schedule \(on a slot-by-slot basis\) for currently activated stake, the identity pubkey will show up 1 or more times here. +- `getEpochInfo`[An epoch](https://solana.com/docs/terminology#epoch) is the + time, i.e. number of [slots](https://solana.com/docs/terminology#slot), for + which a [leader schedule](https://solana.com/docs/terminology#leader-schedule) + is valid. This will tell you what the current epoch is and how far into it the + cluster is. +- `getVoteAccounts` This will tell you how much active stake your validator + currently has. A % of the validator's stake is activated on an epoch boundary. + You can learn more about staking on Solana + [here](../../consensus/stake-delegation-and-rewards.md). +- `getLeaderSchedule` At any given moment, the network expects only one + validator to produce ledger entries. The + [validator currently selected to produce ledger entries](../../consensus/leader-rotation.md#leader-rotation) + is called the “leader”. This will return the complete leader schedule \(on a + slot-by-slot basis\) for currently activated stake, the identity pubkey will + show up 1 or more times here. ## Deactivating Stake @@ -124,8 +151,8 @@ solana deactivate-stake ~/validator-stake-keypair.json ``` Stake is not deactivated immediately and instead cools down in a similar fashion -as stake warm up. Your validator should remain attached to the cluster while -the stake is cooling down. While cooling down, your stake will continue to earn +as stake warm up. Your validator should remain attached to the cluster while the +stake is cooling down. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, depending on active stake and the size of your stake. diff --git a/docs/src/running-validator/validator-start.md b/docs/src/operations/guides/validator-start.md similarity index 77% rename from docs/src/running-validator/validator-start.md rename to docs/src/operations/guides/validator-start.md index ccd012aa79997c..69cef1315c05b8 100644 --- a/docs/src/running-validator/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -12,7 +12,7 @@ solana config set --url http://api.devnet.solana.com ``` While this section demonstrates how to connect to the Devnet cluster, the steps -are similar for the other [Solana Clusters](../clusters.md). +are similar for the other [Solana Clusters](../../clusters/available.md). ## Confirm The Cluster Is Reachable @@ -97,7 +97,7 @@ EOF" #### System Clock -Large system clock drift can prevent a node from properly participating in Solana's [gossip protocol](../validator/gossip.md). Ensure that your system clock is accurate. To check the current system clock, use: +Large system clock drift can prevent a node from properly participating in Solana's [gossip protocol](../../validator/gossip.md). 
Ensure that your system clock is accurate. To check the current system clock, use: ```bash timedatectl @@ -138,7 +138,7 @@ solana-keygen pubkey ASK and then entering your seed phrase. -See [Paper Wallet Usage](../wallet-guide/paper-wallet.md) for more info. +See [Paper Wallet Usage](../../cli/wallets/paper.md) for more info. --- @@ -215,7 +215,7 @@ Or to see in finer detail: solana balance --lamports ``` -Read more about the [difference between SOL and lamports here](../introduction.md#what-are-sols). +Read more about the [difference between SOL and lamports here](https://solana.com/docs/intro#what-are-sols). ## Create Authorized Withdrawer Account @@ -253,13 +253,7 @@ solana create-vote-account ~/vote-account-keypair.json ~/validator-keypair.json Remember to move your authorized withdrawer keypair into a very secure location after running the above command. -Read more about [creating and managing a vote account](vote-accounts.md). - -## Stake your validator - -Until your validator is staked, it will be unable to vote, propose leader blocks, or collect rewards. - -Follow the instructions to [stake your validator](validator-stake.md) +Read more about [creating and managing a vote account](./vote-accounts.md). ## Known validators @@ -296,7 +290,7 @@ The ledger will be placed in the `ledger/` directory by default, use the `--ledger` argument to specify a different location. > Note: You can use a -> [paper wallet seed phrase](../wallet-guide/paper-wallet.md) +> [paper wallet seed phrase](../../cli/wallets/paper.md) > for your `--identity` and/or > `--authorized-voter` keypairs. To use these, pass the respective argument as > `solana-validator --identity ASK ... --authorized-voter ASK ...` @@ -321,7 +315,7 @@ the validator to ports 11000-11020. ### Limiting ledger size to conserve disk space The `--limit-ledger-size` parameter allows you to specify how many ledger -[shreds](../terminology.md#shred) your node retains on disk. If you do not +[shreds](https://solana.com/docs/terminology#shred) your node retains on disk. If you do not include this parameter, the validator will keep all received ledger data until it runs out of disk space. Otherwise, the validator will continually purge the oldest data once to stay under the specified `--limit-ledger-size` @@ -436,3 +430,52 @@ which starts the solana validator process uses "exec" to do so (example: "exec solana-validator ..."); otherwise, when logrotate sends its signal to the validator, the enclosing script will die and take the validator process with it. + +### Using a ramdisk with spill-over into swap for the accounts database to reduce SSD wear + +If your machine has plenty of RAM, a tmpfs ramdisk +([tmpfs](https://man7.org/linux/man-pages/man5/tmpfs.5.html)) may be used to hold +the accounts database + +When using tmpfs it's essential to also configure swap on your machine as well to +avoid running out of tmpfs space periodically. + +A 300GB tmpfs partition is recommended, with an accompanying 250GB swap +partition. + +Example configuration: + +1. `sudo mkdir /mnt/solana-accounts` +2. Add a 300GB tmpfs partition by adding a new line containing `tmpfs /mnt/solana-accounts tmpfs rw,size=300G,user=sol 0 0` to `/etc/fstab` + (assuming your validator is running under the user "sol"). **CAREFUL: If you + incorrectly edit /etc/fstab your machine may no longer boot** +3. Create at least 250GB of swap space + +- Choose a device to use in place of `SWAPDEV` for the remainder of these instructions. 
+ Ideally select a free disk partition of 250GB or greater on a fast disk. If one is not + available, create a swap file with `sudo dd if=/dev/zero of=/swapfile bs=1MiB count=250KiB`, + set its permissions with `sudo chmod 0600 /swapfile` and use `/swapfile` as `SWAPDEV` for + the remainder of these instructions +- Format the device for usage as swap with `sudo mkswap SWAPDEV` + +4. Add the swap file to `/etc/fstab` with a new line containing `SWAPDEV swap swap defaults 0 0` +5. Enable swap with `sudo swapon -a` and mount the tmpfs with `sudo mount /mnt/solana-accounts/` +6. Confirm swap is active with `free -g` and the tmpfs is mounted with `mount` + +Now add the `--accounts /mnt/solana-accounts` argument to your `solana-validator` +command-line arguments and restart the validator. + +### Account indexing + +As the number of populated accounts on the cluster grows, account-data RPC +requests that scan the entire account set -- like +[`getProgramAccounts`](https://solana.com/docs/rpc/http/getprogramaccounts) and +[SPL-token-specific requests](https://solana.com/docs/rpc/http/gettokenaccountsbydelegate) -- +may perform poorly. If your validator needs to support any of these requests, +you can use the `--account-index` parameter to activate one or more in-memory +account indexes that significantly improve RPC performance by indexing accounts +by the key field. Currently supports the following parameter values: + +- `program-id`: each account indexed by its owning program; used by [getProgramAccounts](https://solana.com/docs/rpc/http/getprogramaccounts) +- `spl-token-mint`: each SPL token account indexed by its token Mint; used by [getTokenAccountsByDelegate](https://solana.com/docs/rpc/http/gettokenaccountsbydelegate), and [getTokenLargestAccounts](https://solana.com/docs/rpc/http/gettokenlargestaccounts) +- `spl-token-owner`: each SPL token account indexed by the token-owner address; used by [getTokenAccountsByOwner](https://solana.com/docs/rpc/http/gettokenaccountsbyowner), and [getProgramAccounts](https://solana.com/docs/rpc/http/getprogramaccounts) requests that include an spl-token-owner filter. diff --git a/docs/src/running-validator/validator-troubleshoot.md b/docs/src/operations/guides/validator-troubleshoot.md similarity index 95% rename from docs/src/running-validator/validator-troubleshoot.md rename to docs/src/operations/guides/validator-troubleshoot.md index a9defd80062b33..abf8d8f442c33a 100644 --- a/docs/src/running-validator/validator-troubleshoot.md +++ b/docs/src/operations/guides/validator-troubleshoot.md @@ -2,7 +2,7 @@ title: Troubleshooting --- -There is a **\#validator-support** Discord channel available to reach other +There is a `#validator-support` Discord channel available to reach other testnet participants, [https://solana.com/discord](https://solana.com/discord) ## Useful Links & Discussion diff --git a/docs/src/running-validator/vote-accounts.md b/docs/src/operations/guides/vote-accounts.md similarity index 53% rename from docs/src/running-validator/vote-accounts.md rename to docs/src/operations/guides/vote-accounts.md index 6bb021588d7698..c86b66cb85bba4 100644 --- a/docs/src/running-validator/vote-accounts.md +++ b/docs/src/operations/guides/vote-accounts.md @@ -8,8 +8,8 @@ account is needed if you plan to run a validator node on Solana. ## Create a Vote Account A vote account can be created with the -[create-vote-account](../cli/usage.md#solana-create-vote-account) command. 
-The vote account can be configured when first created or after the validator is +[create-vote-account](../../cli/usage.md#solana-create-vote-account) command. The +vote account can be configured when first created or after the validator is running. All aspects of the vote account can be changed except for the [vote account address](#vote-account-address), which is fixed for the lifetime of the account. @@ -17,114 +17,117 @@ of the account. ### Configure an Existing Vote Account - To change the [validator identity](#validator-identity), use - [vote-update-validator](../cli/usage.md#solana-vote-update-validator). + [vote-update-validator](../../cli/usage.md#solana-vote-update-validator). - To change the [vote authority](#vote-authority), use - [vote-authorize-voter-checked](../cli/usage.md#solana-vote-authorize-voter-checked). + [vote-authorize-voter-checked](../../cli/usage.md#solana-vote-authorize-voter-checked). - To change the [authorized withdrawer](#authorized-withdrawer), use - [vote-authorize-withdrawer-checked](../cli/usage.md#solana-vote-authorize-withdrawer-checked). + [vote-authorize-withdrawer-checked](../../cli/usage.md#solana-vote-authorize-withdrawer-checked). - To change the [commission](#commission), use - [vote-update-commission](../cli/usage.md#solana-vote-update-commission). + [vote-update-commission](../../cli/usage.md#solana-vote-update-commission). ## Vote Account Structure ### Vote Account Address A vote account is created at an address that is either the public key of a -keypair file, or at a derived address based on a keypair file's public key and -a seed string. +keypair file, or at a derived address based on a keypair file's public key and a +seed string. -The address of a vote account is never needed to sign any transactions, -but is just used to look up the account information. +The address of a vote account is never needed to sign any transactions, but is +just used to look up the account information. -When someone wants to [delegate tokens in a stake account](../staking.md), +When someone wants to +[delegate tokens in a stake account](https://solana.com/docs/economics/staking), the delegation command is pointed at the vote account address of the validator to whom the token-holder wants to delegate. ### Validator Identity The _validator identity_ is a system account that is used to pay for all the -vote transaction fees submitted to the vote account. -Because the validator is expected to vote on most valid blocks it receives, -the validator identity account is frequently -(potentially multiple times per second) signing transactions and -paying fees. For this reason the validator identity keypair must be -stored as a "hot wallet" in a keypair file on the same system the validator -process is running. +vote transaction fees submitted to the vote account. Because the validator is +expected to vote on most valid blocks it receives, the validator identity +account is frequently (potentially multiple times per second) signing +transactions and paying fees. For this reason the validator identity keypair +must be stored as a "hot wallet" in a keypair file on the same system the +validator process is running. Because a hot wallet is generally less secure than an offline or "cold" wallet, the validator operator may choose to store only enough SOL on the identity account to cover voting fees for a limited amount of time, such as a few weeks -or months. The validator identity account could be periodically topped off -from a more secure wallet. +or months. 
The validator identity account could be periodically topped off from +a more secure wallet. -This practice can reduce the risk of loss of funds if the validator node's -disk or file system becomes compromised or corrupted. +This practice can reduce the risk of loss of funds if the validator node's disk +or file system becomes compromised or corrupted. -The validator identity is required to be provided when a vote account is created. -The validator identity can also be changed after an account is created by using -the [vote-update-validator](../cli/usage.md#solana-vote-update-validator) command. +The validator identity is required to be provided when a vote account is +created. The validator identity can also be changed after an account is created +by using the +[vote-update-validator](../../cli/usage.md#solana-vote-update-validator) command. ### Vote Authority The _vote authority_ keypair is used to sign each vote transaction the validator node wants to submit to the cluster. This doesn't necessarily have to be unique -from the validator identity, as you will see later in this document. Because -the vote authority, like the validator identity, is signing transactions -frequently, this also must be a hot keypair on the same file system as the -validator process. - -The vote authority can be set to the same address as the validator identity. -If the validator identity is also the vote authority, only one -signature per vote transaction is needed in order to both sign the vote and pay -the transaction fee. Because transaction fees on Solana are assessed -per-signature, having one signer instead of two will result in half the transaction -fee paid compared to setting the vote authority and validator identity to two -different accounts. +from the validator identity, as you will see later in this document. Because the +vote authority, like the validator identity, is signing transactions frequently, +this also must be a hot keypair on the same file system as the validator +process. + +The vote authority can be set to the same address as the validator identity. If +the validator identity is also the vote authority, only one signature per vote +transaction is needed in order to both sign the vote and pay the transaction +fee. Because transaction fees on Solana are assessed per-signature, having one +signer instead of two will result in half the transaction fee paid compared to +setting the vote authority and validator identity to two different accounts. The vote authority can be set when the vote account is created. If it is not -provided, the default behavior is to assign it the same as the validator identity. -The vote authority can be changed later with the -[vote-authorize-voter-checked](../cli/usage.md#solana-vote-authorize-voter-checked) command. +provided, the default behavior is to assign it the same as the validator +identity. The vote authority can be changed later with the +[vote-authorize-voter-checked](../../cli/usage.md#solana-vote-authorize-voter-checked) +command. The vote authority can be changed at most once per epoch. If the authority is -changed with [vote-authorize-voter-checked](../cli/usage.md#solana-vote-authorize-voter-checked), -this will not take effect until the beginning of the next epoch. -To support a smooth transition of the vote signing, -`solana-validator` allows the `--authorized-voter` argument to be specified -multiple times. 
This allows the validator process to keep voting successfully -when the network reaches an epoch boundary at which the validator's vote -authority account changes. +changed with +[vote-authorize-voter-checked](../../cli/usage.md#solana-vote-authorize-voter-checked), +this will not take effect until the beginning of the next epoch. To support a +smooth transition of the vote signing, `solana-validator` allows the +`--authorized-voter` argument to be specified multiple times. This allows the +validator process to keep voting successfully when the network reaches an epoch +boundary at which the validator's vote authority account changes. ### Authorized Withdrawer -The _authorized withdrawer_ keypair is used to withdraw funds from a vote account -using the [withdraw-from-vote-account](../cli/usage.md#solana-withdraw-from-vote-account) +The _authorized withdrawer_ keypair is used to withdraw funds from a vote +account using the +[withdraw-from-vote-account](../../cli/usage.md#solana-withdraw-from-vote-account) command. Any network rewards a validator earns are deposited into the vote -account and are only retrievable by signing with the authorized withdrawer keypair. +account and are only retrievable by signing with the authorized withdrawer +keypair. -The authorized withdrawer is also required to sign any transaction to change -a vote account's [commission](#commission), and to change the validator -identity on a vote account. +The authorized withdrawer is also required to sign any transaction to change a +vote account's [commission](#commission), and to change the validator identity +on a vote account. Because theft of an authorized withdrawer keypair can give complete control over the operation of a validator to an attacker, it is advised to keep the withdraw -authority keypair in an offline/cold wallet in a secure location. The withdraw +authority keypair in an offline/cold wallet in a secure location. The withdraw authority keypair is not needed during operation of a validator and should not stored on the validator itself. -The authorized withdrawer must be set when the vote account is created. It must +The authorized withdrawer must be set when the vote account is created. It must not be set to a keypair that is the same as either the validator identity keypair or the vote authority keypair. The authorized withdrawer can be changed later with the -[vote-authorize-withdrawer-checked](../cli/usage.md#solana-vote-authorize-withdrawer-checked) +[vote-authorize-withdrawer-checked](../../cli/usage.md#solana-vote-authorize-withdrawer-checked) command. ### Commission _Commission_ is the percent of network rewards earned by a validator that are -deposited into the validator's vote account. The remainder of the rewards -are distributed to all of the stake accounts delegated to that vote account, +deposited into the validator's vote account. The remainder of the rewards are +distributed to all of the stake accounts delegated to that vote account, proportional to the active stake weight of each stake account. For example, if a vote account has a commission of 10%, for all rewards earned @@ -134,21 +137,22 @@ will be deposited into delegated stake accounts as immediately active stake. A validator may choose to set a low commission to try to attract more stake delegations as a lower commission results in a larger percentage of rewards -passed along to the delegator. 
As there are costs associated with setting up -and operating a validator node, a validator would ideally set a high enough +passed along to the delegator. As there are costs associated with setting up and +operating a validator node, a validator would ideally set a high enough commission to at least cover their expenses. Commission can be set upon vote account creation with the `--commission` option. -If it is not provided, it will default to 100%, which will result in all -rewards deposited in the vote account, and none passed on to any delegated -stake accounts. +If it is not provided, it will default to 100%, which will result in all rewards +deposited in the vote account, and none passed on to any delegated stake +accounts. Commission can also be changed later with the -[vote-update-commission](../cli/usage.md#solana-vote-update-commission) command. +[vote-update-commission](../../cli/usage.md#solana-vote-update-commission) command. -When setting the commission, only integer values in the set [0-100] are accepted. -The integer represents the number of percentage points for the commission, so -creating an account with `--commission 10` will set a 10% commission. +When setting the commission, only integer values in the set [0-100] are +accepted. The integer represents the number of percentage points for the +commission, so creating an account with `--commission 10` will set a 10% +commission. Note that validators can only update their commission during the first half of any epoch. This prevents validators from stealing delegator rewards by setting a @@ -161,23 +165,27 @@ Rotating the vote account authority keys requires special handling when dealing with a live validator. Note that vote account key rotation has no effect on the stake accounts that -have been delegate to the vote account. For example it is possible to use key +have been delegated to the vote account. For example it is possible to use key rotation to transfer all authority of a vote account from one entity to another without any impact to staking rewards. ### Vote Account Validator Identity -You will need access to the _authorized withdrawer_ keypair for the vote account to -change the validator identity. The following steps assume that +You will need access to the _authorized withdrawer_ keypair for the vote account +to change the validator identity. The following steps assume that `~/authorized_withdrawer.json` is that keypair. -1. Create the new validator identity keypair, `solana-keygen new -o ~/new-validator-keypair.json`. -2. Ensure that the new identity account has been funded, `solana transfer ~/new-validator-keypair.json 500`. -3. Run `solana vote-update-validator ~/vote-account-keypair.json ~/new-validator-keypair.json ~/authorized_withdrawer.json` +1. Create the new validator identity keypair, + `solana-keygen new -o ~/new-validator-keypair.json`. +2. Ensure that the new identity account has been funded, + `solana transfer ~/new-validator-keypair.json 500`. +3. Run + `solana vote-update-validator ~/vote-account-keypair.json ~/new-validator-keypair.json ~/authorized_withdrawer.json` to modify the validator identity in your vote account -4. Restart your validator with the new identity keypair for the `--identity` argument +4. Restart your validator with the new identity keypair for the `--identity` + argument -**Additional steps are required if your validator has stake.** The leader +**Additional steps are required if your validator has stake.** The leader schedule is computed two epochs in advance. 
Therefore if your old validator identity was in the leader schedule, it will remain in the leader schedule for up to two epochs after the validator identity change. If extra steps are not @@ -185,12 +193,16 @@ taken your validator will produce no blocks until your new validator identity is added to the leader schedule. After your validator is restarted with the new identity keypair, per step 4, -start a second non-voting validator on a different machine with the old identity keypair -without providing the `--vote-account` argument, as well as with the `--no-wait-for-vote-to-start-leader` argument. +start a second non-voting validator on a different machine with the old identity +keypair without providing the `--vote-account` argument, as well as with the +`--no-wait-for-vote-to-start-leader` argument. + +This temporary validator should be run for two full epochs. During this time it +will: -This temporary validator should be run for two full epochs. During this time it will: -* Produce blocks for the remaining slots that are assigned to your old validator identity -* Receive the transaction fees and rent rewards for your old validator identity +- Produce blocks for the remaining slots that are assigned to your old validator + identity +- Receive the transaction fees and rent rewards for your old validator identity It is safe to stop this temporary validator when your old validator identity is no longer listed in the `solana leader-schedule` output. @@ -204,44 +216,64 @@ migration. 1. Run `solana epoch-info`. If there is not much time remaining time in the current epoch, consider waiting for the next epoch to allow your validator plenty of time to restart and catch up. -2. Create the new vote authority keypair, `solana-keygen new -o ~/new-vote-authority.json`. -3. Determine the current _vote authority_ keypair by running `solana vote-account ~/vote-account-keypair.json`. It may be validator's +2. Create the new vote authority keypair, + `solana-keygen new -o ~/new-vote-authority.json`. +3. Determine the current _vote authority_ keypair by running + `solana vote-account ~/vote-account-keypair.json`. It may be validator's identity account (the default) or some other keypair. The following steps assume that `~/validator-keypair.json` is that keypair. -4. Run `solana vote-authorize-voter-checked ~/vote-account-keypair.json ~/validator-keypair.json ~/new-vote-authority.json`. - The new vote authority is scheduled to become active starting at the next epoch. +4. Run + `solana vote-authorize-voter-checked ~/vote-account-keypair.json ~/validator-keypair.json ~/new-vote-authority.json`. + The new vote authority is scheduled to become active starting at the next + epoch. 5. `solana-validator` now needs to be restarted with the old and new vote authority keypairs, so that it can smoothly transition at the next epoch. Add - the two arguments on restart: `--authorized-voter ~/validator-keypair.json --authorized-voter ~/new-vote-authority.json` + the two arguments on restart: + `--authorized-voter ~/validator-keypair.json --authorized-voter ~/new-vote-authority.json` 6. After the cluster reaches the next epoch, remove the `--authorized-voter ~/validator-keypair.json` argument and restart `solana-validator`, as the old vote authority keypair is no longer required. ### Vote Account Authorized Withdrawer -No special handling or timing considerations are required. -Use the `solana vote-authorize-withdrawer-checked` command as needed. +No special handling or timing considerations are required. 
Use the +`solana vote-authorize-withdrawer-checked` command as needed. ### Consider Durable Nonces for a Trustless Transfer of the Authorized Voter or Withdrawer If the Authorized Voter or Withdrawer is to be transferred to another entity -then a two-stage signing process using a [Durable Nonce](../offline-signing/durable-nonce) is recommended. +then a two-stage signing process using a +[Durable Nonce](../../cli/examples/durable-nonce.md) is recommended. 1. Entity B creates a durable nonce using `solana create-nonce-account` -2. Entity B then runs a `solana vote-authorize-voter-checked` or `solana vote-authorize-withdrawer-checked` command, including: - - the `--sign-only` argument - - the `--nonce`, `--nonce-authority`, and `--blockhash` arguments to specify the nonce particulars - - the address of the Entity A's existing authority, and the keypair for Entity B's new authority -3. When the `solana vote-authorize-...-checked` command successfully executes, it will output transaction signatures that Entity B must share with Entity A -4. Entity A then runs a similar `solana vote-authorize-voter-checked` or `solana vote-authorize-withdrawer-checked` command with the following changes: - - the `--sign-only` argument is removed, and replaced with a `--signer` argument for each of the signatures provided by Entity B - - the address of Entity A's existing authority is replaced with the corresponding keypair, and the keypair for Entity B's new authority is replaced with the corresponding address - -On success the authority is now changed without Entity A or B having to reveal keypairs to the other even though both entities signed the transaction. +2. Entity B then runs a `solana vote-authorize-voter-checked` or + `solana vote-authorize-withdrawer-checked` command, including: + +- the `--sign-only` argument +- the `--nonce`, `--nonce-authority`, and `--blockhash` arguments to specify the + nonce particulars +- the address of the Entity A's existing authority, and the keypair for Entity + B's new authority + +3. When the `solana vote-authorize-...-checked` command successfully executes, + it will output transaction signatures that Entity B must share with Entity A +4. Entity A then runs a similar `solana vote-authorize-voter-checked` or + `solana vote-authorize-withdrawer-checked` command with the following + changes: + +- the `--sign-only` argument is removed, and replaced with a `--signer` argument + for each of the signatures provided by Entity B +- the address of Entity A's existing authority is replaced with the + corresponding keypair, and the keypair for Entity B's new authority is + replaced with the corresponding address + +On success the authority is now changed without Entity A or B having to reveal +keypairs to the other even though both entities signed the transaction. ## Close a Vote Account A vote account can be closed with the -[close-vote-account](../cli/usage.md#solana-close-vote-account) command. -Closing a vote account withdraws all remaining SOL funds to a supplied recipient address and renders it invalid as a vote account. -It is not possible to close a vote account with active stake. +[close-vote-account](../../cli/usage.md#solana-close-vote-account) command. Closing +a vote account withdraws all remaining SOL funds to a supplied recipient address +and renders it invalid as a vote account. It is not possible to close a vote +account with active stake. 
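+
+As a sketch, closing a vote account and sending the remaining funds to a
+separate recipient keypair might look like the following. The filenames are
+placeholders and the argument order mirrors the `withdraw-from-vote-account`
+usage described earlier; confirm the exact arguments and flags with
+`solana close-vote-account --help`:
+
+```
+solana close-vote-account \
+  vote-account-keypair.json \
+  recipient-keypair.json \
+  --authorized-withdrawer authorized-withdrawer-keypair.json
+```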
diff --git a/docs/src/operations/index.md b/docs/src/operations/index.md new file mode 100644 index 00000000000000..51e29ac4078cab --- /dev/null +++ b/docs/src/operations/index.md @@ -0,0 +1,8 @@ +--- +title: Operating a Validator +sidebar_position: 0 +--- + +This section describes how to run a Solana validator node. + +There are several clusters available to connect to; see [Choosing a Cluster](../cli/examples/choose-a-cluster.md) for an overview of each. diff --git a/docs/src/validator/overview/validator-prerequisites.md b/docs/src/operations/prerequisites.md similarity index 68% rename from docs/src/validator/overview/validator-prerequisites.md rename to docs/src/operations/prerequisites.md index 2bf2af278aa4fa..c44c15fc205300 100644 --- a/docs/src/validator/overview/validator-prerequisites.md +++ b/docs/src/operations/prerequisites.md @@ -1,6 +1,7 @@ --- title: Solana Validator Prerequisites -sidebar_label: Validator Prerequisites +sidebar_label: Prerequisites +sidebar_position: 2 --- Operating a Solana validator is an interesting and rewarding task. Generally speaking, it requires someone with a technical background but also involves community engagement and marketing. @@ -10,7 +11,7 @@ Operating a Solana validator is an interesting and rewarding task. Generally spe Here is a list of some of the requirements for being a good operator: - Performant computer hardware and a fast internet connection - - You can find a list of [hardware requirements here](../../running-validator/validator-reqs.md) + - You can find a list of [hardware requirements here](./requirements.md) - Solana helps facilitate data-center server rentals through the [Solana server program](https://solana.foundation/server-program) - Knowledge of the Linux terminal - Ubuntu system administration @@ -26,10 +27,10 @@ Here is a list of some of the requirements for being a good operator: - Marketing and communications to attract delegators - Customer support -Whether you decide to run a [validator](./what-is-a-validator.md) or an [RPC node](./what-is-an-rpc-node.md), you should consider all of these areas of expertise. A team of people is likely necessary for you to achieve your goals. +Whether you decide to run a [validator](../what-is-a-validator.md) or an [RPC node](../what-is-an-rpc-node.md), you should consider all of these areas of expertise. A team of people is likely necessary for you to achieve your goals. ## Can I use my computer at home? -While anyone can join the network, you should make sure that your home computer and network meets the specifications in the [hardware requirements](../../running-validator/validator-reqs.md) doc. Most home internet service providers do not provide consistent service that would allow your validator to perform well. If your home network or personal hardware is not performant enough to keep up with the Solana cluster, your validator will not be able to participate in consensus. +While anyone can join the network, you should make sure that your home computer and network meets the specifications in the [hardware requirements](./requirements.md) doc. Most home internet service providers do not provide consistent service that would allow your validator to perform well. If your home network or personal hardware is not performant enough to keep up with the Solana cluster, your validator will not be able to participate in consensus. 
In addition to performance considerations, you will want to make sure that your home computer is resistant to outages caused by loss of power, flooding, fire, theft, etc. If you are just getting started and learning about being an operator, a home setup may be sufficient, but you will want to consider all of these factors when you start operating your validator on the mainnet-beta cluster. \ No newline at end of file diff --git a/docs/src/running-validator/validator-reqs.md b/docs/src/operations/requirements.md similarity index 96% rename from docs/src/running-validator/validator-reqs.md rename to docs/src/operations/requirements.md index c3e9f937c9a35b..8c9e8d62cb5a08 100644 --- a/docs/src/running-validator/validator-reqs.md +++ b/docs/src/operations/requirements.md @@ -1,5 +1,7 @@ --- title: Validator Requirements +sidebar_label: Requirements +sidebar_position: 3 --- ## Minimum SOL requirements @@ -88,7 +90,7 @@ releases at [solanalabs/solana](https://hub.docker.com/r/solanalabs/solana). ## Software - We build and run on Ubuntu 20.04. -- See [Installing Solana](../cli/install-solana-cli-tools.md) for the current Solana software release. +- See [Installing Solana CLI](../cli/install.md) for the current Solana software release. Prebuilt binaries are available for Linux x86_64 on CPUs supporting AVX2 \(Ubuntu 20.04 recommended\). MacOS or WSL users may build from source. diff --git a/docs/src/validator/get-started/setup-a-validator.md b/docs/src/operations/setup-a-validator.md similarity index 90% rename from docs/src/validator/get-started/setup-a-validator.md rename to docs/src/operations/setup-a-validator.md index 8379b6f1d1c4d1..c65a6376cfe7e1 100644 --- a/docs/src/validator/get-started/setup-a-validator.md +++ b/docs/src/operations/setup-a-validator.md @@ -1,11 +1,14 @@ --- title: Setup a Solana Validator sidebar_label: Setup a Validator +sidebar_position: 5 --- This is a guide for getting your validator setup on the Solana testnet cluster for the first time. Testnet is a Solana cluster that is used for performance testing of the software before the software is used on mainnet. Since testnet is stress tested daily, it is a good cluster to practice validator operations. -Once you have a working validator on testnet, you will want to learn about [operational best practices](../best-practices/operations.md) in the next section. Although the guide is specific to testnet, it can be adapted to mainnet or devnet as well. Refer to the [clusters](../../clusters) section of the Solana docs to see example commands for each cluster. +Once you have a working validator on testnet, you will want to learn about [operational best practices](./best-practices/general.md) in the next section. Although the guide is specific to testnet, it can be adapted to mainnet or devnet as well. + +> Refer to the [Available Clusters](../clusters/available.md) section of the documentation to see example commands for each cluster. Now let's get started. @@ -19,9 +22,9 @@ To start this guide, you will be running commands on your trusted computer, not ## Install The Solana CLI Locally -To create your validator vote account, you need to install the [Solana command line interface](../../cli.md) on your local computer. +To create your validator vote account, you need to install the [Solana command line interface](../cli/index.md) on your local computer. 
-
-You can either use [Solana's Install Tool](../../cli/install-solana-cli-tools#use-solanas-install-tool) section from the Solana docs to install the CLI, or alternatively, you can also [build from source](../../cli/install-solana-cli-tools#build-from-source).
+You can either use the [Solana's Install Tool](../cli/install.md#use-solanas-install-tool) section from within these docs to install the CLI, or alternatively, you can [build from source](../cli/install.md#build-from-source).
 
 > Building from source is a great option for those that want a more secure and potentially more performant executable.
 
@@ -53,9 +56,9 @@ You should see a line that says: `RPC URL: https://api.testnet.solana.com`
 
 ## Create Keys
 
-On your local computer, create the 3 keypairs that you will need to run your validator ([docs for reference](../../running-validator/validator-start#generate-identity)):
+On your local computer, create the 3 keypairs that you will need to run your validator ([docs for reference](./guides/validator-start.md#generate-identity)):
 
-> **NOTE** Some operators choose to make vanity keypairs for their identity and vote account using the `grind` sub command ([docs for reference](../../running-validator/validator-start#vanity-keypair)).
+> **NOTE** Some operators choose to make vanity keypairs for their identity and vote account using the `grind` subcommand ([docs for reference](./guides/validator-start.md#vanity-keypair)).
 
 ```
 solana-keygen new -o validator-keypair.json
@@ -69,7 +72,7 @@ solana-keygen new -o vote-account-keypair.json
 solana-keygen new -o authorized-withdrawer-keypair.json
 ```
 
-> **IMPORTANT** the `authorized-withdrawer-keypair.json` should be considered very sensitive information. Many operators choose to use a multisig, hardware wallet, or paper wallet for the authorized withdrawer keypair. A keypair is created on disk in this example for simplicity. Additionally, the withdrawer keypair should always be stored safely. The authorized withdrawer keypair should **never** be stored on the remote machine that the validator software runs on. For more information, see [validator security best practices](../best-practices/security.md#do-not-store-your-withdrawer-key-on-your-validator)
+> **IMPORTANT** the `authorized-withdrawer-keypair.json` should be considered very sensitive information. Many operators choose to use a multisig, hardware wallet, or paper wallet for the authorized withdrawer keypair. A keypair is created on disk in this example for simplicity. Additionally, the withdrawer keypair should always be stored safely. The authorized withdrawer keypair should **never** be stored on the remote machine that the validator software runs on. For more information, see [validator security best practices](./best-practices/security.md#do-not-store-your-withdrawer-key-on-your-validator)
 
 ## Create a Vote Account
 
@@ -317,7 +320,7 @@ su - sol
 
 ## Install The Solana CLI on Remote Machine
 
-Your remote machine will need the Solana cli installed to run the validator software. Refer again to [Solana's Install Tool](../../cli/install-solana-cli-tools#use-solanas-install-tool) or [build from source](../../cli/install-solana-cli-tools#build-from-source). It is best for operators to build from source rather than using the pre built binaries.
+Your remote machine will need the Solana CLI installed to run the validator software. Refer again to [Solana's Install Tool](../cli/install.md#use-solanas-install-tool) or [build from source](../cli/install.md#build-from-source). 
It is best for operators to build from source rather than using the pre built binaries. ## Create A Validator Startup Script @@ -358,7 +361,7 @@ exec solana-validator \ --limit-ledger-size ``` -Refer to `solana-validator --help` for more information on what each flag is doing in this script. Also refer to the section on [best practices for operating a validator](../best-practices/operations.md). +Refer to `solana-validator --help` for more information on what each flag is doing in this script. Also refer to the section on [best practices for operating a validator](./best-practices/general.md). ## Verifying Your Validator Is Working @@ -395,7 +398,7 @@ Assuming you do not see any error messages, exit out of the command. ### Gossip Protocol -Gossip is a protocol used in the Solana clusters to communicate between validator nodes. For more information on gossip, see [Gossip Service](../gossip.md). To verify that your validator is running properly, make sure that the validator has registered itself with the gossip network. +Gossip is a protocol used in the Solana clusters to communicate between validator nodes. For more information on gossip, see [Gossip Service](../validator/gossip.md). To verify that your validator is running properly, make sure that the validator has registered itself with the gossip network. In a new terminal window, connect to your server via ssh. Identify your validator's pubkey: @@ -447,7 +450,7 @@ Once you are happy that the validator can start up without errors, the next step ## Create a System Service -Follow these instructions for [running the validator as a system service](../../running-validator/validator-start#systemd-unit) +Follow these instructions for [running the validator as a system service](./guides/validator-start.md#systemd-unit) Make sure to implement log rotate as well. Once you have the system service configured, start your validator using the newly configured service: @@ -463,7 +466,7 @@ tail -f /home/sol/solana-validator*.log ## Monitoring -`solana-watchtower` is a command you can run on a separate machine to monitor your server. You can read more about handling [automatic restarts and monitoring](../best-practices/monitoring.md#solana-watchtower) using Solana Watchtower here in the docs. +`solana-watchtower` is a command you can run on a separate machine to monitor your server. You can read more about handling [automatic restarts and monitoring](./best-practices/monitoring.md#solana-watchtower) using Solana Watchtower here in the docs. ## Common issues @@ -473,4 +476,4 @@ Make sure your ledger is on drive with at least `2TB` of space. ### Validator not catching up -This could be a networking/hardware issue, or you may need to get the latest snapshot from another validator node. \ No newline at end of file +This could be a networking/hardware issue, or you may need to get the latest snapshot from another validator node. 
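+
+As a starting point for the monitoring mentioned above, a minimal
+`solana-watchtower` invocation from a separate machine might look like the
+following sketch. The identity pubkey is a placeholder, and the flag names
+should be verified against `solana-watchtower --help` for your release:
+
+```
+# alert when the validator is delinquent or its identity balance runs low
+solana-watchtower \
+  --url https://api.testnet.solana.com \
+  --validator-identity <your-validator-identity-pubkey> \
+  --minimum-validator-identity-balance 10
+```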
diff --git a/docs/src/validator/get-started/setup-an-rpc-node.md b/docs/src/operations/setup-an-rpc-node.md similarity index 81% rename from docs/src/validator/get-started/setup-an-rpc-node.md rename to docs/src/operations/setup-an-rpc-node.md index 2c3f35031042e7..e65e35542ea430 100644 --- a/docs/src/validator/get-started/setup-an-rpc-node.md +++ b/docs/src/operations/setup-an-rpc-node.md @@ -1,6 +1,7 @@ --- title: Setup a Solana RPC Node sidebar_label: Setup an RPC Node +sidebar_position: 6 --- Since a Solana RPC server runs the same process as a consensus validator, first follow the instructions on [how to setup a Solana validator](./setup-a-validator.md) to get started. Note, that you do not need to create a vote account if you are operating an RPC node. An RPC node typically does not vote. @@ -55,7 +56,7 @@ If you are interested in setting up your own bigtable instance, see these docs i ### Example Known Validators -The identities of the [known validators](../../running-validator/validator-start.md#known-validators) supplied in these example snippets (via the `--known-validator` flag) are: +The identities of the [known validators](./guides/validator-start.md#known-validators) supplied in these example snippets (via the `--known-validator` flag) are: - `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - Solana Labs - `dDzy5SR3AXdYWVqbDEkVFdvSPCtS9ihF5kJkHCtXoFs` - MonkeDAO @@ -65,7 +66,7 @@ The identities of the [known validators](../../running-validator/validator-start ## Examples for other clusters -Additional examples of other Solana cluster specific validator commands can be found on the [Clusters](../../clusters.md) page. +Additional examples of other Solana cluster specific validator commands can be found on the [Clusters](../clusters/available.md) page. Keep in mind, you will still need to customize these commands to operate as an RPC node, as well other operator specific configuration settings. @@ -73,13 +74,13 @@ Keep in mind, you will still need to customize these commands to operate as an R As the number of populated accounts on the cluster grows, account-data RPC requests that scan the entire account set -- like -[`getProgramAccounts`](../../api/http#getprogramaccounts) and -[SPL-token-specific requests](../../api/http#gettokenaccountsbydelegate) -- +[`getProgramAccounts`](https://solana.com/docs/rpc/http/getprogramaccounts) and +[SPL-token-specific requests](https://solana.com/docs/rpc/http/gettokenaccountsbydelegate) -- may perform poorly. If your validator needs to support any of these requests, you can use the `--account-index` parameter to activate one or more in-memory account indexes that significantly improve RPC performance by indexing accounts by the key field. Currently supports the following parameter values: -- `program-id`: each account indexed by its owning program; used by [getProgramAccounts](../../api/http#getprogramaccounts) -- `spl-token-mint`: each SPL token account indexed by its token Mint; used by [getTokenAccountsByDelegate](../../api/http#gettokenaccountsbydelegate), and [getTokenLargestAccounts](../../api/http#gettokenlargestaccounts) -- `spl-token-owner`: each SPL token account indexed by the token-owner address; used by [getTokenAccountsByOwner](../../api/http#gettokenaccountsbyowner), and [getProgramAccounts](../../api/http#getprogramaccounts) requests that include an spl-token-owner filter. 
+- `program-id`: each account indexed by its owning program; used by [getProgramAccounts](https://solana.com/docs/rpc/http/getprogramaccounts) +- `spl-token-mint`: each SPL token account indexed by its token Mint; used by [getTokenAccountsByDelegate](https://solana.com/docs/rpc/http/gettokenaccountsbydelegate), and [getTokenLargestAccounts](https://solana.com/docs/rpc/http/gettokenlargestaccounts) +- `spl-token-owner`: each SPL token account indexed by the token-owner address; used by [getTokenAccountsByOwner](https://solana.com/docs/rpc/http/gettokenaccountsbyowner), and [getProgramAccounts](https://solana.com/docs/rpc/http/getprogramaccounts) requests that include an spl-token-owner filter. diff --git a/docs/src/validator/overview/validator-initiatives.md b/docs/src/operations/validator-initiatives.md similarity index 86% rename from docs/src/validator/overview/validator-initiatives.md rename to docs/src/operations/validator-initiatives.md index a410aba5e7e436..f0f149515936b8 100644 --- a/docs/src/validator/overview/validator-initiatives.md +++ b/docs/src/operations/validator-initiatives.md @@ -1,6 +1,7 @@ --- title: Solana Validator Initiatives sidebar_label: Validator Initiatives +sidebar_position: 4 --- There are a number of initiatives that may help operators get started or grow their delegation. All of these initiatives are completely optional. All Solana clusters are permissionless and an operator can join at any time. @@ -16,7 +17,7 @@ Delegation program participants who operate a performant testnet node, may also ## Solana Foundation Server Program -Separately from the delegation program, The Solana Foundation offers a server program that provides servers in various data-centers all over the world. If you would like to run a consensus validator or RPC node, you may use this program to rent bare metal servers in various data-centers. The servers meet or exceed the [Solana validator hardware specs](../../running-validator/validator-reqs#hardware-recommendations). No long-term lease commitments are required. To find out more, visit the [Solana server program page](https://solana.org/server-program). +Separately from the delegation program, the Solana Foundation offers a server program that provides servers in various data-centers all over the world. If you would like to run a consensus validator or RPC node, you may use this program to rent bare metal servers in various data-centers. The servers meet or exceed the [Solana validator hardware specs](./requirements.md#hardware-recommendations). No long-term lease commitments are required. To find out more, visit the [Solana server program page](https://solana.org/server-program). ## Stake Pools diff --git a/docs/src/operations/validator-or-rpc-node.md b/docs/src/operations/validator-or-rpc-node.md new file mode 100644 index 00000000000000..c07c5201f100f9 --- /dev/null +++ b/docs/src/operations/validator-or-rpc-node.md @@ -0,0 +1,91 @@ +--- +title: Consensus Validator or RPC Node? +sidebar_label: Validator vs RPC Node +sidebar_position: 1 +--- + +Operators who run a [consensus validator](../what-is-a-validator.md) have very +different incentives than operators who run an +[RPC node](../what-is-an-rpc-node.md). You will have to decide which choice is +best for you based on your interests, technical background, and goals. + +## Consensus Validators + +As a validator, your primary focus is maintaining the network and making sure +that your node is performing optimally so that you can fully participate in the +cluster consensus.
You will want to attract a delegation of SOL to your +validator, which will give your validator more opportunities to produce blocks +and earn rewards. + +Each staked validator earns inflation rewards from +[vote credits](https://solana.com/docs/terminology#vote-credit). Vote credits +are assigned to validators that vote on +[blocks](https://solana.com/docs/terminology#block) produced by the +[leader](https://solana.com/docs/terminology#leader). The vote credits are given +to all validators that successfully vote on blocks that are added to the +blockchain. Additionally, when the validator is the leader, it can earn +transaction fees and storage +[rent fees](https://solana.com/docs/core/accounts#rent) for each block that it +produces that is added to the blockchain. + +Since all votes in Solana happen on the blockchain, a validator incurs a +transaction cost for each vote that it makes. These transaction fees amount to +approximately 1.0 SOL per day. + +> It is important to make sure your validator always has enough SOL in its +> identity account to pay for these transactions! + +### Economics of running a consensus validator + +As an operator, it is important to understand how a consensus validator spends +and earns SOL through the protocol. + +All validators who vote (consensus validators) must pay vote transaction fees +for blocks that they agree with. The cost of voting can be up to 1.1 SOL per +day. + +A voting validator can earn SOL through 2 methods: + +1. Inflationary rewards paid at the end of an epoch. See + [staking rewards](../implemented-proposals/staking-rewards.md) +2. Earning 50% of transaction fees for the blocks produced by the validator. See + [transaction fee basic economic design](https://solana.com/docs/intro/transaction_fees#basic-economic-design) + +The following links are community-provided resources that discuss the economics +of running a validator: + +- Michael Hubbard wrote an + [article](https://laine-sa.medium.com/solana-staking-rewards-validator-economics-how-does-it-work-6718e4cccc4e) + that explains the economics of Solana in more depth for stakers and for + validators. +- Cogent Crypto has written a + [blog post](https://medium.com/@Cogent_Crypto/how-to-become-a-validator-on-solana-9dc4288107b7) + that discusses economics and getting started. +- Cogent Crypto also provides a + [validator profit calculator](https://cogentcrypto.io/ValidatorProfitCalculator) + +## RPC Nodes + +While RPC operators **do NOT** receive rewards (because the node is not +participating in voting), there are different motivations for running an RPC +node. + +An RPC operator is providing a service to users who want to interact with the +Solana blockchain. Because your primary user is often technical, you will have +to be able to answer technical questions about performance of RPC calls. This +option may require more understanding of the +[core Solana architecture](../clusters/index.md). + +If you are operating an RPC node as a business, your job will also involve +scaling your system to meet the demands of the users. For example, some RPC +providers create dedicated servers for projects that require a high volume of +requests to the node. Someone with a background in development operations or +software engineering will be a very important part of your team. You will need a +strong understanding of the Solana architecture and the +[JSON RPC API](https://solana.com/docs/rpc/http). + +Alternatively, you may be a development team that would like to run your own +infrastructure.
In this case, the RPC infrastructure could be a part of your +production stack. A development team could use the +[Geyser plugin](../validator/geyser.md), for example, to get +real time access to information about accounts or blocks in the cluster. diff --git a/docs/src/pages/CodeDocBlock.module.css b/docs/src/pages/CodeDocBlock.module.css deleted file mode 100644 index 7cffc5625bd7a0..00000000000000 --- a/docs/src/pages/CodeDocBlock.module.css +++ /dev/null @@ -1,80 +0,0 @@ -/* stylelint-disable docusaurus/copyright-header */ - -.DocBlock { - border-top: 1px solid #414141; - padding-top: 3rem; - /* display: flex; */ - /* justify-content: space-between; */ - margin-top: 5rem; - /* width: 100%; */ - /* align-items: center; */ -} - -.DocSideBySide { - margin-top: 2rem; -} - -.CodeParams { - display: block; - width: 100%; -} - -.CodeSnippets { - display: block; - width: 100%; -} - -@media screen and (min-width: 768px) { - .DocSideBySide { - display: flex; - width: 100%; - } - .CodeParams { - margin-right: 3rem; - width: 50%; - } - .CodeSnippets { - width: 50%; - } -} - -.Parameter { - padding: 1em 0em; - margin-bottom: 1em; - border-top: 1px solid #414141; - /* // border-bottom: 1px solid #414141; */ -} - -.ParameterName { - font-weight: 700; -} - -.ParameterHeader { - font-family: mono; - padding: 0.1em 0em; -} - -.Field { - /* // padding: 1em 0em; */ - margin: 1em 0em 1em 1em; - /* // border-top: 1px solid #414141; */ - /* // border-bottom: 1px solid #414141; */ -} -.Field section { - padding: 0em 1em; -} - -.FlagItem { - margin: 0 0.5rem; - color: #767676; - font-weight: 600; -} - -.Heading { - font-size: 1.24rem; - font-weight: 700; -} -.SubHeading { - /* font-size: 1.24rem; */ - font-weight: 600; -} diff --git a/docs/src/pages/api.js b/docs/src/pages/api.js deleted file mode 100644 index 6252d21df5027e..00000000000000 --- a/docs/src/pages/api.js +++ /dev/null @@ -1,82 +0,0 @@ -import React from "react"; -import Link from "@docusaurus/Link"; -import styles from "./styles.module.css"; -import Card from "../../components/Card"; -import CardLayout from "../../layouts/CardLayout"; - -function APIPage() { - return ( - -
    -
    -
    -

    JSON RPC API

    - -
    -

    - Interact with Solana nodes directly with the JSON RPC API via - the HTTP and Websocket methods. -

    - - - Explore the API - -
    -
    - -
    -

    Explore the JSON RPC Methods

    - -
    - - - - - -
    -
    -
    -
    -
    - ); -} - -export default APIPage; diff --git a/docs/src/pages/developers.js b/docs/src/pages/developers.js deleted file mode 100644 index 059622e3654691..00000000000000 --- a/docs/src/pages/developers.js +++ /dev/null @@ -1,171 +0,0 @@ -import React from "react"; -import Link from "@docusaurus/Link"; -import styles from "./styles.module.css"; -import Card from "../../components/Card"; -import CardLayout from "../../layouts/CardLayout"; - -function Developers() { - return ( - -
    -
    -
    -

    Learn Solana Development

    - -
    -

    - Build and deploy your first on chain Solana program directly in - your browser. -

    - - - Get Started - -
    -
    - -
    -

    Learn core concepts

    - -
    - - - - - -
    -
    - -
    -

    Learn through coding

    - -
    - - - -
    -
    - -
    -

    Setup your local development

    - -
    - - - - {/* future card to replace the RPC API card */} - {/* */} - - -
    -
    -
    -
    -
    - ); -} - -export default Developers; diff --git a/docs/src/pages/getstarted.jsx b/docs/src/pages/getstarted.jsx deleted file mode 100644 index 57d2c591bcfd69..00000000000000 --- a/docs/src/pages/getstarted.jsx +++ /dev/null @@ -1,138 +0,0 @@ -import React from "react"; -import Link from "@docusaurus/Link"; -import styles from "./styles.module.css"; -import Card from "../../components/Card"; -import CardLayout from "../../layouts/CardLayout"; - -function GetStartedPage() { - return ( - -
    -
    -
    -

    Get started with Solana development

    - -
    -

    - Build and deploy your first on chain Solana program directly in - your browser. -

    - - - Get Started - -
    -
    - -
    -
    - - - - - - -
    -
    - -
    -

    Community Resources

    - -
    - - - - - -
    -
    -
    -
    -
    - ); -} - -export default GetStartedPage; diff --git a/docs/src/proposals.md b/docs/src/proposals.md index d2a6e9a3255ce4..61bd657ff353e3 100644 --- a/docs/src/proposals.md +++ b/docs/src/proposals.md @@ -1,5 +1,6 @@ --- title: System Design Proposals +sidebar_label: Overview --- Changes to the Solana architecture are performed through a public proposal process (via pull requests) on the [Solana GitHub repository](https://github.com/solana-labs/solana). New proposals should be submitted with the "[Submit a Design Proposal](#submit-a-design-proposal)" guide below. @@ -7,13 +8,13 @@ Changes to the Solana architecture are performed through a public proposal proce There are currently two different states of these design proposals: 1. [Accepted Proposals](./proposals/accepted-design-proposals.md) -2. [Implemented Proposals](./implemented-proposals/implemented-proposals.md) +2. [Implemented Proposals](./implemented-proposals/index.md) ## Accepted Proposals These architectural proposals have been accepted by the Solana team, but are not yet fully implemented. -Each proposal may be implemented as described, implemented differently as issues in the designs become evident, or not implemented at all. If implemented, the proposal will be moved to [Implemented Proposals](./implemented-proposals/implemented-proposals.md) and the details will be added to relevant sections of the docs. +Each proposal may be implemented as described, implemented differently as issues in the designs become evident, or not implemented at all. If implemented, the proposal will be moved to [Implemented Proposals](./implemented-proposals/index.md) and the details will be added to relevant sections of the docs. ## Implemented Proposals diff --git a/docs/src/proposals/accepted-design-proposals.md b/docs/src/proposals/accepted-design-proposals.md index e2145d26b656b1..61d2d2ef4f6e4e 100644 --- a/docs/src/proposals/accepted-design-proposals.md +++ b/docs/src/proposals/accepted-design-proposals.md @@ -8,7 +8,7 @@ These architectural proposals have been accepted by the Solana maintainers, but ## After Implemented -Once a proposal has been implemented, it will be moved to [Implemented Proposals](../implemented-proposals/implemented-proposals.md) and the details will be added to relevant sections of the docs. +Once a proposal has been implemented, it will be moved to [Implemented Proposals](../implemented-proposals/index.md) and the details will be added to relevant sections of the docs. ## Submit a New Proposal diff --git a/docs/src/proposals/accounts-db-replication.md b/docs/src/proposals/accounts-db-replication.md index 42fb6c211d2e21..a1b0a2fd6a39fb 100644 --- a/docs/src/proposals/accounts-db-replication.md +++ b/docs/src/proposals/accounts-db-replication.md @@ -72,7 +72,7 @@ slot for which it has not completed accounts db replication. The `ReplicaAccount the `ReplicaAccountMeta`, Hash and the AccountData. The `ReplicaAccountMeta` contains info about the existing `AccountMeta` in addition to the account data length in bytes. -The `ReplicaAccountsServer`: this service is reponsible for serving the `ReplicaAccountsRequest` +The `ReplicaAccountsServer`: this service is responsible for serving the `ReplicaAccountsRequest` and sends `ReplicaAccountsResponse` to the requestor. The response contains the count of the ReplAccountInfo and the vector of ReplAccountInfo. This service runs both in the validator and the replica relaying replication information. 
The server can stream the account information @@ -88,7 +88,7 @@ During replication we also need to replicate the information of accounts that ha up due to zero lamports, i.e. we need to be able to tell the difference between an account in a given slot which was not updated and hence has no storage entry in that slot, and one that holds 0 lamports and has been cleaned up through the history. We may record this via some -"Tombstone" mechanism -- recording the dead accounts cleaned up fora slot. The tombstones +"Tombstone" mechanism -- recording the dead accounts cleaned up for a slot. The tombstones themselves can be removed after exceeding the retention period expressed as epochs. Any attempt to replicate slots with tombstones removed will fail and the replica should skip this slot and try later ones. diff --git a/docs/src/proposals/bankless-leader.md b/docs/src/proposals/bankless-leader.md index eac7b990ccb35e..462b253a3379da 100644 --- a/docs/src/proposals/bankless-leader.md +++ b/docs/src/proposals/bankless-leader.md @@ -2,58 +2,94 @@ title: Bankless Leader --- -A bankless leader does the minimum amount of work to produce a valid block. The leader is tasked with ingress transactions, sorting and filtering valid transactions, arranging them into entries, shredding the entries and broadcasting the shreds. While a validator only needs to reassemble the block and replay execution of well formed entries. The leader does 3x more memory operations before any bank execution than the validator per processed transaction. +A bankless leader does the minimum amount of work to produce a valid block. The +leader is tasked with ingesting transactions, sorting and filtering valid +transactions, arranging them into entries, shredding the entries and +broadcasting the shreds. A validator, by contrast, only needs to reassemble the +block and replay execution of well-formed entries. The leader does 3x more +memory operations before any bank execution than the validator per processed +transaction. ## Rationale -Normal bank operation for a spend needs to do 2 loads and 2 stores. With this design leader just does 1 load. so 4x less account_db work before generating the block. The store operations are likely to be more expensive than reads. +Normal bank operation for a spend needs to do 2 loads and 2 stores. With this +design the leader does just 1 load, so 4x less account_db work is required +before generating the block. The store operations are likely to be more +expensive than reads. -When replay stage starts processing the same transactions, it can assume that PoH is valid, and that all the entries are safe for parallel execution. The fee accounts that have been loaded to produce the block are likely to still be in memory, so the additional load should be warm and the cost is likely to be amortized. +When replay stage starts processing the same transactions, it can assume that +PoH is valid, and that all the entries are safe for parallel execution. The fee +accounts that have been loaded to produce the block are likely to still be in +memory, so the additional load should be warm and the cost is likely to be +amortized. ## Fee Account -The [fee account](../terminology.md#fee_account) pays for the transaction to be included in the block. The leader only needs to validate that the fee account has the balance to pay for the fee. +The [fee account](https://solana.com/docs/terminology#fee_account) pays for the +transaction to be included in the block.
The leader only needs to validate that +the fee account has the balance to pay for the fee. ## Balance Cache -For the duration of the leaders consecutive blocks, the leader maintains a temporary balance cache for all the processed fee accounts. The cache is a map of pubkeys to lamports. +For the duration of the leader's consecutive blocks, the leader maintains a +temporary balance cache for all the processed fee accounts. The cache is a map +of pubkeys to lamports. -At the start of the first block the balance cache is empty. At the end of the last block the cache is destroyed. +At the start of the first block the balance cache is empty. At the end of the +last block the cache is destroyed. -The balance cache lookups must reference the same base fork for the entire duration of the cache. At the block boundary, the cache can be reset along with the base fork after replay stage finishes verifying the previous block. +The balance cache lookups must reference the same base fork for the entire +duration of the cache. At the block boundary, the cache can be reset along with +the base fork after replay stage finishes verifying the previous block. ## Balance Check -Prior to the balance check, the leader validates all the signatures in the transaction. +Prior to the balance check, the leader validates all the signatures in the +transaction. 1. Verify the accounts are not in use and BlockHash is valid. -2. Check if the fee account is present in the cache, or load the account from accounts_db and store the lamport balance in the cache. +2. Check if the fee account is present in the cache, or load the account from + accounts_db and store the lamport balance in the cache. 3. If the balance is less than the fee, drop the transaction. 4. Subtract the fee from the balance. -5. For all the keys in the transaction that are Credit-Debit and are referenced by an instruction, reduce their balance to 0 in the cache. The account fee is declared as Credit-Debit, but as long as it is not used in any instruction its balance will not be reduced to 0. +5. For all the keys in the transaction that are Credit-Debit and are referenced + by an instruction, reduce their balance to 0 in the cache. The fee account is + declared as Credit-Debit, but as long as it is not used in any instruction + its balance will not be reduced to 0. ## Leader Replay -Leaders will need to replay their blocks as part of the standard replay stage operation. +Leaders will need to replay their blocks as part of the standard replay stage +operation. ## Leader Replay With Consecutive Blocks -A leader can be scheduled to produce multiple blocks in a row. In that scenario the leader is likely to be producing the next block while the replay stage for the first block is playing. +A leader can be scheduled to produce multiple blocks in a row. In that scenario +the leader is likely to be producing the next block while the replay stage for +the first block is playing. -When the leader finishes the replay stage it can reset the balance cache by clearing it, and set a new fork as the base for the cache which can become active on the next block. +When the leader finishes the replay stage it can reset the balance cache by +clearing it, and set a new fork as the base for the cache which can become +active on the next block. ## Resetting the Balance Cache -1. At the start of the block, if the balance cache is uninitialized, set the base fork for the balance cache to be the parent of the block and create an empty cache. -2.
if the cache is initialized, check if block's parents has a new frozen bank that is newer than the current base fork for the balance cache. -3. if a parent newer than the cache's base fork exist, reset the cache to the parent. +1. At the start of the block, if the balance cache is uninitialized, set the + base fork for the balance cache to be the parent of the block and create an + empty cache. +2. If the cache is initialized, check if the block's parents have a new frozen + bank that is newer than the current base fork for the balance cache. +3. If a parent newer than the cache's base fork exists, reset the cache to the + parent. ## Impact on Clients -The same fee account can be reused many times in the same block until it is used once as Credit-Debit by an instruction. +The same fee account can be reused many times in the same block until it is used +once as Credit-Debit by an instruction. -Clients that transmit a large number of transactions per second should use a dedicated fee account that is not used as Credit-Debit in any instruction. +Clients that transmit a large number of transactions per second should use a +dedicated fee account that is not used as Credit-Debit in any instruction. -Once an account fee is used as Credit-Debit, it will fail the balance check until the balance cache is reset. +Once a fee account is used as Credit-Debit, it will fail the balance check +until the balance cache is reset. ### Check out the [SIMD here to contribute](https://github.com/solana-foundation/solana-improvement-documents/pull/5) diff --git a/docs/src/proposals/blockstore-rocksdb-compaction.md b/docs/src/proposals/blockstore-rocksdb-compaction.md index be89214f7332bd..39eaa5dece8844 100644 --- a/docs/src/proposals/blockstore-rocksdb-compaction.md +++ b/docs/src/proposals/blockstore-rocksdb-compaction.md @@ -22,7 +22,7 @@ required for storing each entry. In other words, RocksDB uses compactions to balance [write, space, and read amplifications](https://smalldatum.blogspot.com/2015/11/read-write-space-amplification-pick-2_23.html). As different workloads have different requirements, RocksDB makes its options -highly configerable. However, it also means its default settings might not +highly configurable. However, it also means its default settings might not be always suitable. This document focuses on RocksDB's compaction optimization for Solana's Blockstore. @@ -109,7 +109,7 @@ close to 1 read amplification. As each key is only inserted once, we have space amplification 1. ### Use Current Settings for Metadata Column Families -The second type of the column families related to shred insertion is medadata +The second type of the column families related to shred insertion is metadata column families. These metadata column families contributes ~1% of the shred insertion data in size. The largest metadata column family here is the Index column family, which occupies 0.8% of the shred insertion data. @@ -160,7 +160,7 @@ in Solana's BlockStore use case: Here we discuss Level to FIFO and FIFO to Level migrations: ### Level to FIFO -heoretically, FIFO compaction is the superset of all other compaction styles, +Theoretically, FIFO compaction is the superset of all other compaction styles, as it does not have any assumption of the LSM tree structure. However, the current RocksDB implementation does not offer such flexibility while it is theoretically doable.
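+
+For operators who want to experiment with this proposal, some validator releases expose the shred-column compaction style as startup arguments. The sketch below is a hedged example, not an official recommendation: the two `--rocksdb-*` flags are assumptions based on recent releases and may be renamed or removed, so verify them against `solana-validator --help` first.
+
+```
+# Opt the shred columns in to FIFO compaction and cap their on-disk size
+# (~500 GB here; size this to your ledger disk). The two --rocksdb-* flags
+# below are assumed names; confirm they exist in your release before use.
+solana-validator \
+  --identity ~/validator-keypair.json \
+  --ledger /mnt/ledger \
+  --rocksdb-shred-compaction fifo \
+  --rocksdb-fifo-shred-storage-size 500000000000 \
+  --limit-ledger-size
+```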
diff --git a/docs/src/proposals/handle-duplicate-block.md b/docs/src/proposals/handle-duplicate-block.md index dc463d5cf15b6c..2941f17d2702ba 100644 --- a/docs/src/proposals/handle-duplicate-block.md +++ b/docs/src/proposals/handle-duplicate-block.md @@ -18,14 +18,14 @@ potential forks that the cluster has to resolve. ## Protocol 1. When WindowStage detects a duplicate slot proof `P`, it checks the new `gossip_root` to see if `<= 1/3` of the nodes have rooted a slot `S >= P`. If so, it pushes a proof to `gossip_duplicate_slots` to gossip. WindowStage then signals ReplayStage about this duplicate slot `S`. These proofs can be purged from gossip once the validator sees > 2/3 of people gossiping roots `R > S`. -2. When ReplayStage receives the signal for a duplicate slot `S` from `1)` above, the validator monitors gossip and replay waiting for`>= DUPLICATE_THRESHOLD` votes for the same hash which implies the same version of the slot. If this conditon is met for some version with hash `H` of slot `S`, this is then known as the `duplicate_confirmed` version of the slot. +2. When ReplayStage receives the signal for a duplicate slot `S` from `1)` above, the validator monitors gossip and replay waiting for `>= DUPLICATE_THRESHOLD` votes for the same hash, which implies the same version of the slot. If this condition is met for some version with hash `H` of slot `S`, this is then known as the `duplicate_confirmed` version of the slot. Before a duplicate slot `S` is `duplicate_confirmed`, it's first excluded from the vote candidate set in the fork choice rules. In addition, ReplayStage also resets PoH to the *latest* ancestor of the *earliest* `non-duplicate/confirmed_duplicate_slot`, so that block generation can start happening on the earliest known *safe* block. Some notes about the `DUPLICATE_THRESHOLD`. In the cases below, assume `DUPLICATE_THRESHOLD = 52`: a) If less than `2 * DUPLICATE_THRESHOLD - 1` percentage of the network is malicious, then there can only be one such `duplicate_confirmed` version of the slot. With `DUPLICATE_THRESHOLD = 52`, this is -a malcious tolerance of `4%` +a malicious tolerance of `4%` b) The liveness of the network is at most `1 - DUPLICATE_THRESHOLD - SWITCH_THRESHOLD`. This is because if you need at least `SWITCH_THRESHOLD` percentage of the stake voting on a different fork in order to switch off of a duplicate fork that has `< DUPLICATE_THRESHOLD` stake voting on it, and is *not* `duplicate_confirmed`. For `DUPLICATE_THRESHOLD = 52` and `DUPLICATE_THRESHOLD = 38`, this implies a liveness tolerance of `10%`. @@ -38,7 +38,7 @@ For example in the situation below, validators that voted on `2` can't vote any ``` -3. Switching proofs need to be extended to allow including vote hashes from different versions of the same same slot (detected through 1). Right now this is not supported since switching proofs can +3. Switching proofs need to be extended to allow including vote hashes from different versions of the same slot (detected through 1). Right now this is not supported since switching proofs can only be built using votes from banks in BankForks, and two different versions of the same slot cannot simultaneously exist in BankForks. For instance: @@ -73,7 +73,7 @@ This problem we need to solve is modeled simply by the below scenario: ``` Assume the following: -1.
Due to gossiping duplicate proofs, we assume everyone will eventually see duplicate proofs for 2 and 4, so everyone agrees to remove them from fork choice until they are `duplicate_confirmed`. 2. Due to lockouts, `> DUPLICATE_THRESHOLD` of the stake votes on 4, but not 2. This means at least `DUPLICATE_THRESHOLD` of people have the "correct" version of both slots 2 and 4. diff --git a/docs/src/proposals/ledger-replication-to-implement.md b/docs/src/proposals/ledger-replication-to-implement.md index 104a214641d4dc..fac6581fcb3016 100644 --- a/docs/src/proposals/ledger-replication-to-implement.md +++ b/docs/src/proposals/ledger-replication-to-implement.md @@ -219,7 +219,7 @@ For each turn of the PoRep game, both Validators and Archivers evaluate each sta For any random seed, we force everyone to use a signature that is derived from a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity. -Since there are many more client identities then encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to avoid a single human entity from storing all the replications of a single chunk of the ledger. +Since there are many more client identities than encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to prevent a single human entity from storing all the replications of a single chunk of the ledger. Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore PoRep identities and blocks. Thus to get a reward for archivers need to store the first block for free and the network can reward long lived client identities more than new ones. diff --git a/docs/src/proposals/off-chain-message-signing.md b/docs/src/proposals/off-chain-message-signing.md index b234627dca7f7d..7d5d25b10f726e 100644 --- a/docs/src/proposals/off-chain-message-signing.md +++ b/docs/src/proposals/off-chain-message-signing.md @@ -64,7 +64,7 @@ This may be any arbitrary bytes. For instance the on-chain address of a program, DAO instance, Candy Machine, etc. This field **SHOULD** be displayed to users as a base58-encoded ASCII string rather -than interpretted otherwise. +than interpreted otherwise. #### Message Format diff --git a/docs/src/proposals/optimistic-transaction-propagation-signal.md b/docs/src/proposals/optimistic-transaction-propagation-signal.md index f85b38eedcd1ad..549cea0b08bbb2 100644 --- a/docs/src/proposals/optimistic-transaction-propagation-signal.md +++ b/docs/src/proposals/optimistic-transaction-propagation-signal.md @@ -13,7 +13,7 @@ concatenating (1), (2), and (3) deduplicating this list of entries by pubkey favoring entries with contact info filtering this list by entries with contact info -This list is then is randomly shuffled by stake weight. +This list is then randomly shuffled by stake weight. Shreds are then retransmitted to up to FANOUT neighbors and up to FANOUT children.
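+
+As a rough worked example (the fanout value is illustrative, not normative), the maximum number of nodes reachable per retransmit level grows geometrically:
+
+```
+# Illustrative only: nodes reachable per level with a hypothetical FANOUT=200.
+fanout=200
+echo "level 1: up to $fanout nodes"              # 200
+echo "level 2: up to $((fanout * fanout)) nodes" # 40000
+```
+
+A cluster of tens of thousands of nodes is therefore covered within two retransmit levels, which is why the signals discussed in this document only need to distinguish the first few layers.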
@@ -37,7 +37,7 @@ First, only epoch staked nodes will be considered regardless of presence of contact info (and possibly including the validator node itself). A deterministic ordering of the epoch staked nodes will be created based on the -derministic shred seed using weighted_shuffle. +deterministic shred seed using weighted_shuffle. Let `neighbor_set` be selected from up to FANOUT neighbors of the current node. Let `child_set` be selected from up to FANOUT children of the current node. @@ -73,7 +73,7 @@ distribution levels. distribution levels because of lack of contact info. - Current node was part of original epoch staked shuffle from retransmitter but was filtered out because of missing contact info. Current node subsequently -receives retransmisison of shred and assumes that the retransmit was a result +receives retransmission of shred and assumes that the retransmit was a result of the deterministic tree calculation and not from subsequent random selection. This should be benign because the current node will underestimate prior stake weight in the retransmission tree. @@ -105,5 +105,5 @@ Practically, signals should fall into the following buckets: 1.2. can signal layer 1 + subset of layer 2 when retransmit is sent 3. layer 2 3.1. can signal layer 2 when shred is received -3.2. can signal layer 2 + subset of layer 3 when retrnasmit is sent +3.2. can signal layer 2 + subset of layer 3 when retransmit is sent 4. current node not a member of epoch staked nodes, no signal can be sent diff --git a/docs/src/proposals/optimistic_confirmation.md b/docs/src/proposals/optimistic_confirmation.md index f2b77ea3316f54..3cb553cdcb9e24 100644 --- a/docs/src/proposals/optimistic_confirmation.md +++ b/docs/src/proposals/optimistic_confirmation.md @@ -86,7 +86,7 @@ the votes must satisfy: - `X <= S.last`, `X' <= S'.last` - All `s` in `S` are ancestors/descendants of one another, - all `s'` in `S'` are ancsestors/descendants of one another, + all `s'` in `S'` are ancestors/descendants of one another, - - `X == X'` implies `S` is parent of `S'` or `S'` is a parent of `S` - `X' > X` implies `X' > S.last` and `S'.last > S.last` @@ -312,7 +312,7 @@ true that `B' > X` ``` `Proof`: Let `Vote(X, S)` be a vote in the `Optimistic Votes` set. Then by -definition, given the "optimistcally confirmed" block `B`, `X <= B <= S.last`. +definition, given the "optimistically confirmed" block `B`, `X <= B <= S.last`. Because `X` is a parent of `B`, and `B'` is not a parent or ancestor of `B`, then: @@ -322,7 +322,7 @@ then: Now consider if `B'` < `X`: -`Case B' < X`: We wll show this is a violation of lockouts. +`Case B' < X`: We will show this is a violation of lockouts. From above, we know `B'` is not a parent of `X`. Then because `B'` was rooted, and `B'` is not a parent of `X`, then the validator should not have been able to vote on the higher slot `X` that does not descend from `B'`. @@ -361,7 +361,7 @@ By `Lemma 2` we know `B' > X`, and from above `S_v.last > B'`, so then From above, `S.last >= B >= X` so for all such "switching votes", `X_v > B`. Now ordering all these "switching votes" in time, let `V` to be the validator -in `Optimistic Validators` that first submitted such a "swtching vote" +in `Optimistic Validators` that first submitted such a "switching vote" `Vote(X', S')`, where `X' > B`. 
We know that such a validator exists because we know from above that all delinquent validators must have submitted such a vote, and the delinquent validators are a subset of the diff --git a/docs/src/proposals/return-data.md b/docs/src/proposals/return-data.md index 93a546103f0440..3e70e87195e937 100644 --- a/docs/src/proposals/return-data.md +++ b/docs/src/proposals/return-data.md @@ -136,7 +136,7 @@ strings in the [stable log](https://github.com/solana-labs/solana/blob/952928419 Solidity on Ethereum allows the contract to return an error in the return data. In this case, all the account data changes for the account should be reverted. On Solana, any non-zero exit code -for a SBF prorgram means the entire transaction fails. We do not wish to support an error return +for a SBF program means the entire transaction fails. We do not wish to support an error return by returning success and then returning an error in the return data. This would mean we would have to support reverting the account data changes; this too expensive both on the VM side and the SBF contract side. diff --git a/docs/src/proposals/rip-curl.md b/docs/src/proposals/rip-curl.md index 8e2ab9707a39b7..c562f01bd1337e 100644 --- a/docs/src/proposals/rip-curl.md +++ b/docs/src/proposals/rip-curl.md @@ -39,7 +39,7 @@ Easier for validators to support: has no significant resource constraints. - Transaction status is never stored in memory and cannot be polled for. - Signatures are only stored in memory until the desired commitment level or - until the blockhash expires, which ever is later. + until the blockhash expires, whichever is later. How it works: diff --git a/docs/src/proposals/simple-payment-and-state-verification.md b/docs/src/proposals/simple-payment-and-state-verification.md index c8144396c67640..caa8a2d55e1f4c 100644 --- a/docs/src/proposals/simple-payment-and-state-verification.md +++ b/docs/src/proposals/simple-payment-and-state-verification.md @@ -90,7 +90,7 @@ code, but a single status bit to indicate the transaction's success. Currently, the Block-Merkle is not implemented, so to verify `E` was an entry in the block with bank hash `B`, we would need to provide all the entry hashes -in the block. Ideally this Block-Merkle would be implmented, as the alternative +in the block. Ideally this Block-Merkle would be implemented, as the alternative is very inefficient. #### Block Headers @@ -138,7 +138,7 @@ https://github.com/solana-labs/solana/blob/b6bfed64cb159ee67bb6bdbaefc7f833bbed3 Each vote is a signed transaction that includes the bank hash of the block the validator voted for, i.e. the `B` from the `Transaction Merkle` section above. Once a certain threshold `T` of the network has voted on a block, the block is -considered optimistially confirmed. The votes made by this group of `T` +considered optimistically confirmed. The votes made by this group of `T` validators is needed to show the block with bank hash `B` was optimistically confirmed. @@ -150,11 +150,11 @@ vote, and vote account pubkey responsible for the vote. Together, the transaction merkle and optimistic confirmation proofs can be provided over RPC to subscribers by extending the existing signature -subscrption logic. Clients who subscribe to the "Confirmed" confirmation +subscription logic. Clients who subscribe to the "Confirmed" confirmation level are already notified when optimistic confirmation is detected, a flag can be provided to signal the two proofs above should also be returned. 
-It is important to note that optimistcally confirming `B` also implies that all +It is important to note that optimistically confirming `B` also implies that all ancestor blocks of `B` are also optimistically confirmed, and also that not all blocks will be optimistically confirmed. @@ -164,7 +164,7 @@ B -> B' ``` -So in the example above if a block `B'` is optimisically confirmed, then so is +So in the example above if a block `B'` is optimistically confirmed, then so is `B`. Thus if a transaction was in block `B`, the transaction merkle in the proof will be for block `B`, but the votes presented in the proof will be for block `B'`. This is why the headers in the `Block headers` section above are @@ -174,10 +174,10 @@ important, the client will need to verify that `B` is indeed an ancestor of #### Proof of Stake Distribution Once presented with the transaction merkle and optimistic confirmation proofs -above, a client can verify a transaction `T` was optimistially confirmed in a +above, a client can verify a transaction `T` was optimistically confirmed in a block with bank hash `B`. The last missing piece is how to verify that the votes in the optimistic proofs above actually constitute the valid `T` -percentage of the stake necessay to uphold the safety guarantees of +percentage of the stake necessary to uphold the safety guarantees of "optimistic confirmation". One way to approach this might be for every epoch, when the stake set changes, @@ -191,7 +191,7 @@ block `B` was optimistically confirmed/rooted. An account's state (balance or other data) can be verified by submitting a transaction with a **_TBD_** Instruction to the cluster. The client can then use a [Transaction Inclusion Proof](#transaction-inclusion-proof) to verify -whether the cluster agrees that the acount has reached the expected state. +whether the cluster agrees that the account has reached the expected state. ### Validator Votes diff --git a/docs/src/proposals/tick-verification.md b/docs/src/proposals/tick-verification.md index 8be7ef939bc9ca..a84c42c985ddb0 100644 --- a/docs/src/proposals/tick-verification.md +++ b/docs/src/proposals/tick-verification.md @@ -18,12 +18,12 @@ number of hashes. Malicious transmissions `T` are handled in two ways: -1. If a leader can generate some erronenous transmission `T` and also some +1. If a leader can generate some erroneous transmission `T` and also some alternate transmission `T'` for the same slot without violating any slashing rules for duplicate transmissions (for instance if `T'` is a subset of `T`), then the cluster must handle the possibility of both transmissions being live. -Thus this means we cannot mark the erronenous transmission `T` as dead because +Thus this means we cannot mark the erroneous transmission `T` as dead because the cluster may have reached consensus on `T'`. These cases necessitate a slashing proof to punish this bad behavior. diff --git a/docs/src/proposals/timely-vote-credits.md b/docs/src/proposals/timely-vote-credits.md index 7875b815c6b11b..39c496ff9dfe1a 100644 --- a/docs/src/proposals/timely-vote-credits.md +++ b/docs/src/proposals/timely-vote-credits.md @@ -10,7 +10,7 @@ vote credits earned by validator votes. Vote credits are the accounting method used to determine what percentage of inflation rewards a validator earns on behalf of its stakers. Currently, when a slot that a validator has previously voted on is "rooted", it earns 1 vote -credit. A "rooted" slot is one which has received full committment by the +credit. 
A "rooted" slot is one which has received full commitment by the validator (i.e. has been finalized). One problem with this simple accounting method is that it awards one credit @@ -47,7 +47,7 @@ transmitted immediately and landed in an earlier slot. If landing a vote with 1 slot latency awarded more credit than landing that same vote in 2 slots latency, then validators who could land votes -consistently wihthin 1 slot would have a credits earning advantage over those +consistently within 1 slot would have a credits earning advantage over those who could not. Part of the latency when transmitting votes is unavoidable as it's a function of geographical distance between the sender and receiver of the vote. The Solana network is spread around the world but it is not evenly diff --git a/docs/src/running-validator.md b/docs/src/running-validator.md deleted file mode 100644 index 177cbdb19de0bb..00000000000000 --- a/docs/src/running-validator.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Running a Validator ---- - -This section describes how to run a Solana validator node. - -There are several clusters available to connect to; see [choosing a Cluster](cli/choose-a-cluster.md) for an overview of each. diff --git a/docs/src/developing/runtime-facilities/programs.md b/docs/src/runtime/programs.md similarity index 99% rename from docs/src/developing/runtime-facilities/programs.md rename to docs/src/runtime/programs.md index 66219b2bf829ce..ed539e7e1495f6 100644 --- a/docs/src/developing/runtime-facilities/programs.md +++ b/docs/src/runtime/programs.md @@ -68,7 +68,7 @@ instruction via a program id, the Solana runtime will load both your the program and its owner, the BPF Upgradeable Loader. The runtime then passes your program to the BPF Upgradeable Loader to process the instruction. -[More information about deployment](cli/deploy-a-program.md) +[More information about deployment](../cli/examples/deploy-a-program.md) ## Ed25519 Program diff --git a/docs/src/developing/runtime-facilities/sysvars.md b/docs/src/runtime/sysvars.md similarity index 86% rename from docs/src/developing/runtime-facilities/sysvars.md rename to docs/src/runtime/sysvars.md index 908ecdafe50700..99d271f0a3c056 100644 --- a/docs/src/developing/runtime-facilities/sysvars.md +++ b/docs/src/runtime/sysvars.md @@ -3,10 +3,9 @@ title: Sysvar Cluster Data --- Solana exposes a variety of cluster state data to programs via -[`sysvar`](terminology.md#sysvar) accounts. These accounts are populated at -known addresses published along with the account layouts in the -[`solana-program` -crate](https://docs.rs/solana-program/VERSION_FOR_DOCS_RS/solana_program/sysvar/index.html), +[`sysvar`](https://solana.com/docs/terminology#sysvar) accounts. These accounts +are populated at known addresses published along with the account layouts in the +[`solana-program` crate](https://docs.rs/solana-program/VERSION_FOR_DOCS_RS/solana_program/sysvar/index.html), and outlined below. There are two ways for a program to access a sysvar. @@ -18,21 +17,25 @@ let clock = Clock::get() ``` The following sysvars support `get`: + - Clock - EpochSchedule - Fees - Rent - EpochRewards -The second is to pass the sysvar to the program as an account by including its address as one of the accounts in the `Instruction` and then deserializing the data during execution. Access to sysvars accounts is -always _readonly_. 
+The second is to pass the sysvar to the program as an account by including its +address as one of the accounts in the `Instruction` and then deserializing the +data during execution. Access to sysvars accounts is always _readonly_. ``` let clock_sysvar_info = next_account_info(account_info_iter)?; let clock = Clock::from_account_info(&clock_sysvar_info)?; ``` -The first method is more efficient and does not require that the sysvar account be passed to the program, or specified in the `Instruction` the program is processing. +The first method is more efficient and does not require that the sysvar account +be passed to the program, or specified in the `Instruction` the program is +processing. ## Clock @@ -40,13 +43,17 @@ The Clock sysvar contains data on cluster time, including the current slot, epoch, and estimated wall-clock Unix timestamp. It is updated every slot. - Address: `SysvarC1ock11111111111111111111111111111111` -- Layout: [Clock](https://docs.rs/solana-program/VERSION_FOR_DOCS_RS/solana_program/clock/struct.Clock.html) +- Layout: + [Clock](https://docs.rs/solana-program/VERSION_FOR_DOCS_RS/solana_program/clock/struct.Clock.html) - Fields: - `slot`: the current slot - - `epoch_start_timestamp`: the Unix timestamp of the first slot in this epoch. In the first slot of an epoch, this timestamp is identical to the `unix_timestamp` (below). + - `epoch_start_timestamp`: the Unix timestamp of the first slot in this epoch. + In the first slot of an epoch, this timestamp is identical to the + `unix_timestamp` (below). - `epoch`: the current epoch - - `leader_schedule_epoch`: the most recent epoch for which the leader schedule has already been generated + - `leader_schedule_epoch`: the most recent epoch for which the leader schedule + has already been generated - `unix_timestamp`: the Unix timestamp of this slot. Each slot has an estimated duration based on Proof of History. But in reality, @@ -69,7 +76,8 @@ epoch, and estimated wall-clock Unix timestamp. It is updated every slot. The EpochSchedule sysvar contains epoch scheduling constants that are set in genesis, and enables calculating the number of slots in a given epoch, the epoch -for a given slot, etc. (Note: the epoch schedule is distinct from the [`leader schedule`](terminology.md#leader-schedule)) +for a given slot, etc. (Note: the epoch schedule is distinct from the +[`leader schedule`](https://solana.com/docs/terminology#leader-schedule)) - Address: `SysvarEpochSchedu1e111111111111111111111111` - Layout: @@ -159,7 +167,8 @@ determining whether epoch rewards distribution has finished. ## LastRestartSlot -The LastRestartSlot sysvar contains the slot number of the last restart or _0_ (zero) if none ever happened. +The LastRestartSlot sysvar contains the slot number of the last restart or _0_ +(zero) if none ever happened. 
- Address: `SysvarLastRestartS1ot1111111111111111111111` - Layout: diff --git a/docs/src/developing/runtime-facilities/zk-docs/ciphertext_ciphertext_equality.pdf b/docs/src/runtime/zk-docs/ciphertext_ciphertext_equality.pdf similarity index 100% rename from docs/src/developing/runtime-facilities/zk-docs/ciphertext_ciphertext_equality.pdf rename to docs/src/runtime/zk-docs/ciphertext_ciphertext_equality.pdf diff --git a/docs/src/developing/runtime-facilities/zk-docs/ciphertext_commitment_equality.pdf b/docs/src/runtime/zk-docs/ciphertext_commitment_equality.pdf similarity index 100% rename from docs/src/developing/runtime-facilities/zk-docs/ciphertext_commitment_equality.pdf rename to docs/src/runtime/zk-docs/ciphertext_commitment_equality.pdf diff --git a/docs/src/developing/runtime-facilities/zk-docs/pubkey_proof.pdf b/docs/src/runtime/zk-docs/pubkey_proof.pdf similarity index 100% rename from docs/src/developing/runtime-facilities/zk-docs/pubkey_proof.pdf rename to docs/src/runtime/zk-docs/pubkey_proof.pdf diff --git a/docs/src/developing/runtime-facilities/zk-docs/twisted_elgamal.pdf b/docs/src/runtime/zk-docs/twisted_elgamal.pdf similarity index 100% rename from docs/src/developing/runtime-facilities/zk-docs/twisted_elgamal.pdf rename to docs/src/runtime/zk-docs/twisted_elgamal.pdf diff --git a/docs/src/developing/runtime-facilities/zk-docs/zero_proof.pdf b/docs/src/runtime/zk-docs/zero_proof.pdf similarity index 100% rename from docs/src/developing/runtime-facilities/zk-docs/zero_proof.pdf rename to docs/src/runtime/zk-docs/zero_proof.pdf diff --git a/docs/src/developing/runtime-facilities/zk-token-proof.md b/docs/src/runtime/zk-token-proof.md similarity index 100% rename from docs/src/developing/runtime-facilities/zk-token-proof.md rename to docs/src/runtime/zk-token-proof.md diff --git a/docs/src/staking.md b/docs/src/staking.md deleted file mode 100644 index 312f44fd99d347..00000000000000 --- a/docs/src/staking.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Staking on Solana ---- - -_Note before reading: All references to increases in values are in absolute -terms with regards to balance of SOL. -This document makes no suggestion as to the monetary value of SOL at any time._ - -By staking your SOL tokens, you help secure the network and -[earn rewards](implemented-proposals/staking-rewards.md) while doing so. - -You can stake by delegating your tokens to validators who process transactions and run the network. - -Delegating stake is a shared-risk shared-reward financial model that may provide -returns to holders of tokens delegated for a long period. -This is achieved by aligning the financial incentives of the token-holders -(delegators) and the validators to whom they delegate. - -The more stake delegated to a validator, the more often this validator -is chosen to write new transactions to the ledger. The more transactions -the validator writes, the more rewards the validator and its delegators earn. -Validators who configure their systems to be able to process more transactions -earn proportionally more rewards and -because they keep the network running as fast and as smoothly as possible. - -Validators incur costs by running and maintaining their systems, and this is -passed on to delegators in the form of a fee collected as a percentage of -rewards earned. This fee is known as a _commission_. Since validators earn more -rewards the more stake is delegated to them, they may compete with one another -to offer the lowest commission for their services. 
- -You risk losing tokens when staking through a process known as -_slashing_. Slashing involves the removal and destruction of a portion of a -validator's delegated stake in response to intentional malicious behavior, -such as creating invalid transactions or censoring certain types of transactions -or network participants. - -When a validator is slashed, all token holders who have delegated stake to that -validator lose a portion of their delegation. While this means an immediate -loss for the token holder, it also is a loss of future rewards for the validator -due to their reduced total delegation. More details on the slashing roadmap can -be found -[here](proposals/optimistic-confirmation-and-slashing.md#slashing-roadmap). - -Rewards and slashing align validator and token holder interests which helps keep the network -secure, robust and performant. - - -## How do I stake my SOL tokens? - -You can stake SOL by moving your tokens -into a wallet that supports staking. The wallet provides steps to create a stake account -and do the delegation. - -#### Supported Wallets - -Many web and mobile wallets support Solana staking operations. Please check with -your favorite wallet's maintainers regarding status - -#### Solana command line tools -- Solana command line tools can perform all stake operations in conjunction - with a CLI-generated keypair file wallet, a paper wallet, or with a connected - Ledger Nano. - [Staking commands using the Solana Command Line Tools](cli/delegate-stake.md). - -#### Create a Stake Account - -Follow the wallet's instructions for creating a staking account. This account -will be of a different type than one used to simply send and receive tokens. - -#### Select a Validator - -Follow the wallet's instructions for selecting a validator. You can get -information about potentially performant validators from the links below. -The Solana Foundation does not recommend any particular validator. - -The site solanabeach.io is built and maintained by one of our validators, -Staking Facilities. It provides a some high-level graphical information about -the network as a whole, as well as a list of each validator and some recent -performance statistics about each one. - -- https://solanabeach.io - -To view block production statistics, use the Solana command-line tools: - -- `solana validators` -- `solana block-production` - -The Solana team does not make recommendations on how to interpret this -information. Do your own due diligence. - -#### Delegate your Stake - -Follow the wallet's instructions for delegating your to your chosen validator. - -## Stake Account Details - -For more information about the operations and permissions associated with a -stake account, please see [Stake Accounts](staking/stake-accounts.md) diff --git a/docs/src/staking/stake-accounts.md b/docs/src/staking/stake-accounts.md deleted file mode 100644 index 0890bfb2abb1d5..00000000000000 --- a/docs/src/staking/stake-accounts.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Stake Account Structure ---- - -A stake account on Solana can be used to delegate tokens to validators on -the network to potentially earn rewards for the owner of the stake account. -Stake accounts are created and managed differently than a traditional wallet -address, known as a _system account_. A system account is only able to send and -receive SOL from other accounts on the network, whereas a stake account supports -more complex operations needed to manage a delegation of tokens. 
- -Stake accounts on Solana also work differently than those of other Proof-of-Stake -blockchain networks that you may be familiar with. This document describes the -high-level structure and functions of a Solana stake account. - -#### Account Address - -Each stake account has a unique address which can be used to look up the account -information in the command line or in any network explorer tools. However, -unlike a wallet address in which the holder of the address's keypair controls -the wallet, the keypair associated with a stake account address does not necessarily have -any control over the account. In fact, a keypair or private key may not even -exist for a stake account's address. - -The only time a stake account's address has a keypair file is when [creating -a stake account using the command line tools](../cli/delegate-stake.md#create-a-stake-account). -A new keypair file is created first only to ensure that the stake account's -address is new and unique. - -#### Understanding Account Authorities - -Certain types of accounts may have one or more _signing authorities_ -associated with a given account. An account authority is used to sign certain -transactions for the account it controls. This is different from -some other blockchain networks where the holder of the keypair associated with -the account's address controls all of the account's activity. - -Each stake account has two signing authorities specified by their respective address, -each of which is authorized to perform certain operations on the stake account. - -The _stake authority_ is used to sign transactions for the following operations: - -- Delegating stake -- Deactivating the stake delegation -- Splitting the stake account, creating a new stake account with a portion of the - funds in the first account -- Merging two stake accounts into one -- Setting a new stake authority - -The _withdraw authority_ signs transactions for the following: - -- Withdrawing un-delegated stake into a wallet address -- Setting a new withdraw authority -- Setting a new stake authority - -The stake authority and withdraw authority are set when the stake account is -created, and they can be changed to authorize a new signing address at any time. -The stake and withdraw authority can be the same address or two different -addresses. - -The withdraw authority keypair holds more control over the account as it is -needed to liquidate the tokens in the stake account, and can be used to reset -the stake authority if the stake authority keypair becomes lost or compromised. - -Securing the withdraw authority against loss or theft is of utmost importance -when managing a stake account. - -#### Multiple Delegations - -Each stake account may only be used to delegate to one validator at a time. -All of the tokens in the account are either delegated or un-delegated, or in the -process of becoming delegated or un-delegated. To delegate a fraction of your -tokens to a validator, or to delegate to multiple validators, you must create -multiple stake accounts. - -This can be accomplished by creating multiple stake accounts from a wallet -address containing some tokens, or by creating a single large stake account -and using the stake authority to split the account into multiple accounts -with token balances of your choosing. - -The same stake and withdraw authorities can be assigned to multiple -stake accounts. - -#### Merging stake accounts - -Two stake accounts that have the same authorities and lockup can be merged into -a single resulting stake account. 
A merge is possible between two stakes in the -following states with no additional conditions: - -- two deactivated stakes -- an inactive stake into an activating stake during its activation epoch - -For the following cases, the voter pubkey and vote credits observed must match: - -- two activated stakes -- two activating accounts that share an activation epoch, during the activation epoch - -All other combinations of stake states will fail to merge, including all "transient" -states, where a stake is activating or deactivating with a non-zero effective stake. - -#### Delegation Warmup and Cooldown - -When a stake account is delegated, or a delegation is deactivated, the operation -does not take effect immediately. - -A delegation or deactivation takes several [epochs](../terminology.md#epoch) -to complete, with a fraction of the delegation becoming active or inactive at -each epoch boundary after the transaction containing the instructions has been -submitted to the cluster. - -There is also a limit on how much total stake can become delegated or -deactivated in a single epoch, to prevent large sudden changes in stake across -the network as a whole. Since warmup and cooldown are dependent on the behavior -of other network participants, their exact duration is difficult to predict. -Details on the warmup and cooldown timing can be found -[here](../cluster/stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal). - -#### Lockups - -Stake accounts can have a lockup which prevents the tokens they hold from being -withdrawn before a particular date or epoch has been reached. While locked up, -the stake account can still be delegated, un-delegated, or split, and its stake -authority can be changed as normal. Only withdrawal into another wallet or -updating the withdraw authority is not allowed. - -A lockup can only be added when a stake account is first created, but it can be -modified later, by the _lockup authority_ or _custodian_, the address of which -is also set when the account is created. - -#### Destroying a Stake Account - -Like other types of accounts on the Solana network, a stake account that has a -balance of 0 SOL is no longer tracked. If a stake account is not delegated -and all of the tokens it contains are withdrawn to a wallet address, the account -at that address is effectively destroyed, and will need to be manually -re-created for the address to be used again. - -#### Viewing Stake Accounts - -Stake account details can be viewed on the [Solana Explorer](http://explorer.solana.com/accounts) -by copying and pasting an account address into the search bar. diff --git a/docs/src/staking/stake-programming.md b/docs/src/staking/stake-programming.md deleted file mode 100644 index 99dace2cd9619c..00000000000000 --- a/docs/src/staking/stake-programming.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Stake Programming ---- - -To maximize stake distribution, decentralization, and censorship resistance on -the Solana network, staking can be performed programmatically. The team -and community have developed several on-chain and off-chain programs to make -stakes easier to manage. - -#### Stake-o-matic aka Auto-delegation Bots - -This off-chain program manages a large population of validators staked by a -central authority. The Solana Foundation uses an auto-delegation bot to regularly delegate its -stake to "non-delinquent" validators that meet specified performance requirements. 
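As a rough illustration of the splitting workflow described under "Multiple Delegations" above, the following sketch (assuming the `solana-sdk` crate, with placeholder keys and an illustrative amount) builds the instructions that carve a new stake account out of an existing one:

```rust
use solana_sdk::{
    signature::{Keypair, Signer},
    stake::instruction as stake_instruction,
};

fn main() {
    // Placeholder keypairs: an existing stake account, its stake authority,
    // and a fresh account that will receive the split-off portion.
    let stake_account = Keypair::new();
    let stake_authority = Keypair::new();
    let split_destination = Keypair::new();

    // Carve 1 SOL (1_000_000_000 lamports) out of the existing delegation.
    // `split` returns the allocate/assign/split instructions that set up
    // the destination account, which can then be delegated independently.
    let instructions = stake_instruction::split(
        &stake_account.pubkey(),
        &stake_authority.pubkey(),
        1_000_000_000,
        &split_destination.pubkey(),
    );
    println!("built {} instructions", instructions.len());
}
```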
- -#### Stake Pools - -This on-chain program pools together SOL to be staked by a manager, allowing SOL -holders to stake and earn rewards without managing stakes. -Users deposit SOL in exchange for SPL tokens (staking derivatives) that represent their ownership in the stake pool. The pool -manager stakes deposited SOL according to their strategy, perhaps using a variant -of an auto-delegation bot as described above. As stakes earn rewards, the pool and pool tokens -grow proportionally in value. Finally, pool token holders can send SPL tokens -back to the stake pool to redeem SOL, thereby participating in decentralization with much -less work required. More information can be found at the -[SPL stake pool documentation](https://spl.solana.com/stake-pool). diff --git a/docs/src/storage_rent_economics.md b/docs/src/storage_rent_economics.md deleted file mode 100644 index 4b65b250e140a7..00000000000000 --- a/docs/src/storage_rent_economics.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Storage Rent Economics ---- - -Each transaction that is submitted to the Solana ledger imposes costs. -Transaction fees paid by the submitter, and collected by a validator, in -theory, account for the acute, transactional, costs of validating and adding -that data to the ledger. Unaccounted in this process is the mid-term storage of -active ledger state, necessarily maintained by the rotating validator set. This -type of storage imposes costs not only to validators but also to the broader -network as active state grows so does data transmission and validation -overhead. To account for these costs, we describe here our preliminary design -and implementation of storage rent. - -Storage rent can be paid via one of two methods: - -Method 1: Set it and forget it - -With this approach, accounts with two-years worth of rent deposits secured are -exempt from network rent charges. By maintaining this minimum-balance, the -broader network benefits from reduced liquidity and the account holder can rest -assured that their `Account::data` will be retained for continual access/usage. - -Method 2: Pay per byte - -If an account has less than two-years worth of deposited rent the network -charges rent on a per-epoch basis, in credit for the next epoch. This rent is -deducted at a rate specified in genesis, in lamports per kilobyte-year. - -For information on the technical implementation details of this design, see the -[Rent](implemented-proposals/rent.md) section. - -**Note:** New accounts now **are required** to be initialized with enough -lamports to be rent exempt. Additionally, transactions that leave an account's -balance below the rent exempt minimum (and non-zero) will **fail**. This -essentially renders all accounts rent exempt. Rent-paying accounts that were -created before this requirement will continue paying rent until either (1) -their balance falls to zero, or (2) a transaction increases the account's -balance to be rent exempt. diff --git a/docs/src/terminology.md b/docs/src/terminology.md deleted file mode 100644 index 4bb8efdbbf7e96..00000000000000 --- a/docs/src/terminology.md +++ /dev/null @@ -1,404 +0,0 @@ ---- -title: Terminology -description: "Learn the essential terminology used throughout the Solana blockchain and development models." -keywords: - - terms - - dictionary - - definitions - - define - - programming models ---- - -The following terms are used throughout the Solana documentation and development ecosystem. 
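The two-year rent-exempt minimum described in the storage rent section above does not need to be computed by hand; it can be queried from any RPC node. A minimal sketch, assuming the blocking client from the `solana-client` crate and a devnet endpoint chosen purely as an example:

```rust
use solana_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Any RPC endpoint works; devnet is used here only as a placeholder.
    let client = RpcClient::new("https://api.devnet.solana.com".to_string());

    // Ask the cluster for the rent-exempt minimum for 1 KiB of account data.
    let data_len = 1024;
    let lamports = client.get_minimum_balance_for_rent_exemption(data_len)?;
    println!("rent-exempt minimum for {} bytes: {} lamports", data_len, lamports);
    Ok(())
}
```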
- -## account - -A record in the Solana ledger that either holds data or is an executable program. - -Like an account at a traditional bank, a Solana account may hold funds called [lamports](#lamport). Like a file in Linux, it is addressable by a key, often referred to as a [public key](#public-key-pubkey) or pubkey. - -The key may be one of: - -- an ed25519 public key -- a program-derived account address (32byte value forced off the ed25519 curve) -- a hash of an ed25519 public key with a 32 character string - -## account owner - -The address of the program that owns the account. Only the owning program is capable of modifying the account. - -## app - -A front-end application that interacts with a Solana cluster. - -## bank state - -The result of interpreting all programs on the ledger at a given [tick height](#tick-height). It includes at least the set of all [accounts](#account) holding nonzero [native tokens](#native-token). - -## block - -A contiguous set of [entries](#entry) on the ledger covered by a [vote](#ledger-vote). A [leader](#leader) produces at most one block per [slot](#slot). - -## blockhash - -A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. - -## block height - -The number of [blocks](#block) beneath the current block. The first block after the [genesis block](#genesis-block) has height one. - -## bootstrap validator - -The [validator](#validator) that produces the genesis (first) [block](#block) of a block chain. - -## BPF loader - -The Solana program that owns and loads [BPF](developing/on-chain-programs/faq#berkeley-packet-filter-bpf) smart contract programs, allowing the program to interface with the runtime. - -## client - -A computer program that accesses the Solana server network [cluster](#cluster). - -## commitment - -A measure of the network confirmation for the [block](#block). - -## cluster - -A set of [validators](#validator) maintaining a single [ledger](#ledger). - -## compute budget - -The maximum number of [compute units](#compute-units) consumed per transaction. - -## compute units - -The smallest unit of measure for consumption of computational resources of the blockchain. - -## confirmation time - -The wallclock duration between a [leader](#leader) creating a [tick entry](#tick) and creating a [confirmed block](#confirmed-block). - -## confirmed block - -A [block](#block) that has received a [super majority](#supermajority) of [ledger votes](#ledger-vote). - -## control plane - -A gossip network connecting all [nodes](#node) of a [cluster](#cluster). - -## cooldown period - -Some number of [epochs](#epoch) after [stake](#stake) has been deactivated while it progressively becomes available for withdrawal. During this period, the stake is considered to be "deactivating". More info about: [warmup and cooldown](implemented-proposals/staking-rewards.md#stake-warmup-cooldown-withdrawal) - -## credit - -See [vote credit](#vote-credit). - -## cross-program invocation (CPI) - -A call from one smart contract program to another. For more information, see [calling between programs](developing/programming-model/calling-between-programs.md). - -## data plane - -A multicast network used to efficiently validate [entries](#entry) and gain consensus. - -## drone - -An off-chain service that acts as a custodian for a user's private key. It typically serves to validate and sign transactions. 
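The program-derived account address mentioned under "account" above, a 32-byte value forced off the ed25519 curve, can be derived deterministically from seeds and a program id. A small sketch, assuming the `solana-sdk` crate with a placeholder program id and invented seeds:

```rust
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Placeholder program id; any deployed program's id works the same way.
    let program_id = Pubkey::new_unique();

    // find_program_address searches bump seeds (255, 254, ...) until the
    // resulting 32-byte value is NOT on the ed25519 curve, so no private
    // key can ever exist for the derived address.
    let (pda, bump) = Pubkey::find_program_address(&[b"vault", b"user-42"], &program_id);
    println!("pda: {}, bump: {}", pda, bump);
}
```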
- -## entry - -An entry on the [ledger](#ledger) either a [tick](#tick) or a [transaction's entry](#transactions-entry). - -## entry id - -A preimage resistant [hash](#hash) over the final contents of an entry, which acts as the [entry's](#entry) globally unique identifier. The hash serves as evidence of: - -- The entry being generated after a duration of time -- The specified [transactions](#transaction) are those included in the entry -- The entry's position with respect to other entries in [ledger](#ledger) - -See [proof of history](#proof-of-history-poh). - -## epoch - -The time, i.e. number of [slots](#slot), for which a [leader schedule](#leader-schedule) is valid. - -## fee account - -The fee account in the transaction is the account that pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance. - -## finality - -When nodes representing 2/3rd of the [stake](#stake) have a common [root](#root). - -## fork - -A [ledger](#ledger) derived from common entries but then diverged. - -## genesis block - -The first [block](#block) in the chain. - -## genesis config - -The configuration file that prepares the [ledger](#ledger) for the [genesis block](#genesis-block). - -## hash - -A digital fingerprint of a sequence of bytes. - -## inflation - -An increase in token supply over time used to fund rewards for validation and to fund continued development of Solana. - -## inner instruction - -See [cross-program invocation](#cross-program-invocation-cpi). - -## instruction - -The smallest contiguous unit of execution logic in a [program](#program). An instruction specifies which program it is calling, which accounts it wants to read or modify, and additional data that serves as auxiliary input to the program. A [client](#client) can include one or multiple instructions in a [transaction](#transaction). An instruction may contain one or more [cross-program invocations](#cross-program-invocation-cpi). - -## keypair - -A [public key](#public-key-pubkey) and corresponding [private key](#private-key) for accessing an account. - -## lamport - -A fractional [native token](#native-token) with the value of 0.000000001 [sol](#sol). - -:::info -Within the compute budget, a quantity of _[micro-lamports](https://github.com/solana-labs/solana/blob/ced8f6a512c61e0dd5308095ae8457add4a39e94/program-runtime/src/prioritization_fee.rs#L1-L2)_ is used in the calculation of [prioritization fees](#prioritization-fee). -::: - -## leader - -The role of a [validator](#validator) when it is appending [entries](#entry) to the [ledger](#ledger). - -## leader schedule - -A sequence of [validator](#validator) [public keys](#public-key-pubkey) mapped to [slots](#slot). The cluster uses the leader schedule to determine which validator is the [leader](#leader) at any moment in time. - -## ledger - -A list of [entries](#entry) containing [transactions](#transaction) signed by [clients](#client). -Conceptually, this can be traced back to the [genesis block](#genesis-block), but an actual [validator](#validator)'s ledger may have only newer [blocks](#block) to reduce storage, as older ones are not needed for validation of future blocks by design. - -## ledger vote - -A [hash](#hash) of the [validator's state](#bank-state) at a given [tick height](#tick-height). 
It comprises a [validator's](#validator) affirmation that a [block](#block) it has received has been verified, as well as a promise not to vote for a conflicting [block](#block) \(i.e. [fork](#fork)\) for a specific amount of time, the [lockout](#lockout) period. - -## light client - -A type of [client](#client) that can verify it's pointing to a valid [cluster](#cluster). It performs more ledger verification than a [thin client](#thin-client) and less than a [validator](#validator). - -## loader - -A [program](#program) with the ability to interpret the binary encoding of other on-chain programs. - -## lockout - -The duration of time for which a [validator](#validator) is unable to [vote](#ledger-vote) on another [fork](#fork). - -## message - -The structured contents of a [transaction](#transaction). Generally containing a header, array of account addresses, recent [blockhash](#blockhash), and an array of [instructions](#instruction). - -Learn more about the [message formatting inside of transactions](./developing/programming-model/transactions.md#message-format) here. - -## native token - -The [token](#token) used to track work done by [nodes](#node) in a cluster. - -## node - -A computer participating in a [cluster](#cluster). - -## node count - -The number of [validators](#validator) participating in a [cluster](#cluster). - -## PoH - -See [Proof of History](#proof-of-history-poh). - -## point - -A weighted [credit](#credit) in a rewards regime. In the [validator](#validator) [rewards regime](cluster/stake-delegation-and-rewards.md), the number of points owed to a [stake](#stake) during redemption is the product of the [vote credits](#vote-credit) earned and the number of lamports staked. - -## private key - -The private key of a [keypair](#keypair). - -## program - -The executable code that interprets the [instructions](#instruction) sent inside of each [transaction](#transaction) on the Solana. These programs are often referred to as "[_smart contracts_](./developing//intro/programs.md)" on other blockchains. - -## program derived account (PDA) - -An account whose signing authority is a program and thus is not controlled by a private key like other accounts. - -## program id - -The public key of the [account](#account) containing a [program](#program). - -## proof of history (PoH) - -A stack of proofs, each of which proves that some data existed before the proof was created and that a precise duration of time passed before the previous proof. Like a [VDF](#verifiable-delay-function-vdf), a Proof of History can be verified in less time than it took to produce. - -## prioritization fee - -An additional fee user can specify in the compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). - -The prioritization fee is calculated by multiplying the requested maximum compute units by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. - -Transactions should request the minimum amount of compute units required for execution to minimize fees. - -## public key (pubkey) - -The public key of a [keypair](#keypair). - -## rent - -Fee paid by [Accounts](#account) and [Programs](#program) to store data on the blockchain. When accounts do not have enough balance to pay rent, they may be Garbage Collected. - -See also [rent exempt](#rent-exempt) below. Learn more about rent here: [What is rent?](../src/developing/intro/rent.md). 
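The arithmetic in the prioritization fee entry above (requested compute units times the compute-unit price in micro-lamports, rounded up to the nearest lamport) can be checked with a few lines of Rust; the unit counts and prices below are illustrative only:

```rust
// Worked example of the prioritization-fee arithmetic described above.
const MICRO_LAMPORTS_PER_LAMPORT: u128 = 1_000_000;

fn prioritization_fee_lamports(compute_unit_limit: u64, micro_lamports_per_cu: u64) -> u64 {
    let micro_lamports = u128::from(compute_unit_limit) * u128::from(micro_lamports_per_cu);
    // Ceiling division rounds any fractional lamport up.
    ((micro_lamports + MICRO_LAMPORTS_PER_LAMPORT - 1) / MICRO_LAMPORTS_PER_LAMPORT) as u64
}

fn main() {
    // 200_000 CU at 1_500 micro-lamports/CU => 300_000_000 micro-lamports
    // => 300 lamports.
    assert_eq!(prioritization_fee_lamports(200_000, 1_500), 300);
    // 1 CU at 1 micro-lamport/CU still rounds up to a whole lamport.
    assert_eq!(prioritization_fee_lamports(1, 1), 1);
}
```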
- -## rent exempt - -Accounts that maintain more than 2 years worth of rent payments in their account are considered "_rent exempt_" and will not incur the [collection of rent](../src/developing/intro/rent.md#collecting-rent). - -## root - -A [block](#block) or [slot](#slot) that has reached maximum [lockout](#lockout) on a [validator](#validator). The root is the highest block that is an ancestor of all active forks on a validator. All ancestor blocks of a root are also transitively a root. Blocks that are not an ancestor and not a descendant of the root are excluded from consideration for consensus and can be discarded. - -## runtime - -The component of a [validator](#validator) responsible for [program](#program) execution. - -## Sealevel - -Solana's parallel smart contracts run-time. - -## shred - -A fraction of a [block](#block); the smallest unit sent between [validators](#validator). - -## signature - -A 64-byte ed25519 signature of R (32-bytes) and S (32-bytes), with the requirement that R is a packed Edwards point not of small order and S is a scalar in the range 0 <= S < L. -This requirement ensures no signature malleability. Each transaction must have at least one signature for the [fee account](terminology#fee-account). -Thus, the first signature in a transaction can be treated as the [transaction id](#transaction-id). - -## skip rate -The percentage of [skipped slots](#skipped-slot) out of the total leader slots in the current epoch. This metric can be misleading as it has high variance after the epoch boundary when the sample size is small, as well as for validators with a low number of leader slots; however, it can also be useful in identifying node misconfigurations at times. - -## skipped slot - -A past [slot](#slot) that did not produce a [block](#block), because the leader was offline or the [fork](#fork) containing the slot was abandoned for a better alternative by cluster consensus. A skipped slot will not appear as an ancestor for blocks at subsequent slots, nor increment the [block height](terminology#block-height), nor expire the oldest `recent_blockhash`. - -Whether a slot has been skipped can only be determined when it becomes older than the latest [rooted](#root) (thus not-skipped) slot. - -## slot - -The period of time for which each [leader](#leader) ingests transactions and produces a [block](#block). - -Collectively, slots create a logical clock. Slots are ordered sequentially and non-overlapping, comprising roughly equal real-world time as per [PoH](#proof-of-history-poh). - -## smart contract - -A program on a blockchain that can read and modify accounts over which it has control. - -## sol - -The [native token](#native-token) of a Solana [cluster](#cluster). - -## Solana Program Library (SPL) - -A [library of programs](https://spl.solana.com/) on Solana such as spl-token that facilitates tasks such as creating and using tokens. - -## stake - -Tokens forfeit to the [cluster](#cluster) if malicious [validator](#validator) behavior can be proven. - -## supermajority - -2/3 of a [cluster](#cluster). - -## sysvar - -A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. - -## thin client - -A type of [client](#client) that trusts it is communicating with a valid [cluster](#cluster). - -## tick - -A ledger [entry](#entry) that estimates wallclock duration.
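As a rough sketch of the syscall path mentioned in the "sysvar" entry above, an on-chain program can read the Clock sysvar directly instead of receiving the sysvar account in its instruction. This assumes the `solana-program` crate; the function name is hypothetical and the fragment is meant to run inside a program:

```rust
use solana_program::{
    clock::Clock,
    entrypoint::ProgramResult,
    msg,
    sysvar::Sysvar,
};

// Inside an on-chain program, sysvars such as Clock can be read via a
// syscall rather than being passed in as an instruction account.
pub fn log_current_slot() -> ProgramResult {
    let clock = Clock::get()?; // syscall-backed read of the Clock sysvar
    msg!("current slot: {}, epoch: {}", clock.slot, clock.epoch);
    Ok(())
}
```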
- -## tick height - -The Nth [tick](#tick) in the [ledger](#ledger). - -## token - -A digitally transferable asset. - -## tps - -[Transactions](#transaction) per second. - -## tpu - -[Transaction processing unit](validator/tpu.md). - -## transaction - -One or more [instructions](#instruction) signed by a [client](#client) using one or more [keypairs](#keypair) and executed atomically with only two possible outcomes: success or failure. - -## transaction id - -The first [signature](#signature) in a [transaction](#transaction), which can be used to uniquely identify the transaction across the complete [ledger](#ledger). - -## transaction confirmations - -The number of [confirmed blocks](#confirmed-block) since the transaction was accepted onto the [ledger](#ledger). A transaction is finalized when its block becomes a [root](#root). - -## transactions entry - -A set of [transactions](#transaction) that may be executed in parallel. - -## tvu - -[Transaction validation unit](validator/tvu.md). - -## validator - -A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger) - -## VDF - -See [verifiable delay function](#verifiable-delay-function-vdf). - -## verifiable delay function (VDF) - -A function that takes a fixed amount of time to execute that produces a proof that it ran, which can then be verified in less time than it took to produce. - -## vote - -See [ledger vote](#ledger-vote). - -## vote credit - -A reward tally for [validators](#validator). A vote credit is awarded to a validator in its vote account when the validator reaches a [root](#root). - -## wallet - -A collection of [keypairs](#keypair) that allows users to manage their funds. - -## warmup period - -Some number of [epochs](#epoch) after [stake](#stake) has been delegated while it progressively becomes effective. During this period, the stake is considered to be "activating". More info about: [warmup and cooldown](cluster/stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal) diff --git a/docs/src/transaction_fees.md b/docs/src/transaction_fees.md deleted file mode 100644 index e3a65fb930ec40..00000000000000 --- a/docs/src/transaction_fees.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: Transaction Fees -description: - "Transaction fees are the small fees paid to process instructions on the - network. These fees are based on computation and an optional prioritization - fee." -keywords: - - instruction fee - - processing fee - - storage fee - - low fee blockchain - - gas - - gwei - - cheap network - - affordable blockchain ---- - -The small fees paid to process [instructions](./terminology.md#instruction) on -the Solana blockchain are known as "_transaction fees_". - -As each transaction (which contains one or more instructions) is sent through -the network, it gets processed by the current leader validation-client. Once -confirmed as a global state transaction, this _transaction fee_ is paid to the -network to help support the [economic design](#economic-design) of the Solana -blockchain. - -> **NOTE:** Transaction fees are different from -> [account rent](./terminology.md#rent)! While transaction fees are paid to -> process instructions on the Solana network, rent is paid to store data on the -> blockchain. - -> You can learn more about rent here: -> [What is rent?](./developing/intro/rent.md) - -## Why pay transaction fees? 
- -Transaction fees offer many benefits in the Solana -[economic design](#basic-economic-design) described below. Mainly: - -- they provide compensation to the validator network for the CPU/GPU resources - necessary to process transactions, -- reduce network spam by introducing real cost to transactions, - and provide long-term economic stability to the network through a - protocol-captured minimum fee amount per transaction. - -> **NOTE:** Network consensus votes are sent as normal system transfers, which -> means that validators pay transaction fees to participate in consensus. - -## Basic economic design - -Many blockchain networks \(e.g. Bitcoin and Ethereum\) rely on inflationary -_protocol-based rewards_ to secure the network in the short-term. Over the -long-term, these networks will increasingly rely on _transaction fees_ to -sustain security. - -The same is true on Solana. Specifically: - -- A fixed proportion (initially 50%) of each transaction fee is _burned_ - (destroyed), with the remaining going to the current - [leader](./terminology.md#leader) processing the transaction. -- A scheduled global inflation rate provides a source for - [rewards](./implemented-proposals/staking-rewards.md) distributed to - [Solana Validators](../src/running-validator.md). - -### Why burn some fees? - -As mentioned above, a fixed proportion of each transaction fee is _burned_ -(destroyed). This is intended to cement the economic value of SOL and thus -sustain the network's security. Unlike a scheme where transaction fees are -completely burned, leaders are still incentivized to include as many -transactions as possible in their slots. - -Burnt fees can also help prevent malicious validators from censoring -transactions by being considered in [fork](./terminology.md#fork) selection. - -#### Example of an attack: - -In the case of a [Proof of History (PoH)](./terminology.md#proof-of-history-poh) -fork with a malicious, censoring leader: - -- due to the fees lost from censoring, we would expect the total fees burned to - be **_less than_** on a comparable honest fork -- if the censoring leader is to compensate for these lost protocol fees, they - would have to replace the burnt fees on their fork themselves -- thus potentially reducing the incentive to censor in the first place - -## Calculating transaction fees - -Transaction fees are calculated based on two main parts: - -- a statically set base fee per signature, and -- the computational resources used during the transaction, measured in - "[_compute units_](./terminology.md#compute-units)" - -Since each transaction may require a different amount of computational -resources, each is allotted a maximum number of _compute units_ per transaction -known as the "[_compute budget_](./terminology.md#compute-budget)". - -The execution of each instruction within a transaction consumes a different -number of _compute units_. After the maximum number of _compute units_ has been -consumed (aka compute budget exhaustion), the runtime will halt the transaction -and return an error. This results in a failed transaction. - -> **Learn more:** compute units and the -> [Compute Budget](./developing/programming-model/runtime#compute-budget) in the -> Runtime and [requesting a fee estimate](../api/http#getfeeformessage) from the -> RPC. - -## Prioritization fee - -A Solana transaction can include an **optional** fee to prioritize itself -against others, known as a -"_[prioritization fee](./terminology.md#prioritization-fee)_".
Paying this -additional fee helps boost how a transaction is prioritized against others, -resulting in faster execution times. - -### How the prioritization fee is calculated - -A transaction's [prioritization fee](./terminology.md#prioritization-fee) is -calculated by multiplying the maximum number of **_compute units_** by the -**_compute unit price_** (measured in _micro-lamports_). - -Each transaction can set the maximum number of compute units it is allowed to -consume and the compute unit price by including a `SetComputeUnitLimit` and -`SetComputeUnitPrice` compute budget instruction respectively. - -:::info -[Compute Budget instructions](https://github.com/solana-labs/solana/blob/master/sdk/src/compute_budget.rs) -do **not** require any accounts. ::: - -If no `SetComputeUnitLimit` instruction is provided, the limit will be -calculated as the product of the number of instructions in the transaction and -the default per-instruction units, which is currently -[200k](https://github.com/solana-labs/solana/blob/4293f11cf13fc1e83f1baa2ca3bb2f8ea8f9a000/program-runtime/src/compute_budget.rs#L13). - -If no `SetComputeUnitPrice` instruction is provided, the transaction will -default to no additional elevated fee and the lowest priority. - -### How to set the prioritization fee - -A transaction's prioritization fee is set by including a `SetComputeUnitPrice` -instruction, and optionally a `SetComputeUnitLimit` instruction. The runtime -will use these values to calculate the prioritization fee, which will be used to -prioritize the given transaction within the block. - -You can craft each of these instructions via their `rust` or `@solana/web3.js` -functions. Each of these instructions can then be included in the transaction -and sent to the cluster like normal. See also the -[best practices](#prioritization-fee-best-practices) below. - -:::caution Transactions can only contain **one of each type** of compute budget -instruction. Duplicate types will result in an -[`TransactionError::DuplicateInstruction`](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction/error.rs#L144-145) -error, and ultimately transaction failure. ::: - -#### Rust - -The rust `solana-sdk` crate includes functions within -[`ComputeBudgetInstruction`](https://docs.rs/solana-sdk/latest/solana_sdk/compute_budget/enum.ComputeBudgetInstruction.html) -to craft instructions for setting the _compute unit limit_ and _compute unit -price_: - -```rust -let instruction = ComputeBudgetInstruction::set_compute_unit_limit(300_000); -``` - -```rust -let instruction = ComputeBudgetInstruction::set_compute_unit_price(1); -``` - -#### Javascript - -The `@solana/web3.js` library includes functions within the -[`ComputeBudgetProgram`](https://solana-labs.github.io/solana-web3.js/classes/ComputeBudgetProgram.html) -class to craft instructions for setting the _compute unit limit_ and _compute -unit price_: - -```js -const instruction = ComputeBudgetProgram.setComputeUnitLimit({ - units: 300_000, -}); -``` - -```js -const instruction = ComputeBudgetProgram.setComputeUnitPrice({ - microLamports: 1, -}); -``` - -### Prioritization fee best practices - -#### Request the minimum compute units - -Transactions should request the minimum amount of compute units required for -execution to minimize fees. Also note that fees are not adjusted when the number -of requested compute units exceeds the number of compute units actually consumed -by an executed transaction. 
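Putting the two snippets above together, a complete transaction carries both compute budget instructions alongside the instruction doing the real work. A minimal sketch, assuming the `solana-sdk` crate, with placeholder keys and illustrative values:

```rust
use solana_sdk::{
    compute_budget::ComputeBudgetInstruction,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

fn main() {
    let payer = Keypair::new();
    let recipient = Pubkey::new_unique();

    let instructions = vec![
        // Request only what the transaction needs (a hypothetical 50k CU
        // budget here) and attach a price of 1_000 micro-lamports per CU.
        ComputeBudgetInstruction::set_compute_unit_limit(50_000),
        ComputeBudgetInstruction::set_compute_unit_price(1_000),
        system_instruction::transfer(&payer.pubkey(), &recipient, 1_000_000),
    ];

    // Prioritization fee: 50_000 CU * 1_000 micro-lamports = 50 lamports,
    // paid on top of the base signature fee.
    let tx = Transaction::new_with_payer(&instructions, Some(&payer.pubkey()));
    let _ = tx; // sign with `payer` and a recent blockhash, then submit
}
```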
- -#### Get recent prioritization fees - -Prior to sending a transaction to the cluster, you can use the -[`getRecentPrioritizationFees`](/api/http#getrecentprioritizationfees) RPC -method to get a list of the recent paid prioritization fees within the recent -blocks processed by the node. - -You could then use this data to estimate an appropriate prioritization fee for -your transaction to both (a) better ensure it gets processed by the cluster and -(b) minimize the fees paid. - -## Fee Collection - -Transactions are required to have at least one account which has signed the -transaction and is writable. Writable signer accounts are serialized first in -the list of transaction accounts and the first of these accounts is always used -as the "fee payer". - -Before any transaction instructions are processed, the fee payer account balance -will be deducted to pay for transaction fees. If the fee payer balance is not -sufficient to cover transaction fees, the transaction will be dropped by the -cluster. If the balance was sufficient, the fees will be deducted whether the -transaction is processed successfully or not. In fact, if any of the transaction -instructions return an error or violate runtime restrictions, all account -changes _except_ the transaction fee deduction will be rolled back. - -## Fee Distribution - -Transaction fees are partially burned and the remaining fees are collected by -the validator that produced the block that the corresponding transactions were -included in. The transaction fee burn rate was initialized as 50% when inflation -rewards were enabled at the beginning of 2021 and has not changed so far. These -fees incentivize a validator to process as many transactions as possible during -its slots in the leader schedule. Collected fees are deposited in the -validator's account (listed in the leader schedule for the current slot) after -processing all of the transactions included in a block. diff --git a/docs/src/validator/anatomy.md b/docs/src/validator/anatomy.md index 849e8a57a9efc4..5a61eeff7ef11c 100644 --- a/docs/src/validator/anatomy.md +++ b/docs/src/validator/anatomy.md @@ -1,5 +1,7 @@ --- title: Anatomy of a Validator +sidebar_label: Anatomy +sidebar_position: 1 --- ![Validator block diagrams](/img/validator.svg) diff --git a/docs/src/validator/best-practices/operations.md b/docs/src/validator/best-practices/operations.md deleted file mode 100644 index 0588fc9ee9053c..00000000000000 --- a/docs/src/validator/best-practices/operations.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Validator Operations Best Practices -sidebar_label: General Operations ---- - -After you have successfully setup and started a [validator on testnet](../get-started/setup-a-validator.md) (or another cluster of your choice), you will want to become familiar with how to operate your validator on a day-to-day basis. During daily operations, you will be [monitoring your server](./monitoring.md), updating software regularly (both the Solana validator software and operating system packages), and managing your vote account and identity account. - -All of these skills are critical to practice. Maximizing your validator uptime is an important part of being a good operator. - -## Educational Workshops - -The Solana validator community holds regular educational workshops. You can watch past workshops through the [solana validator educational workshops playlist](https://www.youtube.com/watch?v=86zySQ5vGW8&list=PLilwLeBwGuK6jKrmn7KOkxRxS9tvbRa5p). 
- -## Help with the validator command line - -From within the Solana CLI, you can execute the `solana-validator` command with the `--help` flag to get a better understanding of the flags and sub commands available. - -``` -solana-validator --help -``` - -## Restarting your validator - -There are many operational reasons you may want to restart your validator. As a best practice, you should avoid a restart during a leader slot. A [leader slot](../../terminology.md#leader-schedule) is the time when your validator is expected to produce blocks. For the health of the cluster and also for your validator's ability to earn transaction fee rewards, you do not want your validator to be offline during an opportunity to produce blocks. - -To see the full leader schedule for an epoch, use the following command: - -``` -solana leader-schedule -``` - -Based on the current slot and the leader schedule, you can calculate open time windows where your validator is not expected to produce blocks. - -Assuming you are ready to restart, you may use the `solana-validator exit` command. The command exits your validator process when an appropriate idle time window is reached. Assuming that you have systemd implemented for your validator process, the validator should restart automatically after the exit. See the below help command for details: - -``` -solana-validator exit --help -``` - -## Upgrading - -There are many ways to upgrade the [Solana software](../../cli/install-solana-cli-tools.md). As an operator, you will need to upgrade often, so it is important to get comfortable with this process. - -> **Note** validator nodes do not need to be offline while the newest version is being downloaded or built from source. All methods below can be done before the validator process is restarted. - -### Building From Source - -It is a best practice to always build your Solana binaries from source. If you build from source, you are certain that the code you are building has not been tampered with before the binary was created. You may also be able to optimize your `solana-validator` binary to your specific hardware. - -If you build from source on the validator machine (or a machine with the same CPU), you can target your specific architecture using the `-march` flag. Refer to the Solana docs for [instructions on building from source](../../cli/install-solana-cli-tools.md#build-from-source). - -### solana-install - -If you are not comfortable building from source, or you need to quickly install a new version to test something out, you could instead try using the `solana-install` command. - -Assuming you want to install Solana version `1.14.17`, you would execute the following: - -``` -solana-install init 1.14.17 -``` - -This command downloads the executable for `1.14.17` and installs it into a `.local` directory. You can also look at `solana-install --help` for more options. - -> **Note** this command only works if you already have the solana cli installed. If you do not have the cli installed, refer to [install solana cli tools](../../cli/install-solana-cli-tools.md) - -### Restart - -For all install methods, the validator process will need to be restarted before the newly installed version is in use. Use `solana-validator exit` to restart your validator process. - -### Verifying version - -The best way to verify that your validator process has changed to the desired version is to grep the logs after a restart. 
The following grep command should show you the version that your validator restarted with: - -``` -grep -B1 'Starting validator with' <path/to/logfile> -``` - -## Snapshots - -Validator operators who have not experienced significant downtime (multiple hours of downtime) should avoid downloading snapshots. It is important for the health of the cluster as well as your validator history to maintain the local ledger. Therefore, you should not download a new snapshot any time your validator is offline or experiences an issue. Downloading a snapshot should be reserved only for occasions when you do not have local state. Prolonged downtime or the first install of a new validator are examples of times when you may not have state locally. In other cases such as restarts for upgrades, a snapshot download should be avoided. - -To avoid downloading a snapshot on restart, add the following flag to the `solana-validator` command: - -``` ---no-snapshot-fetch -``` - -If you use this flag with the `solana-validator` command, make sure that you run `solana catchup <pubkey>` after your validator starts to make sure that the validator is catching up in a reasonable time. After some time (potentially a few hours), if it appears that your validator continues to fall behind, then you may have to download a new snapshot. - -### Downloading Snapshots - -If you are starting a validator for the first time, or your validator has fallen too far behind after a restart, then you may have to download a snapshot. - -To download a snapshot, you must **_NOT_** use the `--no-snapshot-fetch` flag. Without the flag, your validator will automatically download a snapshot from your known validators that you specified with the `--known-validator` flag. - -If one of the known validators is downloading slowly, you can try adding the `--minimal-snapshot-download-speed` flag to your validator. This flag will switch to another known validator if the initial download speed is below the threshold that you set. - -### Manually Downloading Snapshots - -In the case that there are network troubles with one or more of your known validators, then you may have to manually download the snapshot. To manually download a snapshot from one of your known validators, first find the IP address of the validator using the `solana gossip` command. In the example below, `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` is the pubkey of one of my known validators: - -``` -solana gossip | grep 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on -``` - -The IP address of the validator is `139.178.68.207` and the open port on this validator is `80`. You can see the IP address and port in the fifth column in the gossip output: - -``` -139.178.68.207 | 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on | 8001 | 8004 | 139.178.68.207:80 | 1.10.27 | 1425680972 -``` - -Now that the IP and port are known, you can download a full snapshot or an incremental snapshot: - -``` -wget --trust-server-names http://139.178.68.207:80/snapshot.tar.bz2 -wget --trust-server-names http://139.178.68.207:80/incremental-snapshot.tar.bz2 -``` - -Now move those files into your snapshot directory. If you have not specified a snapshot directory, then you should put the files in your ledger directory. - -Once you have a local snapshot, you can restart your validator with the `--no-snapshot-fetch` flag. - -## Regularly Check Account Balances - -It is important that you do not accidentally run out of funds in your identity account, as your node will stop voting.
It is also important to note that this account keypair is the most vulnerable of the three keypairs in a vote account because the keypair for the identity account is stored on your validator when running the `solana-validator` software. How much SOL you should store there is up to you. As a best practice, make sure to check the account regularly and refill or deduct from it as needed. To check the account balance, run: - -``` -solana balance validator-keypair.json -``` - -> **Note** `solana-watchtower` can monitor for a minimum validator identity balance. See [monitoring best practices](./monitoring.md) for details. - -## Withdrawing From The Vote Account - -As a reminder, your withdrawer's keypair should **_NEVER_** be stored on your server. It should be stored on a hardware wallet, paper wallet, or multisig to mitigate the risk of hacking and theft of funds. - -To withdraw your funds from your vote account, you will need to run `solana withdraw-from-vote-account` on a trusted computer. For example, on a trusted computer, you could withdraw all of the funds from your vote account (excluding the rent exempt minimum). The below example assumes you have a separate keypair to store your funds called `person-keypair.json`. - -``` -solana withdraw-from-vote-account \ - vote-account-keypair.json \ - person-keypair.json ALL \ - --authorized-withdrawer authorized-withdrawer-keypair.json -``` - -To get more information on the command, use `solana withdraw-from-vote-account --help`. - -For a more detailed explanation of the different keypairs and other related operations, refer to [vote account management](../../running-validator/vote-accounts.md). \ No newline at end of file diff --git a/docs/src/validator/blockstore.md b/docs/src/validator/blockstore.md index f4e8a6119986f2..e49e576bf26353 100644 --- a/docs/src/validator/blockstore.md +++ b/docs/src/validator/blockstore.md @@ -1,8 +1,9 @@ --- title: Blockstore +sidebar_position: 3 --- -After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. +After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../consensus/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
diff --git a/docs/src/developing/plugins/geyser-plugins.md b/docs/src/validator/geyser.md similarity index 100% rename from docs/src/developing/plugins/geyser-plugins.md rename to docs/src/validator/geyser.md diff --git a/docs/src/validator/gossip.md b/docs/src/validator/gossip.md index c1ad139c0e66ab..3c637f5c707357 100644 --- a/docs/src/validator/gossip.md +++ b/docs/src/validator/gossip.md @@ -1,36 +1,54 @@ --- title: Gossip Service +sidebar_position: 5 --- -The Gossip Service acts as a gateway to nodes in the [control plane](terminology.md#control-plane). Validators use the service to ensure information is available to all other nodes in a cluster. The service broadcasts information using a [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol). +The Gossip Service acts as a gateway to nodes in the +[control plane](https://solana.com/docs/terminology#control-plane). Validators +use the service to ensure information is available to all other nodes in a +cluster. The service broadcasts information using a +[gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol). ## Gossip Overview -Nodes continuously share signed data objects among themselves in order to manage a cluster. For example, they share their contact information, ledger height, and votes. +Nodes continuously share signed data objects among themselves in order to manage +a cluster. For example, they share their contact information, ledger height, and +votes. -Every tenth of a second, each node sends a "push" message and/or a "pull" message. Push and pull messages may elicit responses, and push messages may be forwarded on to others in the cluster. +Every tenth of a second, each node sends a "push" message and/or a "pull" +message. Push and pull messages may elicit responses, and push messages may be +forwarded on to others in the cluster. -Gossip runs on a well-known UDP/IP port or a port in a well-known range. Once a cluster is bootstrapped, nodes advertise to each other where to find their gossip endpoint (a socket address). +Gossip runs on a well-known UDP/IP port or a port in a well-known range. Once a +cluster is bootstrapped, nodes advertise to each other where to find their +gossip endpoint (a socket address). ## Gossip Records -Records shared over gossip are arbitrary, but signed and versioned (with a timestamp) as needed to make sense to the node receiving them. If a node receives two records from the same source, it updates its own copy with the record with the most recent timestamp. +Records shared over gossip are arbitrary, but signed and versioned (with a +timestamp) as needed to make sense to the node receiving them. If a node +receives two records from the same source, it updates its own copy with the +record with the most recent timestamp. ## Gossip Service Interface ### Push Message -A node sends a push message to tells the cluster it has information to share. Nodes send push messages to `PUSH_FANOUT` push peers. +A node sends a push message to tell the cluster it has information to share. +Nodes send push messages to `PUSH_FANOUT` push peers. Upon receiving a push message, a node examines the message for: -1. Duplication: if the message has been seen before, the node drops the message and may respond with `PushMessagePrune` if forwarded from a low staked node +1. Duplication: if the message has been seen before, the node drops the message + and may respond with `PushMessagePrune` if forwarded from a low staked node 2. 
New data: if the message is new to the node - - Stores the new information with an updated version in its cluster info and purges any previous older value + - Stores the new information with an updated version in its cluster info and + purges any previous older value - - Stores the message in `pushed_once` (used for detecting duplicates, purged after `PUSH_MSG_TIMEOUT * 5` ms) + - Stores the message in `pushed_once` (used for detecting duplicates, purged + after `PUSH_MSG_TIMEOUT * 5` ms) - Retransmits the messages to its own push peers @@ -38,36 +56,60 @@ Upon receiving a push message, a node examines the message for: ### Push Peers, Prune Message -A nodes selects its push peers at random from the active set of known peers. The node keeps this selection for a relatively long time. When a prune message is received, the node drops the push peer that sent the prune. Prune is an indication that there is another, higher stake weighted path to that node than direct push. +A node selects its push peers at random from the active set of known peers. The +node keeps this selection for a relatively long time. When a prune message is +received, the node drops the push peer that sent the prune. Prune is an +indication that there is another, higher stake weighted path to that node than +direct push. -The set of push peers is kept fresh by rotating a new node into the set every `PUSH_MSG_TIMEOUT/2` milliseconds. +The set of push peers is kept fresh by rotating a new node into the set every +`PUSH_MSG_TIMEOUT/2` milliseconds. ### Pull Message -A node sends a pull message to ask the cluster if there is any new information. A pull message is sent to a single peer at random and comprises a Bloom filter that represents things it already has. A node receiving a pull message iterates over its values and constructs a pull response of things that miss the filter and would fit in a message. +A node sends a pull message to ask the cluster if there is any new information. +A pull message is sent to a single peer at random and comprises a Bloom filter +that represents things it already has. A node receiving a pull message iterates +over its values and constructs a pull response of things that miss the filter +and would fit in a message. -A node constructs the pull Bloom filter by iterating over current values and recently purged values. +A node constructs the pull Bloom filter by iterating over current values and +recently purged values. -A node handles items in a pull response the same way it handles new data in a push message. +A node handles items in a pull response the same way it handles new data in a +push message. ## Purging -Nodes retain prior versions of values (those updated by a pull or push) and expired values (those older than `GOSSIP_PULL_CRDS_TIMEOUT_MS`) in `purged_values` (things I recently had). Nodes purge `purged_values` that are older than `5 * GOSSIP_PULL_CRDS_TIMEOUT_MS`. +Nodes retain prior versions of values (those updated by a pull or push) and +expired values (those older than `GOSSIP_PULL_CRDS_TIMEOUT_MS`) in +`purged_values` (things I recently had). Nodes purge `purged_values` that are +older than `5 * GOSSIP_PULL_CRDS_TIMEOUT_MS`. ## Eclipse Attacks -An eclipse attack is an attempt to take over the set of node connections with adversarial endpoints. +An eclipse attack is an attempt to take over the set of node connections with +adversarial endpoints. This is relevant to our implementation in the following ways. -- Pull messages select a random node from the network. 
An eclipse attack on _pull_ would require an attacker to influence the random selection in such a way that only adversarial nodes are selected for pull. -- Push messages maintain an active set of nodes and select a random fanout for every push message. An eclipse attack on _push_ would influence the active set selection, or the random fanout selection. +- Pull messages select a random node from the network. An eclipse attack on + _pull_ would require an attacker to influence the random selection in such a + way that only adversarial nodes are selected for pull. +- Push messages maintain an active set of nodes and select a random fanout for + every push message. An eclipse attack on _push_ would influence the active set + selection, or the random fanout selection. ### Time and Stake based weights -Weights are calculated based on `time since last picked` and the `natural log` of the `stake weight`. +Weights are calculated based on `time since last picked` and the `natural log` +of the `stake weight`. -Taking the `ln` of the stake weight allows giving all nodes a fairer chance of network coverage in a reasonable amount of time. It helps normalize the large possible `stake weight` differences between nodes. This way a node with low `stake weight`, compared to a node with large `stake weight` will only have to wait a few multiples of ln(`stake`) seconds before it gets picked. +Taking the `ln` of the stake weight allows giving all nodes a fairer chance of +network coverage in a reasonable amount of time. It helps normalize the large +possible `stake weight` differences between nodes. This way a node with low +`stake weight`, compared to a node with large `stake weight` will only have to +wait a few multiples of ln(`stake`) seconds before it gets picked. There is no way for an adversary to influence these parameters. @@ -79,7 +121,8 @@ A node is selected as a pull target based on the weights described above. A prune message can only remove an adversary from a potential connection. -Just like _pull message_, nodes are selected into the active set based on weights. +Just like _pull message_, nodes are selected into the active set based on +weights. ## Notable differences from PlumTree @@ -87,5 +130,9 @@ The active push protocol described here is based on [Plum Tree](https://haslab.uminho.pt/sites/default/files/jop/files/lpr07a.pdf). The main differences are: -- Push messages have a wallclock that is signed by the originator. Once the wallclock expires the message is dropped. A hop limit is difficult to implement in an adversarial setting. -- Lazy Push is not implemented because its not obvious how to prevent an adversary from forging the message fingerprint. A naive approach would allow an adversary to be prioritized for pull based on their input. +- Push messages have a wallclock that is signed by the originator. Once the + wallclock expires the message is dropped. A hop limit is difficult to + implement in an adversarial setting. +- Lazy Push is not implemented because it's not obvious how to prevent an + adversary from forging the message fingerprint. A naive approach would allow + an adversary to be prioritized for pull based on their input. diff --git a/docs/src/validator/overview/running-validator-or-rpc-node.md b/docs/src/validator/overview/running-validator-or-rpc-node.md deleted file mode 100644 index 07fff60431f0bf..00000000000000 --- a/docs/src/validator/overview/running-validator-or-rpc-node.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Consensus Validator or RPC Node? 
-sidebar_label: Running a Validator or RPC Node? ---- - -Operators who run a [consensus validator](./what-is-a-validator.md) have much different incentives than operators who run an [RPC node](./what-is-an-rpc-node.md). You will have to decide which choice is best for you based on your interests, technical background, and goals. - -## Consensus Validators - -As a validator your primary focus is maintaining the network and making sure that your node is performing optimally so that you can fully participate in the cluster consensus. You will want to attract a delegation of SOL to your validator which will allow your validator the opportunity to produce more blocks and earn rewards. - -Each staked validator earns inflation rewards from [vote credits](../../terminology.md#vote-credit). Vote credits are assigned to validators that vote on [blocks](../../terminology.md#block) produced by the [leader](../../terminology.md#leader). The vote credits are given to all validators that successfully vote on blocks that are added to the blockchain. Additionally, when the validator is the leader, it can earn transaction fees and storage [rent fees](../../developing/programming-model/accounts.md#rent) for each block that it produces that is added to the blockchain. - -Since all votes in Solana happen on the blockchain, a validator incurs a transaction cost for each vote that it makes. These transaction fees amount to approximately 1.0 SOL per day. - -> It is important to make sure your validator always has enough SOL in its identity account to pay for these transactions! - -### Economics of running a consensus validator - -As an operator, it is important to understand how a consensus validator spends and earns sol through the protocol. - -All validators who vote (consensus validators) must pay vote transaction fees for blocks that they agree with. The cost of voting can be up to 1.1 SOL per day. - -A voting validator can earn SOL through 2 methods: - -1. Inflationary rewards paid at the end of an epoch. See [staking rewards](./../../implemented-proposals/staking-rewards.md) -2. Earning 50% of transaction fees for the blocks produced by the validator. See [transaction fee basic economic design](../../transaction_fees.md#basic-economic-design) - -The following links are community provided resources that discuss the economics of running a validator: - -- Michael Hubbard wrote an [article](https://laine-sa.medium.com/solana-staking-rewards-validator-economics-how-does-it-work-6718e4cccc4e) that explains the economics of Solana in more depth for stakers and for validators. -- Congent Crypto has written a [blog post](https://medium.com/@Cogent_Crypto/how-to-become-a-validator-on-solana-9dc4288107b7) that discusses economics and getting started. -- Cogent Crypto also provides a [validator profit calculator](https://cogentcrypto.io/ValidatorProfitCalculator) - -## RPC Nodes - -While RPC operators **do NOT** receive rewards (because the node is not participating in voting), there are different motivations for running an RPC node. - -An RPC operator is providing a service to users who want to interact with the Solana blockchain. Because your primary user is often technical, you will have to be able to answer technical questions about performance of RPC calls. This option may require more understanding of the [core Solana architecture](../../cluster/overview.md). - -If you are operating an RPC node as a business, your job will also involve scaling your system to meet the demands of the users. 
For example, some RPC providers create dedicated servers for projects that require a high volume of requests to the node. Someone with a background in development operations or software engineering will be a very important part of your team. You will need a strong understanding of the Solana architecture and the [RPC API](../../api/http.md). - -Alternatively, you may be a development team that would like to run their own infrastructure. In this case, the RPC infrastructure could be a part of your production stack. A development team could use the [Geyser plugin](../../developing/plugins/geyser-plugins.md), for example, to get real time access to information about accounts or blocks in the cluster. \ No newline at end of file diff --git a/docs/src/validator/runtime.md b/docs/src/validator/runtime.md index f02030b13e41a5..2bf8a52563f88b 100644 --- a/docs/src/validator/runtime.md +++ b/docs/src/validator/runtime.md @@ -1,9 +1,8 @@ --- -title: The Runtime +title: Runtime +sidebar_position: 6 --- -## The Runtime - The runtime is a concurrent transaction processor. Transactions specify their data dependencies upfront and dynamic memory allocation is explicit. By separating program code from the state it operates on, the runtime is able to choreograph concurrent access. Transactions accessing only read-only accounts are executed in parallel whereas transactions accessing writable accounts are serialized. The runtime interacts with the program through an entrypoint with a well-defined interface. The data stored in an account is an opaque type, an array of bytes. The program has full control over its contents. The transaction structure specifies a list of public keys and signatures for those keys and a sequential list of instructions that will operate over the states associated with the account keys. For the transaction to be committed all the instructions must execute successfully; if any abort the whole transaction fails to commit. @@ -29,9 +28,9 @@ At the _execute_ stage, the loaded accounts have no data dependencies, so all th The runtime enforces the following rules: 1. Only the _owner_ program may modify the contents of an account. This means that upon assignment data vector is guaranteed to be zero. -2. Total balances on all the accounts is equal before and after execution of a transaction. +2. Total balances on all the accounts are equal before and after execution of a transaction. 3. After the transaction is executed, balances of read-only accounts must be equal to the balances before the transaction. -4. All instructions in the transaction executed atomically. If one fails, all account modifications are discarded. +4. All instructions in the transaction are executed atomically. If one fails, all account modifications are discarded. Execution of the program involves mapping the program's public key to an entrypoint which takes a pointer to the transaction, and an array of loaded accounts. 
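The four runtime rules above are the heart of this section, so a concrete illustration may help. Below is a minimal editorial sketch, with simplified stand-in types rather than the validator's real account structures, of how rules 2 and 3 could be checked after a transaction executes; rule 4 then amounts to discarding the post-state wholesale if any instruction failed:

```rust
// Hypothetical, simplified account type -- not solana_sdk's Account.
#[derive(Clone)]
struct Account {
    lamports: u64,
}

// Rule 2: the sum of balances is equal before and after the transaction.
// Rule 3: read-only accounts keep exactly their pre-transaction balance.
fn check_balance_invariants(
    pre: &[Account],
    post: &[Account],
    is_writable: &[bool],
) -> Result<(), String> {
    let pre_sum: u64 = pre.iter().map(|a| a.lamports).sum();
    let post_sum: u64 = post.iter().map(|a| a.lamports).sum();
    if pre_sum != post_sum {
        return Err("total balances changed across the transaction".to_string());
    }
    for ((before, after), writable) in pre.iter().zip(post).zip(is_writable) {
        if !writable && before.lamports != after.lamports {
            return Err("read-only account balance changed".to_string());
        }
    }
    Ok(())
}
```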
diff --git a/docs/src/validator/tpu.md b/docs/src/validator/tpu.md index 4445d8d6984eb0..a3271add28cbbe 100644 --- a/docs/src/validator/tpu.md +++ b/docs/src/validator/tpu.md @@ -1,5 +1,7 @@ --- -title: TPU +title: Transaction Processing Unit +sidebar_label: TPU +sidebar_position: 2 --- TPU (Transaction Processing Unit) is the logic of the validator diff --git a/docs/src/validator/tvu.md b/docs/src/validator/tvu.md index 04657ab5e91474..e3ac7776f0d128 100644 --- a/docs/src/validator/tvu.md +++ b/docs/src/validator/tvu.md @@ -1,5 +1,7 @@ --- -title: TVU +title: Transaction Validation Unit +sidebar_label: TVU +sidebar_position: 3 --- TVU (Transaction Validation Unit) is the logic of the validator diff --git a/docs/src/wallet-guide.md b/docs/src/wallet-guide.md deleted file mode 100644 index f6a380b51d74df..00000000000000 --- a/docs/src/wallet-guide.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Solana Wallet Guide ---- - -This document describes the different wallet options that are available to users -of Solana who want to be able to send, receive and interact with -SOL tokens on the Solana blockchain. - -## What is a Wallet? - -A crypto wallet is a device or application that stores a collection of keys and -can be used to send, receive, -and track ownership of cryptocurrencies. Wallets can take many forms. -A wallet might be a directory or file in your computer's file system, -a piece of paper, or a specialized device called a _hardware wallet_. -There are also various smartphone apps and computer programs -that provide a user-friendly way to create and manage wallets. - -A _keypair_ is a securely generated _private key_ and its -cryptographically-derived _public key_. A private key and its corresponding -public key are together known as a _keypair_. -A wallet contains a collection of one or more keypairs and provides some means -to interact with them. - -The _public key_ (commonly shortened to _pubkey_) is known as the wallet's -_receiving address_ or simply its _address_. The wallet address **may be shared -and displayed freely**. When another party is going to send some amount of -cryptocurrency to a wallet, they need to know the wallet's receiving address. -Depending on a blockchain's implementation, the address can also be used to view -certain information about a wallet, such as viewing the balance, -but has no ability to change anything about the wallet or withdraw any tokens. - -The _private key_ is required to digitally sign any transactions to send -cryptocurrencies to another address or to make any changes to the wallet. -The private key **must never be shared**. If someone gains access to the -private key to a wallet, they can withdraw all the tokens it contains. -If the private key for a wallet is lost, any tokens that have been sent -to that wallet's address are **permanently lost**. - -Different wallet solutions offer different approaches to keypair security, -interacting with the keypair, and signing transactions to use/spend the tokens. -Some are easier to use than others. -Some store and back up private keys more securely. -Solana supports multiple types of wallets so you can choose the right balance -of security and convenience. - -**If you want to be able to receive SOL tokens on the Solana blockchain, -you first will need to create a wallet.** - -## Supported Wallets - -Several browser and mobile app based wallets support Solana. Find the right one -for you on the [Solana Ecosystem](https://solana.com/ecosystem/explore?categories=wallet) -page. 
- -For advanced users or developers, the [command-line wallets](wallet-guide/cli.md) -may be more appropriate, as new features on the Solana blockchain will always be -supported on the command line first before being integrated into third-party -solutions. diff --git a/docs/src/wallet-guide/support.md b/docs/src/wallet-guide/support.md deleted file mode 100644 index 84df5e86678eb3..00000000000000 --- a/docs/src/wallet-guide/support.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Support / Troubleshooting ---- - -If you have questions or are having trouble setting up or using your wallet -of choice, please make sure you've read through all the relevant pages in our -[Wallet Guide](paper-wallet.md). The Solana team is working hard to support new -features on popular wallets, and we do our best to keep our documents up to date -with the latest available features. - -If you have questions after reading the docs, feel free to reach out to us on -our [Telegram](https://t.me/solana). - -For **technical support**, please ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/solana) and tag your questions with `solana`. diff --git a/docs/src/validator/overview/what-is-a-validator.md b/docs/src/what-is-a-validator.md similarity index 85% rename from docs/src/validator/overview/what-is-a-validator.md rename to docs/src/what-is-a-validator.md index f2a34d2f9cc4fe..acaef0d27c1586 100644 --- a/docs/src/validator/overview/what-is-a-validator.md +++ b/docs/src/what-is-a-validator.md @@ -6,13 +6,13 @@ A validator is a computer that helps to run the Solana network. Each validator e The more independent entities that run validators, the less vulnerable the cluster is to an attack or catastrophe that affects the cluster. -> For an more in depth look at the health of the Solana network, see the [Solana Foundation Validator Health Report](https://solana.com/news/validator-health-report-march-2023). +> For a more in depth look at the health of the Solana network, see the [Solana Foundation Validator Health Report](https://solana.com/news/validator-health-report-march-2023). By becoming a validator, you are helping to grow the network. You are also learning first hand how the Solana cluster functions at the lowest level. You will become part of an active community of operators that are passionate about the Solana ecosystem. ## Consensus vs RPC -Before, we discuss validators in more detail, it's useful to make some distinctions. Using the same validator software, you have the option of running a voting/consensus node or choosing to instead run an RPC node. An RPC node helps Solana devs and others interact with the blockchain but for performance reasons should not vote. We go into more detail on RPC nodes in the next section, [what is an rpc node](./what-is-an-rpc-node.md). +Before we discuss validators in more detail, it's useful to make some distinctions. Using the same validator software, you have the option of running a voting/consensus node or choosing to instead run an RPC node. An RPC node helps Solana devs and others interact with the blockchain but for performance reasons should not vote. We go into more detail on RPC nodes in the next section, [what is an rpc node](./what-is-an-rpc-node.md). For this document, when a validator is mentioned, we are talking about a voting/consensus node. Now, to better understand what your validator is doing, it would help to understand how the Solana network functions in more depth. 
@@ -36,4 +36,4 @@ Understanding how PoH works is not necessary to run a good validator, but a very As a validator, you are helping to secure the network by producing and voting on blocks and to improve decentralization by running an independent node. You have the right to participate in discussions of changes on the network. You are also assuming a responsibility to keep your system running properly, to make sure your system is secure, and to keep it up to date with the latest software. As more individuals stake their tokens to your validator, you can reward their trust by running a high performing and reliable validator. Hopefully, your validator is performing well a majority of the time, but you should also have systems in place to respond to an outage at any time of the day. If your validator is not responding late at night, someone (either you or other team members) need to be available to investigate and fix the issues. -Running a validator is a [technical and important task](./validator-prerequisites.md), but it can also be very rewarding. Good luck and welcome to the community. \ No newline at end of file +Running a validator is a [technical and important task](./operations/prerequisites.md), but it can also be very rewarding. Good luck and welcome to the community. diff --git a/docs/src/validator/overview/what-is-an-rpc-node.md b/docs/src/what-is-an-rpc-node.md similarity index 71% rename from docs/src/validator/overview/what-is-an-rpc-node.md rename to docs/src/what-is-an-rpc-node.md index 59cf2c8b4204db..6bf75915a881ff 100644 --- a/docs/src/validator/overview/what-is-an-rpc-node.md +++ b/docs/src/what-is-an-rpc-node.md @@ -2,11 +2,11 @@ title: What is an RPC Node? --- -An RPC (Remote Procedure Call) node runs the same software as a [validator](../overview/what-is-a-validator.md), but it does not participate in the consensus process. Technically you could run the RPC software and also allow your node to vote as a consensus node, but it is strongly discouraged because your node will not be performant enough to do either task well. +An RPC (Remote Procedure Call) node runs the same software as a [validator](./what-is-a-validator.md), but it does not participate in the consensus process. Technically you could run the RPC software and also allow your node to vote as a consensus node, but it is strongly discouraged because your node will not be performant enough to do either task well. A node that runs RPC has a much different purpose in the cluster. An RPC node responds to requests about the blockchain and also allows users of the RPC node to submit new transactions to be included in blocks. For example, a website might request to transfer tokens from wallet A to wallet B (given wallet A's permission). That website would have to use wallet A to sign a transaction and then send it to an RPC node to be submitted to the leader. So you could think of running an RPC node as a similar engineering task to providing an api for others to use. The users of the RPC node are often developers, so this option may require a more technical understanding of Solana. To better understand RPC node operations, you'll want to become familiar with the different RPC calls. -You can find the RPC API [here](../../api/http.md). \ No newline at end of file +You can find the [RPC API here](https://solana.com/docs/rpc/http). 
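To get a feel for the consumer side that an RPC node serves, here is a minimal editorial sketch using the `solana-client` crate; the devnet URL and the system-program address are illustrative placeholders only:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
use std::str::FromStr;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Any RPC endpoint can answer these; this URL is just an example.
    let client = RpcClient::new("https://api.devnet.solana.com".to_string());

    // Two of the most common read-only requests an RPC node serves.
    let slot = client.get_slot()?;
    let balance = client.get_balance(&Pubkey::from_str(
        "11111111111111111111111111111111", // system program, as a stand-in
    )?)?;

    println!("slot {slot}, balance {balance} lamports");
    Ok(())
}
```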
\ No newline at end of file diff --git a/docs/static/img/quickstarts/solana-get-started-build-and-deploy.png b/docs/static/img/quickstarts/solana-get-started-build-and-deploy.png deleted file mode 100644 index 59bb3ef527fd8a..00000000000000 Binary files a/docs/static/img/quickstarts/solana-get-started-build-and-deploy.png and /dev/null differ diff --git a/docs/static/img/quickstarts/solana-get-started-import-on-playground.png b/docs/static/img/quickstarts/solana-get-started-import-on-playground.png deleted file mode 100644 index cd90b00cb210ba..00000000000000 Binary files a/docs/static/img/quickstarts/solana-get-started-import-on-playground.png and /dev/null differ diff --git a/docs/static/img/quickstarts/solana-get-started-successful-build.png b/docs/static/img/quickstarts/solana-get-started-successful-build.png deleted file mode 100644 index 82b0a5df054930..00000000000000 Binary files a/docs/static/img/quickstarts/solana-get-started-successful-build.png and /dev/null differ diff --git a/docs/static/img/quickstarts/solana-overview-client-program.png b/docs/static/img/quickstarts/solana-overview-client-program.png deleted file mode 100644 index 42b80cee54ae58..00000000000000 Binary files a/docs/static/img/quickstarts/solana-overview-client-program.png and /dev/null differ diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml index 66e8334ab3c09e..9321eb4c88db92 100644 --- a/download-utils/Cargo.toml +++ b/download-utils/Cargo.toml @@ -17,6 +17,9 @@ reqwest = { workspace = true, features = ["blocking", "brotli", "deflate", "gzip solana-runtime = { workspace = true } solana-sdk = { workspace = true } +[dev-dependencies] +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } + [lib] crate-type = ["lib"] name = "solana_download_utils" diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index b1e57a296cf79e..cd8e652a86b7ad 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -30,6 +30,9 @@ solana-version = { workspace = true } solana-vote-program = { workspace = true } tempfile = { workspace = true } +[dev-dependencies] +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } + [[bin]] name = "solana-genesis" path = "src/main.rs" diff --git a/genesis/src/main.rs b/genesis/src/main.rs index c254975379c937..6b7efd5e664339 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -547,7 +547,7 @@ fn main() -> Result<(), Box<dyn error::Error>> { identity_pubkey, identity_pubkey, commission, - VoteState::get_rent_exempt_reserve(&rent).max(1), + VoteState::get_rent_exempt_reserve(&genesis_config.rent).max(1), ); genesis_config.add_account( @@ -558,7 +558,7 @@ .unwrap_or(identity_pubkey), vote_pubkey, &vote_account, - &rent, + &genesis_config.rent, bootstrap_validator_stake_lamports, ), ); diff --git a/genesis/src/stakes.rs b/genesis/src/stakes.rs index 1d7c18f3a034a9..133fdf57f4968b 100644 --- a/genesis/src/stakes.rs +++ b/genesis/src/stakes.rs @@ -246,7 +246,7 @@ mod tests { let total_lamports = staker_reserve + reserve * 2 + 1; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { @@ -272,7 +272,7 @@ let total_lamports = staker_reserve + reserve * 2 + 1; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { @@ -298,7 +298,7 @@ let total_lamports = staker_reserve + (granularity + reserve) * 2; create_and_check_stakes( &mut GenesisConfig { - rent, + rent:
rent.clone(), ..GenesisConfig::default() }, &StakerInfo { @@ -323,7 +323,7 @@ let total_lamports = staker_reserve + (granularity + reserve + 1) * 2; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { diff --git a/geyser-plugin-interface/README.md b/geyser-plugin-interface/README.md index 761601d0dc4be1..0abbbd834e0488 100644 --- a/geyser-plugin-interface/README.md +++ b/geyser-plugin-interface/README.md @@ -6,10 +6,10 @@ # Solana Geyser Plugin Interface -This crate enables an plugin to be added into the Solana Validator runtime to +This crate enables a plugin to be added into the Solana Validator runtime to take actions at the time of account updates or block and transaction processing; for example, saving the account state to an external database. The plugin must -implement the `GeyserPlugin` trait. Please see the detail of the +implement the `GeyserPlugin` trait. Please see the details of the `geyser_plugin_interface.rs` for the interface definition. The plugin should produce a `cdylib` dynamic library, which must expose a `C` @@ -18,8 +18,8 @@ interface. The https://github.com/solana-labs/solana-accountsdb-plugin-postgres repository provides an example of how to create a plugin which saves the accounts data into -an external PostgreSQL databases. +an external PostgreSQL database. -More information about Solana is available in the [Solana documentation](https://docs.solana.com/). +More information about Solana is available in the [Solana documentation](https://solana.com/docs). Still have questions? Ask us on [Stack Exchange](https://sola.na/sse) diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 8b31aba48bc654..037aedf8b87e89 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -185,12 +185,31 @@ pub struct ReplicaEntryInfo<'a> { pub executed_transaction_count: u64, } +#[derive(Clone, Debug)] +#[repr(C)] +pub struct ReplicaEntryInfoV2<'a> { + /// The slot number of the block containing this Entry + pub slot: Slot, + /// The Entry's index in the block + pub index: usize, + /// The number of hashes since the previous Entry + pub num_hashes: u64, + /// The Entry's SHA-256 hash, generated from the previous Entry's hash with + /// `solana_entry::entry::next_hash()` + pub hash: &'a [u8], + /// The number of executed transactions in the Entry + pub executed_transaction_count: u64, + /// The index-in-block of the first executed transaction in this Entry + pub starting_transaction_index: usize, +} + /// A wrapper to future-proof ReplicaEntryInfo handling. To make a change to the structure of /// ReplicaEntryInfo, add an new enum variant wrapping a newer version, which will force plugin /// implementations to handle the change. #[repr(u32)] pub enum ReplicaEntryInfoVersions<'a> { V0_0_1(&'a ReplicaEntryInfo<'a>), + V0_0_2(&'a ReplicaEntryInfoV2<'a>), } #[derive(Clone, Debug)] @@ -302,6 +321,35 @@ pub type Result<T> = std::result::Result<T, GeyserPluginError>; /// Geyser plugins must describe desired behavior for load and unload, /// as well as how they will handle streamed data. pub trait GeyserPlugin: Any + Send + Sync + std::fmt::Debug { + /// The callback to allow the plugin to setup the logging configuration using the logger + /// and log level specified by the validator. Will be called first on load/reload, before any other + /// callback, and only called once.
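+ /// Each plugin is a separate dylib with its own copy of the `log` crate's
+ /// global state; without this hook, log macros invoked inside the plugin
+ /// would not reach the validator's logger.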
+ /// # Examples + /// + /// ``` + /// use solana_geyser_plugin_interface::geyser_plugin_interface::{GeyserPlugin, + /// GeyserPluginError, Result}; + /// + /// #[derive(Debug)] + /// struct SamplePlugin; + /// impl GeyserPlugin for SamplePlugin { + /// fn setup_logger(&self, logger: &'static dyn log::Log, level: log::LevelFilter) -> Result<()> { + /// log::set_max_level(level); + /// if let Err(err) = log::set_logger(logger) { + /// return Err(GeyserPluginError::Custom(Box::new(err))); + /// } + /// Ok(()) + /// } + /// fn name(&self) -> &'static str { + /// &"sample" + /// } + /// } + /// ``` + #[allow(unused_variables)] + fn setup_logger(&self, logger: &'static dyn log::Log, level: log::LevelFilter) -> Result<()> { + Ok(()) + } + fn name(&self) -> &'static str; /// The callback called when a plugin is loaded by the system, @@ -310,7 +358,7 @@ of the config file. The config must be in JSON format and /// include a field "libpath" indicating the full path /// name of the shared library implementing this interface. - fn on_load(&mut self, _config_file: &str) -> Result<()> { + fn on_load(&mut self, _config_file: &str, _is_reload: bool) -> Result<()> { Ok(()) } diff --git a/geyser-plugin-manager/src/entry_notifier.rs b/geyser-plugin-manager/src/entry_notifier.rs index ce6c3239c0946c..ea14592b615db8 100644 --- a/geyser-plugin-manager/src/entry_notifier.rs +++ b/geyser-plugin-manager/src/entry_notifier.rs @@ -4,7 +4,7 @@ use { log::*, solana_entry::entry::EntrySummary, solana_geyser_plugin_interface::geyser_plugin_interface::{ - ReplicaEntryInfo, ReplicaEntryInfoVersions, + ReplicaEntryInfoV2, ReplicaEntryInfoVersions, }, solana_ledger::entry_notifier_interface::EntryNotifier, solana_measure::measure::Measure, @@ -18,7 +18,13 @@ pub(crate) struct EntryNotifierImpl { } impl EntryNotifier for EntryNotifierImpl { - fn notify_entry<'a>(&'a self, slot: Slot, index: usize, entry: &'a EntrySummary) { + fn notify_entry<'a>( + &'a self, + slot: Slot, + index: usize, + entry: &'a EntrySummary, + starting_transaction_index: usize, + ) { let mut measure = Measure::start("geyser-plugin-notify_plugins_of_entry_info"); let plugin_manager = self.plugin_manager.read().unwrap(); @@ -26,13 +32,14 @@ impl EntryNotifier for EntryNotifierImpl { return; } - let entry_info = Self::build_replica_entry_info(slot, index, entry); + let entry_info = + Self::build_replica_entry_info(slot, index, entry, starting_transaction_index); for plugin in plugin_manager.plugins.iter() { if !plugin.entry_notifications_enabled() { continue; } - match plugin.notify_entry(ReplicaEntryInfoVersions::V0_0_1(&entry_info)) { + match plugin.notify_entry(ReplicaEntryInfoVersions::V0_0_2(&entry_info)) { Err(err) => { error!( "Failed to notify entry, error: ({}) to plugin {}", @@ -64,13 +71,15 @@ impl EntryNotifierImpl { slot: Slot, index: usize, entry: &'_ EntrySummary, - ) -> ReplicaEntryInfo<'_> { - ReplicaEntryInfo { + starting_transaction_index: usize, + ) -> ReplicaEntryInfoV2<'_> { + ReplicaEntryInfoV2 { slot, index, num_hashes: entry.num_hashes, hash: entry.hash.as_ref(), executed_transaction_count: entry.num_transactions, + starting_transaction_index, } } } diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 20729146767c0a..02792525ad370c 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -4,12 +4,48 @@ use { libloading::Library,
log::*, solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, - std::path::Path, + std::{ + ops::{Deref, DerefMut}, + path::Path, + }, }; +#[derive(Debug)] +pub struct LoadedGeyserPlugin { + name: String, + plugin: Box<dyn GeyserPlugin>, +} + +impl LoadedGeyserPlugin { + pub fn new(plugin: Box<dyn GeyserPlugin>, name: Option<String>) -> Self { + Self { + name: name.unwrap_or_else(|| plugin.name().to_owned()), + plugin, + } + } + + pub fn name(&self) -> &str { + &self.name + } +} + +impl Deref for LoadedGeyserPlugin { + type Target = Box<dyn GeyserPlugin>; + + fn deref(&self) -> &Self::Target { + &self.plugin + } +} + +impl DerefMut for LoadedGeyserPlugin { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.plugin + } +} + #[derive(Default, Debug)] pub struct GeyserPluginManager { - pub plugins: Vec<Box<dyn GeyserPlugin>>, + pub plugins: Vec<LoadedGeyserPlugin>, libs: Vec<Library>, } @@ -107,9 +143,11 @@ }); } + setup_logger_for_plugin(&*new_plugin.plugin)?; + // Call on_load and push plugin new_plugin - .on_load(new_config_file) + .on_load(new_config_file, false) .map_err(|on_load_err| jsonrpc_core::Error { code: ErrorCode::InvalidRequest, message: format!( @@ -193,8 +231,10 @@ }); } + setup_logger_for_plugin(&*new_plugin.plugin)?; + // Attempt to on_load with new plugin - match new_plugin.on_load(new_parsed_config_file) { + match new_plugin.on_load(new_parsed_config_file, true) { // On success, push plugin and library Ok(()) => { self.plugins.push(new_plugin); @@ -217,12 +257,31 @@ } fn _drop_plugin(&mut self, idx: usize) { + let current_lib = self.libs.remove(idx); let mut current_plugin = self.plugins.remove(idx); - let _current_lib = self.libs.remove(idx); + let name = current_plugin.name().to_string(); current_plugin.on_unload(); + // The plugin must be dropped before the library to avoid a crash.
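+ // Dropping the `Library` unmaps the dylib, and the plugin's code and
+ // vtable live in that mapping; dropping the plugin afterwards would run
+ // its `Drop` glue from unmapped memory.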
+ drop(current_plugin); + drop(current_lib); + info!("Unloaded plugin {name} at idx {idx}"); } } +// Initialize logging for the plugin +fn setup_logger_for_plugin(new_plugin: &dyn GeyserPlugin) -> Result<(), jsonrpc_core::Error> { + new_plugin + .setup_logger(log::logger(), log::max_level()) + .map_err(|setup_logger_err| jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "setup_logger method of plugin {} failed: {setup_logger_err}", + new_plugin.name() + ), + data: None, + }) +} + #[derive(Debug)] pub enum GeyserPluginManagerRequest { ReloadPlugin { @@ -280,7 +339,7 @@ pub enum GeyserPluginManagerError { #[cfg(not(test))] pub(crate) fn load_plugin_from_config( geyser_plugin_config_file: &Path, -) -> Result<(Box<dyn GeyserPlugin>, Library, &str), GeyserPluginManagerError> { +) -> Result<(LoadedGeyserPlugin, Library, &str), GeyserPluginManagerError> { use std::{fs::File, io::Read, path::PathBuf}; type PluginConstructor = unsafe fn() -> *mut dyn GeyserPlugin; use libloading::Symbol; @@ -323,6 +382,8 @@ libpath = config_dir.join(libpath); } + let plugin_name = result["name"].as_str().map(|s| s.to_owned()); + let config_file = geyser_plugin_config_file .as_os_str() .to_str() @@ -337,7 +398,11 @@ let plugin_raw = constructor(); (Box::from_raw(plugin_raw), lib) }; - Ok((plugin, lib, config_file)) + Ok(( + LoadedGeyserPlugin::new(plugin, plugin_name), + lib, + config_file, + )) } #[cfg(test)] @@ -353,7 +418,7 @@ const TESTPLUGIN2_CONFIG: &str = "TESTPLUGIN2_CONFIG"; #[cfg(test)] pub(crate) fn load_plugin_from_config( geyser_plugin_config_file: &Path, -) -> Result<(Box<dyn GeyserPlugin>, Library, &str), GeyserPluginManagerError> { +) -> Result<(LoadedGeyserPlugin, Library, &str), GeyserPluginManagerError> { if geyser_plugin_config_file.ends_with(TESTPLUGIN_CONFIG) { Ok(tests::dummy_plugin_and_library( tests::TestPlugin, @@ -375,7 +440,7 @@ mod tests { use { crate::geyser_plugin_manager::{ - GeyserPluginManager, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, + GeyserPluginManager, LoadedGeyserPlugin, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, }, libloading::Library, solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, @@ -385,9 +450,9 @@ pub(super) fn dummy_plugin_and_library<P: GeyserPlugin>( plugin: P, config_path: &'static str, - ) -> (Box<dyn GeyserPlugin>, Library, &'static str) { + ) -> (LoadedGeyserPlugin, Library, &'static str) { ( - Box::new(plugin), + LoadedGeyserPlugin::new(Box::new(plugin), None), Library::from(libloading::os::unix::Library::this()), config_path, ) @@ -430,7 +495,7 @@ // Mock having loaded plugin (TestPlugin) let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin, DUMMY_CONFIG); - plugin.on_load(config).unwrap(); + plugin.on_load(config, false).unwrap(); plugin_manager_lock.plugins.push(plugin); plugin_manager_lock.libs.push(lib); // plugin_manager_lock.libs.push(lib); @@ -465,12 +530,12 @@ // Load two plugins // First let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin, TESTPLUGIN_CONFIG); - plugin.on_load(config).unwrap(); + plugin.on_load(config, false).unwrap(); plugin_manager_lock.plugins.push(plugin); plugin_manager_lock.libs.push(lib); // Second let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin2, TESTPLUGIN2_CONFIG); - plugin.on_load(config).unwrap(); + plugin.on_load(config, false).unwrap(); plugin_manager_lock.plugins.push(plugin); plugin_manager_lock.libs.push(lib); diff --git a/gossip/Cargo.toml
b/gossip/Cargo.toml index 3696342ae83b24..f9870ac1ee380c 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -55,6 +55,7 @@ thiserror = { workspace = true } [dev-dependencies] num_cpus = { workspace = true } serial_test = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } test-case = { workspace = true } [build-dependencies] diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 67f713676d5d2f..cc539f5abe265d 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -39,6 +39,7 @@ use { epoch_slots::EpochSlots, gossip_error::GossipError, ping_pong::{self, PingCache, Pong}, + restart_crds_values::{RestartLastVotedForkSlots, RestartLastVotedForkSlotsError}, socketaddr, socketaddr_any, weighted_shuffle::WeightedShuffle, }, @@ -267,7 +268,7 @@ pub fn make_accounts_hashes_message( pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; // TODO These messages should go through the gpu pipeline for spam filtering -#[frozen_abi(digest = "CVvKB495YW6JN4w1rWwajyZmG5wvNhmD97V99rSv9fGw")] +#[frozen_abi(digest = "7a2P1GeQjyqCHMyBrhNPTKfPfG4iv32vki7XHahoN55z")] #[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)] #[allow(clippy::large_enum_variant)] pub(crate) enum Protocol { @@ -962,6 +963,26 @@ impl ClusterInfo { } } + pub fn push_restart_last_voted_fork_slots( + &self, + fork: &[Slot], + last_vote_bankhash: Hash, + ) -> Result<(), RestartLastVotedForkSlotsError> { + let now = timestamp(); + let last_voted_fork_slots = RestartLastVotedForkSlots::new( + self.id(), + now, + fork, + last_vote_bankhash, + self.my_shred_version(), + )?; + self.push_message(CrdsValue::new_signed( + CrdsData::RestartLastVotedForkSlots(last_voted_fork_slots), + &self.keypair(), + )); + Ok(()) + } + fn time_gossip_read_lock<'a>( &'a self, label: &'static str, @@ -1197,7 +1218,7 @@ } /// Returns epoch-slots inserted since the given cursor. - /// Excludes entries from nodes with unkown or different shred version. + /// Excludes entries from nodes with unknown or different shred version. pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> { let self_shred_version = Some(self.my_shred_version()); let gossip_crds = self.gossip.crds.read().unwrap(); @@ -1214,6 +1235,24 @@ .collect() } + pub fn get_restart_last_voted_fork_slots( + &self, + cursor: &mut Cursor, + ) -> Vec<RestartLastVotedForkSlots> { + let self_shred_version = self.my_shred_version(); + let gossip_crds = self.gossip.crds.read().unwrap(); + gossip_crds + .get_entries(cursor) + .filter_map(|entry| { + let CrdsData::RestartLastVotedForkSlots(slots) = &entry.value.data else { + return None; + }; + (slots.shred_version == self_shred_version).then_some(slots) + }) + .cloned() + .collect() + } + /// Returns duplicate-shreds inserted since the given cursor. pub(crate) fn get_duplicate_shreds(&self, cursor: &mut Cursor) -> Vec<DuplicateShred> { let gossip_crds = self.gossip.crds.read().unwrap(); @@ -1713,7 +1752,7 @@ match gossip_crds.trim(cap, &keep, stakes, timestamp()) { Err(err) => { self.stats.trim_crds_table_failed.add_relaxed(1); - // TODO: Stakes are comming from the root-bank. Debug why/when + // TODO: Stakes are coming from the root-bank. Debug why/when // they are empty/zero.
debug!("crds table trim failed: {:?}", err); } @@ -2046,7 +2085,7 @@ impl ClusterInfo { score }; let score = match response.data { - CrdsData::LegacyContactInfo(_) => 2 * score, + CrdsData::LegacyContactInfo(_) | CrdsData::ContactInfo(_) => 2 * score, _ => score, }; ((addr, response), score) @@ -3041,6 +3080,7 @@ fn filter_on_shred_version( if crds.get_shred_version(from) == Some(self_shred_version) { values.retain(|value| match &value.data { // Allow contact-infos so that shred-versions are updated. + CrdsData::ContactInfo(_) => true, CrdsData::LegacyContactInfo(_) => true, CrdsData::NodeInstance(_) => true, // Only retain values with the same shred version. @@ -3050,6 +3090,7 @@ values.retain(|value| match &value.data { // Allow node to update its own contact info in case their // shred-version changes + CrdsData::ContactInfo(node) => node.pubkey() == from, CrdsData::LegacyContactInfo(node) => node.pubkey() == from, CrdsData::NodeInstance(_) => true, _ => false, @@ -3068,6 +3109,11 @@ { Some(msg) } + CrdsData::ContactInfo(node) + if node.shred_version() == 0 || node.shred_version() == self_shred_version => + { + Some(msg) + } _ => { stats.skip_pull_shred_version.add_relaxed(1); None @@ -4487,4 +4533,73 @@ assert_eq!(shred_data.chunk_index() as usize, i); } } + + #[test] + fn test_push_restart_last_voted_fork_slots() { + let keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0); + let cluster_info = ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified); + let slots = cluster_info.get_restart_last_voted_fork_slots(&mut Cursor::default()); + assert!(slots.is_empty()); + let mut update: Vec<Slot> = vec![0]; + for i in 0..81 { + for j in 0..1000 { + update.push(i * 1050 + j); + } + } + assert!(cluster_info + .push_restart_last_voted_fork_slots(&update, Hash::default()) + .is_ok()); + cluster_info.flush_push_queue(); + + let mut cursor = Cursor::default(); + let slots = cluster_info.get_restart_last_voted_fork_slots(&mut cursor); + assert_eq!(slots.len(), 1); + let retrieved_slots = slots[0].to_slots(0); + assert!(retrieved_slots[0] < 69000); + assert_eq!(retrieved_slots.last(), Some(84999).as_ref()); + + let slots = cluster_info.get_restart_last_voted_fork_slots(&mut cursor); + assert!(slots.is_empty()); + + // Test with different shred versions. + let mut rng = rand::thread_rng(); + let node_pubkey = Pubkey::new_unique(); + let mut node = LegacyContactInfo::new_rand(&mut rng, Some(node_pubkey)); + node.set_shred_version(42); + let mut slots = RestartLastVotedForkSlots::new_rand(&mut rng, Some(node_pubkey)); + slots.shred_version = 42; + let entries = vec![ + CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(node)), + CrdsValue::new_unsigned(CrdsData::RestartLastVotedForkSlots(slots)), + ]; + { + let mut gossip_crds = cluster_info.gossip.crds.write().unwrap(); + for entry in entries { + assert!(gossip_crds + .insert(entry, /*now=*/ 0, GossipRoute::LocalMessage) + .is_ok()); + } + } + // Should exclude other node's last-voted-fork-slot because of different + // shred-version. + let slots = cluster_info.get_restart_last_voted_fork_slots(&mut Cursor::default()); + assert_eq!(slots.len(), 1); + assert_eq!(slots[0].from, cluster_info.id()); + + // Match shred versions.
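+ // Updating our own shred version to 42 means both our re-pushed value
+ // and the other node's value now pass the shred-version filter.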
+ { + let mut node = cluster_info.my_contact_info.write().unwrap(); + node.set_shred_version(42); + } + assert!(cluster_info + .push_restart_last_voted_fork_slots(&update, Hash::default()) + .is_ok()); + cluster_info.flush_push_queue(); + // Should now include both slots. + let slots = cluster_info.get_restart_last_voted_fork_slots(&mut Cursor::default()); + assert_eq!(slots.len(), 2); + assert_eq!(slots[0].from, node_pubkey); + assert_eq!(slots[1].from, cluster_info.id()); + } } diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index b09957f2ce5db7..b3ca9c94a762a5 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -350,7 +350,7 @@ impl ContactInfo { } // Removes the IP address at the given index if - // no socket entry refrences that index. + // no socket entry references that index. fn maybe_remove_addr(&mut self, index: u8) { if !self.sockets.iter().any(|entry| entry.index == index) { self.addrs.remove(usize::from(index)); diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index c3044dbba8cc43..7f70e79bf0add5 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -18,7 +18,7 @@ use { crds::{Crds, GossipRoute, VersionedCrdsValue}, crds_gossip, crds_gossip_error::CrdsGossipError, - crds_value::{CrdsData, CrdsValue}, + crds_value::CrdsValue, legacy_contact_info::LegacyContactInfo as ContactInfo, ping_pong::PingCache, }, @@ -28,7 +28,7 @@ Rng, }, rayon::{prelude::*, ThreadPool}, - solana_bloom::bloom::{AtomicBloom, Bloom}, + solana_bloom::bloom::{Bloom, ConcurrentBloom}, solana_sdk::{ hash::{hash, Hash}, native_token::LAMPORTS_PER_SOL, @@ -141,7 +141,7 @@ impl CrdsFilter { /// A vector of crds filters that together hold a complete set of Hashes. struct CrdsFilterSet { - filters: Vec<Option<AtomicBloom<Hash>>>, + filters: Vec<Option<ConcurrentBloom<Hash>>>, mask_bits: u32, } @@ -159,7 +159,7 @@ impl CrdsFilterSet { let k = rng.gen_range(0..indices.len()); let k = indices.swap_remove(k); let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize); - filters[k] = Some(AtomicBloom::<Hash>::from(filter)); + filters[k] = Some(ConcurrentBloom::<Hash>::from(filter)); } Self { filters, mask_bits } } @@ -488,11 +488,6 @@ impl CrdsGossipPull { let out: Vec<_> = crds .filter_bitmask(filter.mask, filter.mask_bits) .filter(pred) - .filter(|entry| { - // Exclude the new ContactInfo from the pull responses - // until the cluster has upgraded. - !matches!(&entry.value.data, CrdsData::ContactInfo(_)) - }) .map(|entry| entry.value.clone()) .take(output_size_limit.load(Ordering::Relaxed).max(0) as usize) .collect(); diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 345c9eaf17287f..72ffc30a4863ca 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -16,7 +16,7 @@ use { cluster_info::{Ping, CRDS_UNIQUE_PUBKEY_CAPACITY}, crds::{Crds, CrdsError, Cursor, GossipRoute}, crds_gossip, - crds_value::{CrdsData, CrdsValue}, + crds_value::CrdsValue, ping_pong::PingCache, push_active_set::PushActiveSet, received_cache::ReceivedCache, @@ -191,11 +191,6 @@ impl CrdsGossipPush { let crds = crds.read().unwrap(); let entries = crds .get_entries(crds_cursor.deref_mut()) - .filter(|entry| { - // Exclude the new ContactInfo from outgoing push messages - // until the cluster has upgraded.
- !matches!(&entry.value.data, CrdsData::ContactInfo(_)) - }) .map(|entry| &entry.value) .filter(|value| wallclock_window.contains(&value.wallclock())); for value in entries { diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 63efa141bdf129..61d916e76bc780 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -1,11 +1,12 @@ use { crate::{ - cluster_info::{MAX_ACCOUNTS_HASHES, MAX_CRDS_OBJECT_SIZE}, + cluster_info::MAX_ACCOUNTS_HASHES, contact_info::ContactInfo, deprecated, duplicate_shred::{DuplicateShred, DuplicateShredIndex, MAX_DUPLICATE_SHREDS}, - epoch_slots::{CompressedSlots, EpochSlots, MAX_SLOTS_PER_ENTRY}, + epoch_slots::EpochSlots, legacy_contact_info::LegacyContactInfo, + restart_crds_values::RestartLastVotedForkSlots, }, bincode::{serialize, serialized_size}, rand::{CryptoRng, Rng}, @@ -490,87 +491,6 @@ impl Sanitize for NodeInstance { } } -#[derive(Serialize, Deserialize, Clone, Default, PartialEq, Eq, AbiExample, Debug)] -pub struct RestartLastVotedForkSlots { - pub from: Pubkey, - pub wallclock: u64, - pub slots: Vec<CompressedSlots>, - pub last_voted_hash: Hash, - pub shred_version: u16, -} - -impl Sanitize for RestartLastVotedForkSlots { - fn sanitize(&self) -> std::result::Result<(), SanitizeError> { - if self.slots.is_empty() { - return Err(SanitizeError::InvalidValue); - } - self.slots.sanitize()?; - self.last_voted_hash.sanitize() - } -} - -impl RestartLastVotedForkSlots { - pub fn new(from: Pubkey, now: u64, last_voted_hash: Hash, shred_version: u16) -> Self { - Self { - from, - wallclock: now, - slots: Vec::new(), - last_voted_hash, - shred_version, - } - } - - /// New random Version for tests and benchmarks. - pub fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self { - let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand); - let mut result = - RestartLastVotedForkSlots::new(pubkey, new_rand_timestamp(rng), Hash::new_unique(), 1); - let num_slots = rng.gen_range(2..20); - let mut slots = std::iter::repeat_with(|| 47825632 + rng.gen_range(0..512)) - .take(num_slots) - .collect::<Vec<Slot>>(); - slots.sort(); - result.fill(&slots); - result - } - - pub fn fill(&mut self, slots: &[Slot]) -> usize { - let slots = &slots[slots.len().saturating_sub(MAX_SLOTS_PER_ENTRY)..]; - let mut num = 0; - let space = self.max_compressed_slot_size(); - if space == 0 { - return 0; - } - while num < slots.len() { - let mut cslot = CompressedSlots::new(space as usize); - num += cslot.add(&slots[num..]); - self.slots.push(cslot); - } - num - } - - pub fn deflate(&mut self) { - for s in self.slots.iter_mut() { - let _ = s.deflate(); - } - } - - pub fn max_compressed_slot_size(&self) -> isize { - let len_header = serialized_size(self).unwrap(); - let len_slot = serialized_size(&CompressedSlots::default()).unwrap(); - MAX_CRDS_OBJECT_SIZE as isize - (len_header + len_slot) as isize - } - - pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> { - self.slots - .iter() - .filter(|s| min_slot < s.first_slot() + s.num_slots() as u64) - .filter_map(|s| s.to_slots(min_slot).ok()) - .flatten() - .collect() - } -} - /// Type of the replicated value /// These are labels for values in a record that is associated with `Pubkey` #[derive(PartialEq, Hash, Eq, Clone, Debug)] @@ -1146,7 +1066,7 @@ mod test { assert!(!other.check_duplicate(&node_crds)); assert_eq!(node.overrides(&other_crds), None); assert_eq!(other.overrides(&node_crds), None); - // Differnt crds value is not a duplicate. + // Different crds value is not a duplicate.
let other = LegacyContactInfo::new_rand(&mut rng, Some(pubkey)); let other = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(other)); assert!(!node.check_duplicate(&other)); @@ -1169,58 +1089,4 @@ assert!(node.should_force_push(&pubkey)); assert!(!node.should_force_push(&Pubkey::new_unique())); } - - #[test] - fn test_restart_last_voted_fork_slots() { - let keypair = Keypair::new(); - let slot = 53; - let slot_parent = slot - 5; - let shred_version = 21; - let mut slots = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - Hash::default(), - shred_version, - ); - let original_slots_vec = [slot_parent, slot]; - slots.fill(&original_slots_vec); - let value = - CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(slots.clone()), &keypair); - assert_eq!(value.sanitize(), Ok(())); - let label = value.label(); - assert_eq!( - label, - CrdsValueLabel::RestartLastVotedForkSlots(keypair.pubkey()) - ); - assert_eq!(label.pubkey(), keypair.pubkey()); - assert_eq!(value.wallclock(), slots.wallclock); - let retrived_slots = slots.to_slots(0); - assert_eq!(retrived_slots.len(), 2); - assert_eq!(retrived_slots[0], slot_parent); - assert_eq!(retrived_slots[1], slot); - - let empty_slots = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - Hash::default(), - shred_version, - ); - let bad_value = - CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(empty_slots), &keypair); - assert_eq!(bad_value.sanitize(), Err(SanitizeError::InvalidValue)); - - let last_slot: Slot = (MAX_SLOTS_PER_ENTRY + 10).try_into().unwrap(); - let mut large_slots = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - Hash::default(), - shred_version, - ); - let large_slots_vec: Vec<Slot> = (0..last_slot + 1).collect(); - large_slots.fill(&large_slots_vec); - let retrived_slots = large_slots.to_slots(0); - assert_eq!(retrived_slots.len(), MAX_SLOTS_PER_ENTRY); - assert_eq!(retrived_slots.first(), Some(&11)); - assert_eq!(retrived_slots.last(), Some(&last_slot)); - } } diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index b1ceab79b26949..70e56d35e82334 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -30,7 +30,7 @@ pub struct DuplicateShred { pub(crate) wallclock: u64, pub(crate) slot: Slot, _unused: u32, - shred_type: ShredType, + _unused_shred_type: ShredType, // Serialized DuplicateSlotProof split into chunks. num_chunks: u8, chunk_index: u8, @@ -90,8 +90,8 @@ pub enum Error { /// Check that `shred1` and `shred2` indicate a valid duplicate proof /// - Must be for the same slot -/// - Must have the same `shred_type` /// - Must both sigverify for the correct leader +/// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type` /// - If `shred1` and `shred2` share the same index they must be not equal /// - If `shred1` and `shred2` do not share the same index and are data shreds /// verify that they indicate an index conflict.
One of them must be the @@ -106,10 +106,6 @@ where return Err(Error::SlotMismatch); } - if shred1.shred_type() != shred2.shred_type() { - return Err(Error::ShredTypeMismatch); - } - if let Some(leader_schedule) = leader_schedule { let slot_leader = leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?; @@ -118,6 +114,20 @@ where } } + // Merkle root conflict check + if shred1.fec_set_index() == shred2.fec_set_index() + && shred1.merkle_root().ok() != shred2.merkle_root().ok() + { + // This catches a mixture of legacy and merkle shreds + // as well as merkle shreds with different roots in the + // same fec set + return Ok(()); + } + + if shred1.shred_type() != shred2.shred_type() { + return Err(Error::ShredTypeMismatch); + } + if shred1.index() == shred2.index() { if shred1.payload() != shred2.payload() { return Ok(()); @@ -164,7 +174,7 @@ where } let other_shred = Shred::new_from_serialized_shred(other_payload)?; check_shreds(leader_schedule, &shred, &other_shred)?; - let (slot, shred_type) = (shred.slot(), shred.shred_type()); + let slot = shred.slot(); let proof = DuplicateSlotProof { shred1: shred.into_payload(), shred2: other_shred.into_payload(), @@ -184,27 +194,21 @@ where from: self_pubkey, wallclock, slot, - shred_type, num_chunks, chunk_index: i as u8, chunk, _unused: 0, + _unused_shred_type: ShredType::Code, }); Ok(chunks) } // Returns a predicate checking if a duplicate-shred chunk matches -// (slot, shred_type) and has valid chunk_index. -fn check_chunk( - slot: Slot, - shred_type: ShredType, - num_chunks: u8, -) -> impl Fn(&DuplicateShred) -> Result<(), Error> { +// the slot and has valid chunk_index. +fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result<(), Error> { move |dup| { if dup.slot != slot { Err(Error::SlotMismatch) - } else if dup.shred_type != shred_type { - Err(Error::ShredTypeMismatch) } else if dup.num_chunks != num_chunks { Err(Error::NumChunksMismatch) } else if dup.chunk_index >= num_chunks { @@ -226,13 +230,12 @@ pub(crate) fn into_shreds( let mut chunks = chunks.into_iter(); let DuplicateShred { slot, - shred_type, num_chunks, chunk_index, chunk, .. } = chunks.next().ok_or(Error::InvalidDuplicateShreds)?; - let check_chunk = check_chunk(slot, shred_type, num_chunks); + let check_chunk = check_chunk(slot, num_chunks); let mut data = HashMap::new(); data.insert(chunk_index, chunk); for chunk in chunks { @@ -260,8 +263,6 @@ pub(crate) fn into_shreds( let shred2 = Shred::new_from_serialized_shred(proof.shred2)?; if shred1.slot() != slot || shred2.slot() != slot { Err(Error::SlotMismatch) - } else if shred1.shred_type() != shred_type || shred2.shred_type() != shred_type { - Err(Error::ShredTypeMismatch) } else { check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?; Ok((shred1, shred2)) @@ -300,7 +301,7 @@ pub(crate) mod tests { from: Pubkey::new_unique(), wallclock: u64::MAX, slot: Slot::MAX, - shred_type: ShredType::Data, + _unused_shred_type: ShredType::Data, num_chunks: u8::MAX, chunk_index: u8::MAX, chunk: Vec::default(), @@ -421,7 +422,7 @@ pub(crate) mod tests { wallclock: u64, max_size: usize, // Maximum serialized size of each DuplicateShred. 
) -> Result<impl Iterator<Item = DuplicateShred>, Error> { - let (slot, shred_type) = (shred.slot(), shred.shred_type()); + let slot = shred.slot(); let proof = DuplicateSlotProof { shred1: shred.into_payload(), shred2: other_shred.into_payload(), @@ -437,11 +438,11 @@ from: self_pubkey, wallclock, slot, - shred_type, num_chunks, chunk_index: i as u8, chunk, _unused: 0, + _unused_shred_type: ShredType::Code, }); Ok(chunks) } @@ -949,4 +950,186 @@ ); } } + + #[test] + fn test_merkle_root_conflict_round_trip() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..31_000); + let leader_schedule = |s| { + if s == slot { + Some(leader.pubkey()) + } else { + None + } + }; + + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, /* merkle_variant */ + &shredder, + &leader, + false, + ); + + let (legacy_data_shreds, legacy_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + false, /* merkle_variant */ + &shredder, + &leader, + true, + ); + + let (diff_data_shreds, diff_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, /* merkle_variant */ + &shredder, + &leader, + false, + ); + + let test_cases = vec![ + (data_shreds[0].clone(), diff_data_shreds[1].clone()), + (coding_shreds[0].clone(), diff_coding_shreds[1].clone()), + (data_shreds[0].clone(), diff_coding_shreds[0].clone()), + (coding_shreds[0].clone(), diff_data_shreds[0].clone()), + // Mix of legacy and merkle for same fec set + (legacy_coding_shreds[0].clone(), data_shreds[0].clone()), + (coding_shreds[0].clone(), legacy_data_shreds[0].clone()), + (legacy_data_shreds[0].clone(), coding_shreds[0].clone()), + (data_shreds[0].clone(), legacy_coding_shreds[0].clone()), + ]; + for (shred1, shred2) in test_cases.into_iter() { + let chunks: Vec<_> = from_shred( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.payload().clone(), + Some(leader_schedule), + rng.gen(), // wallclock + 512, // max_size + ) + .unwrap() + .collect(); + assert!(chunks.len() > 4); + let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap(); + assert_eq!(shred1, shred3); + assert_eq!(shred2, shred4); + } + } + + #[test] + fn test_merkle_root_conflict_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..31_000); + let leader_schedule = |s| { + if s == slot { + Some(leader.pubkey()) + } else { + None + } + }; + + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + let (next_data_shreds, next_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index + 1, + next_shred_index + 1, + 10, + true, + &shredder, + &leader, + true, + ); + + let (legacy_data_shreds, legacy_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + false, + &shredder, + &leader, + true, + ); + + let test_cases = vec![ + // Same fec set same merkle root + (coding_shreds[0].clone(),
data_shreds[0].clone()), + (data_shreds[0].clone(), coding_shreds[0].clone()), + // Different FEC set different merkle root + (coding_shreds[0].clone(), next_data_shreds[0].clone()), + (next_coding_shreds[0].clone(), data_shreds[0].clone()), + (data_shreds[0].clone(), next_coding_shreds[0].clone()), + (next_data_shreds[0].clone(), coding_shreds[0].clone()), + // Legacy shreds + ( + legacy_coding_shreds[0].clone(), + legacy_data_shreds[0].clone(), + ), + ( + legacy_data_shreds[0].clone(), + legacy_coding_shreds[0].clone(), + ), + // Mix of legacy and merkle with different fec index + (legacy_coding_shreds[0].clone(), next_data_shreds[0].clone()), + (next_coding_shreds[0].clone(), legacy_data_shreds[0].clone()), + (legacy_data_shreds[0].clone(), next_coding_shreds[0].clone()), + (next_data_shreds[0].clone(), legacy_coding_shreds[0].clone()), + ]; + for (shred1, shred2) in test_cases.into_iter() { + assert_matches!( + from_shred( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.payload().clone(), + Some(leader_schedule), + rng.gen(), // wallclock + 512, // max_size + ) + .err() + .unwrap(), + Error::ShredTypeMismatch + ); + + let chunks: Vec<_> = from_shred_bypass_checks( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.clone(), + rng.gen(), // wallclock + 512, // max_size + ) + .unwrap() + .collect(); + assert!(chunks.len() > 4); + + assert_matches!( + into_shreds(&leader.pubkey(), chunks).err().unwrap(), + Error::ShredTypeMismatch + ); + } + } } diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index 11b609f3a37f52..2aea3078bbd7e7 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -24,6 +24,7 @@ pub mod legacy_contact_info; pub mod ping_pong; mod push_active_set; mod received_cache; +pub mod restart_crds_values; pub mod weighted_shuffle; #[macro_use] diff --git a/gossip/src/push_active_set.rs b/gossip/src/push_active_set.rs index 8b6dcb6f58843d..1e7e3cbb22844c 100644 --- a/gossip/src/push_active_set.rs +++ b/gossip/src/push_active_set.rs @@ -2,7 +2,7 @@ use { crate::weighted_shuffle::WeightedShuffle, indexmap::IndexMap, rand::Rng, - solana_bloom::bloom::{AtomicBloom, Bloom}, + solana_bloom::bloom::{Bloom, ConcurrentBloom}, solana_sdk::{native_token::LAMPORTS_PER_SOL, pubkey::Pubkey}, std::collections::HashMap, }; @@ -19,7 +19,7 @@ pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES // Keys are gossip nodes to push messages to. // Values are which origins the node has pruned. 
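+// A ConcurrentBloom can be updated through a shared reference, so the push
+// path can record pruned origins without exclusive locking.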
#[derive(Default)] -struct PushActiveSetEntry(IndexMap<Pubkey, AtomicBloom<Pubkey>>); +struct PushActiveSetEntry(IndexMap<Pubkey, ConcurrentBloom<Pubkey>>); impl PushActiveSet { #[cfg(debug_assertions)] @@ -151,7 +151,7 @@ impl PushActiveSetEntry { if self.0.contains_key(node) { continue; } - let bloom = AtomicBloom::from(Bloom::random( + let bloom = ConcurrentBloom::from(Bloom::random( num_bloom_filter_items, Self::BLOOM_FALSE_RATE, Self::BLOOM_MAX_BITS, diff --git a/gossip/src/restart_crds_values.rs b/gossip/src/restart_crds_values.rs new file mode 100644 index 00000000000000..02f9359cce71f0 --- /dev/null +++ b/gossip/src/restart_crds_values.rs @@ -0,0 +1,320 @@ +use { + crate::crds_value::new_rand_timestamp, + bv::BitVec, + itertools::Itertools, + rand::Rng, + solana_sdk::{ + clock::Slot, + hash::Hash, + pubkey::Pubkey, + sanitize::{Sanitize, SanitizeError}, + serde_varint, + }, + thiserror::Error, +}; + +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, AbiExample, Debug)] +pub struct RestartLastVotedForkSlots { + pub from: Pubkey, + pub wallclock: u64, + offsets: SlotsOffsets, + pub last_voted_slot: Slot, + pub last_voted_hash: Hash, + pub shred_version: u16, +} + +#[derive(Debug, Error)] +pub enum RestartLastVotedForkSlotsError { + #[error("Last voted fork cannot be empty")] + LastVotedForkEmpty, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, AbiExample, AbiEnumVisitor)] +enum SlotsOffsets { + RunLengthEncoding(RunLengthEncoding), + RawOffsets(RawOffsets), +} + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] +struct U16(#[serde(with = "serde_varint")] u16); + +// The vector always starts with 1. Encode number of 1's and 0's consecutively. +// For example, 110000111 is [2, 4, 3]. +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] +struct RunLengthEncoding(Vec<U16>); + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] +struct RawOffsets(BitVec<u8>); + +impl Sanitize for RestartLastVotedForkSlots { + fn sanitize(&self) -> std::result::Result<(), SanitizeError> { + self.last_voted_hash.sanitize() + } +} + +impl RestartLastVotedForkSlots { + // This number is MAX_CRDS_OBJECT_SIZE - empty serialized RestartLastVotedForkSlots. + const MAX_BYTES: usize = 824; + + // Per design doc, we should start wen_restart within 7 hours. + pub const MAX_SLOTS: usize = u16::MAX as usize; + + pub fn new( + from: Pubkey, + now: u64, + last_voted_fork: &[Slot], + last_voted_hash: Hash, + shred_version: u16, + ) -> Result<Self, RestartLastVotedForkSlotsError> { + let Some((&first_voted_slot, &last_voted_slot)) = + last_voted_fork.iter().minmax().into_option() + else { + return Err(RestartLastVotedForkSlotsError::LastVotedForkEmpty); + }; + let max_size = last_voted_slot.saturating_sub(first_voted_slot) + 1; + let mut uncompressed_bitvec = BitVec::new_fill(false, max_size); + for slot in last_voted_fork { + uncompressed_bitvec.set(last_voted_slot - *slot, true); + } + let run_length_encoding = RunLengthEncoding::new(&uncompressed_bitvec); + let offsets = + if run_length_encoding.num_encoded_slots() > RestartLastVotedForkSlots::MAX_BYTES * 8 { + SlotsOffsets::RunLengthEncoding(run_length_encoding) + } else { + SlotsOffsets::RawOffsets(RawOffsets::new(uncompressed_bitvec)) + }; + Ok(Self { + from, + wallclock: now, + offsets, + last_voted_slot, + last_voted_hash, + shred_version, + }) + }
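To make the offsets comment above concrete: a minimal, self-contained sketch of the run-length idea the new file describes (helper names here are illustrative, not the real API; the actual implementation is RunLengthEncoding below, which additionally varint-encodes each run and enforces a byte budget). Bits are taken from the last voted slot downward, the first run always counts 1-bits, and 110000111 becomes [2, 4, 3].

// Hedged sketch only, assuming a plain Vec<bool> instead of bv::BitVec.
fn run_length_encode(bits: &[bool]) -> Vec<u16> {
    let mut runs = Vec::new();
    let mut current = true; // the vector always starts with a 1-bit
    let mut count = 0u16;
    for &bit in bits {
        if bit == current {
            count += 1;
        } else {
            runs.push(count);
            current = bit;
            count = 1;
        }
    }
    runs.push(count);
    runs
}

fn main() {
    // 110000111 -> [2, 4, 3], matching the comment in the struct above.
    let bits = [true, true, false, false, false, false, true, true, true];
    assert_eq!(run_length_encode(&bits), vec![2, 4, 3]);
}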
+ + /// New random RestartLastVotedForkSlots for tests and benchmarks. + pub fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self { + let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand); + let num_slots = rng.gen_range(2..20); + let slots = std::iter::repeat_with(|| 47825632 + rng.gen_range(0..512)) + .take(num_slots) + .collect::<Vec<Slot>>(); + RestartLastVotedForkSlots::new( + pubkey, + new_rand_timestamp(rng), + &slots, + Hash::new_unique(), + 1, + ) + .unwrap() + } + + pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> { + match &self.offsets { + SlotsOffsets::RunLengthEncoding(run_length_encoding) => { + run_length_encoding.to_slots(self.last_voted_slot, min_slot) + } + SlotsOffsets::RawOffsets(raw_offsets) => { + raw_offsets.to_slots(self.last_voted_slot, min_slot) + } + } + } +} + +impl RunLengthEncoding { + fn new(bits: &BitVec<u8>) -> Self { + let encoded = (0..bits.len()) + .map(|i| bits.get(i)) + .dedup_with_count() + .map_while(|(count, _)| u16::try_from(count).ok()) + .scan(0, |current_bytes, count| { + *current_bytes += ((u16::BITS - count.leading_zeros() + 6) / 7).max(1) as usize; + (*current_bytes <= RestartLastVotedForkSlots::MAX_BYTES).then_some(U16(count)) + }) + .collect(); + Self(encoded) + } + + fn num_encoded_slots(&self) -> usize { + self.0.iter().map(|x| usize::from(x.0)).sum() + } + + fn to_slots(&self, last_slot: Slot, min_slot: Slot) -> Vec<Slot> { + let mut slots: Vec<Slot> = self + .0 + .iter() + .map(|bit_count| usize::from(bit_count.0)) + .zip([1, 0].iter().cycle()) + .flat_map(|(bit_count, bit)| std::iter::repeat(bit).take(bit_count)) + .enumerate() + .filter(|(_, bit)| **bit == 1) + .map_while(|(offset, _)| { + let offset = Slot::try_from(offset).ok()?; + last_slot.checked_sub(offset) + }) + .take(RestartLastVotedForkSlots::MAX_SLOTS) + .take_while(|slot| *slot >= min_slot) + .collect(); + slots.reverse(); + slots + } +} + +impl RawOffsets { + fn new(mut bits: BitVec<u8>) -> Self { + bits.truncate(RestartLastVotedForkSlots::MAX_BYTES as u64 * 8); + bits.shrink_to_fit(); + Self(bits) + } + + fn to_slots(&self, last_slot: Slot, min_slot: Slot) -> Vec<Slot> { + let mut slots: Vec<Slot> = (0..self.0.len()) + .filter(|index| self.0.get(*index)) + .map_while(|offset| last_slot.checked_sub(offset)) + .take_while(|slot| *slot >= min_slot) + .collect(); + slots.reverse(); + slots + } +}
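The byte budgeting in RunLengthEncoding::new above relies on serde_varint writing a u16 in 7-bit groups, so a run of length count costs ceil(significant_bits / 7) bytes, with a minimum of one byte. A quick standalone check of that arithmetic (the same expression the scan uses):

// Sketch of the varint size arithmetic used in RunLengthEncoding::new.
fn varint_len(count: u16) -> usize {
    ((u16::BITS - count.leading_zeros() + 6) / 7).max(1) as usize
}

fn main() {
    assert_eq!(varint_len(0), 1); // no significant bits, still one byte
    assert_eq!(varint_len(127), 1); // fits in 7 bits
    assert_eq!(varint_len(128), 2); // 8 significant bits -> 2 bytes
    assert_eq!(varint_len(u16::MAX), 3); // 16 bits -> 3 bytes
}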
+ +#[cfg(test)] +mod test { + use { + super::*, + crate::{ + cluster_info::MAX_CRDS_OBJECT_SIZE, + crds_value::{CrdsData, CrdsValue, CrdsValueLabel}, + }, + bincode::serialized_size, + solana_sdk::{signature::Signer, signer::keypair::Keypair, timing::timestamp}, + std::iter::repeat_with, + }; + + fn make_rand_slots<R: Rng>(rng: &mut R) -> impl Iterator<Item = Slot> + '_ { + repeat_with(|| rng.gen_range(1..5)).scan(0, |slot, step| { + *slot += step; + Some(*slot) + }) + } + + #[test] + fn test_restart_last_voted_fork_slots_max_bytes() { + let keypair = Keypair::new(); + let header = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &[1, 2], + Hash::default(), + 0, + ) + .unwrap(); + // If the following assert fails, please update RestartLastVotedForkSlots::MAX_BYTES + assert_eq!( + RestartLastVotedForkSlots::MAX_BYTES, + MAX_CRDS_OBJECT_SIZE - serialized_size(&header).unwrap() as usize + ); + + // Create large enough slots to make sure we are discarding some to make slots fit. + let mut rng = rand::thread_rng(); + let large_length = 8000; + let range: Vec<Slot> = make_rand_slots(&mut rng).take(large_length).collect(); + let large_slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &range, + Hash::default(), + 0, + ) + .unwrap(); + assert!(serialized_size(&large_slots).unwrap() <= MAX_CRDS_OBJECT_SIZE as u64); + let retrieved_slots = large_slots.to_slots(0); + assert!(retrieved_slots.len() <= range.len()); + assert!(retrieved_slots.last().unwrap() - retrieved_slots.first().unwrap() > 5000); + } + + #[test] + fn test_restart_last_voted_fork_slots() { + let keypair = Keypair::new(); + let slot = 53; + let slot_parent = slot - 5; + let shred_version = 21; + let original_slots_vec = [slot_parent, slot]; + let slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &original_slots_vec, + Hash::default(), + shred_version, + ) + .unwrap(); + let value = + CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(slots.clone()), &keypair); + assert_eq!(value.sanitize(), Ok(())); + let label = value.label(); + assert_eq!( + label, + CrdsValueLabel::RestartLastVotedForkSlots(keypair.pubkey()) + ); + assert_eq!(label.pubkey(), keypair.pubkey()); + assert_eq!(value.wallclock(), slots.wallclock); + let retrieved_slots = slots.to_slots(0); + assert_eq!(retrieved_slots.len(), 2); + assert_eq!(retrieved_slots[0], slot_parent); + assert_eq!(retrieved_slots[1], slot); + + let bad_value = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &[], + Hash::default(), + shred_version, + ); + assert!(bad_value.is_err()); + + let last_slot: Slot = 8000; + let large_slots_vec: Vec<Slot> = (0..last_slot + 1).collect(); + let large_slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &large_slots_vec, + Hash::default(), + shred_version, + ) + .unwrap(); + assert!(serialized_size(&large_slots).unwrap() < MAX_CRDS_OBJECT_SIZE as u64); + let retrieved_slots = large_slots.to_slots(0); + assert_eq!(retrieved_slots, large_slots_vec); + } + + fn check_run_length_encoding(slots: Vec<Slot>) { + let last_voted_slot = slots[slots.len() - 1]; + let mut bitvec = BitVec::new_fill(false, last_voted_slot - slots[0] + 1); + for slot in &slots { + bitvec.set(last_voted_slot - slot, true); + } + let rle = RunLengthEncoding::new(&bitvec); + let retrieved_slots = rle.to_slots(last_voted_slot, 0); + assert_eq!(retrieved_slots, slots); + } + + #[test] + fn test_run_length_encoding() { + check_run_length_encoding((1000..16384 + 1000).map(|x| x as Slot).collect_vec()); + check_run_length_encoding([1000 as Slot].into()); + check_run_length_encoding( + [ + 1000 as Slot, + RestartLastVotedForkSlots::MAX_SLOTS as Slot + 999, + ] + .into(), + ); + check_run_length_encoding((1000..1800).step_by(2).map(|x| x as Slot).collect_vec()); + + let mut rng = rand::thread_rng(); + let large_length = 500; + let range: Vec<Slot> = make_rand_slots(&mut rng).take(large_length).collect(); + check_run_length_encoding(range); + } +} diff --git a/install/src/command.rs b/install/src/command.rs index ed8d37ff0f3b8e..d7b92c17690bda 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -501,7 +501,6 @@ fn add_to_path(new_path: &str) -> bool { fn append_file(dest: &Path, line: &str) -> io::Result<()> { use std::io::Write; let mut dest_file = fs::OpenOptions::new() - .write(true) .append(true) .create(true) .open(dest)?; diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index c64dfa07e91a91..ddc1ca9b564e94 100644 --- a/ledger-tool/Cargo.toml +++
b/ledger-tool/Cargo.toml @@ -45,9 +45,11 @@ solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } solana-transaction-status = { workspace = true } +solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } solana_rbpf = { workspace = true, features = ["debugger"] } +thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 0bb28e4a2779ca..63198c1c6188fa 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -2,7 +2,7 @@ use { crate::LEDGER_TOOL_DIRECTORY, clap::{value_t, values_t_or_exit, ArgMatches}, solana_accounts_db::{ - accounts_db::{AccountsDb, AccountsDbConfig, FillerAccountsConfig}, + accounts_db::{AccountsDb, AccountsDbConfig}, accounts_index::{AccountsIndexConfig, IndexLimitMb}, partitioned_rewards::TestPartitionedEpochRewards, }, @@ -53,11 +53,6 @@ pub fn get_accounts_db_config( ..AccountsIndexConfig::default() }; - let filler_accounts_config = FillerAccountsConfig { - count: value_t!(arg_matches, "accounts_filler_count", usize).unwrap_or(0), - size: value_t!(arg_matches, "accounts_filler_size", usize).unwrap_or(0), - }; - let accounts_hash_cache_path = arg_matches .value_of("accounts_hash_cache_path") .map(Into::into) @@ -77,7 +72,6 @@ pub fn get_accounts_db_config( index: Some(accounts_index_config), base_working_path: Some(ledger_tool_ledger_path), accounts_hash_cache_path: Some(accounts_hash_cache_path), - filler_accounts_config, ancient_append_vec_offset: value_t!(arg_matches, "accounts_db_ancient_append_vecs", i64) .ok(), exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"), diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index c4d5c77f302669..cf153aae6ce2cb 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -1,6 +1,9 @@ //! 
The `bigtable` subcommand use { - crate::ledger_path::canonicalize_ledger_path, + crate::{ + ledger_path::canonicalize_ledger_path, + output::{CliBlockWithEntries, CliEntries, EncodedConfirmedBlockWithEntries}, + }, clap::{ value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, }, @@ -23,8 +26,8 @@ use { solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, solana_storage_bigtable::CredentialType, solana_transaction_status::{ - BlockEncodingOptions, ConfirmedBlock, EncodeError, TransactionDetails, - UiTransactionEncoding, VersionedConfirmedBlock, + BlockEncodingOptions, ConfirmedBlock, EncodeError, EncodedConfirmedBlock, + TransactionDetails, UiTransactionEncoding, VersionedConfirmedBlock, }, std::{ cmp::min, @@ -113,6 +116,7 @@ async fn first_available_block( async fn block( slot: Slot, output_format: OutputFormat, + show_entries: bool, config: solana_storage_bigtable::LedgerStorageConfig, ) -> Result<(), Box<dyn std::error::Error>> { let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) @@ -126,7 +130,7 @@ async fn block( BlockEncodingOptions { transaction_details: TransactionDetails::Full, show_rewards: true, - max_supported_transaction_version: None, + max_supported_transaction_version: Some(0), }, ) .map_err(|err| match err { @@ -134,12 +138,43 @@ async fn block( format!("Failed to process unsupported transaction version ({version}) in block") } })?; + let encoded_block: EncodedConfirmedBlock = encoded_block.into(); + + if show_entries { + let entries = bigtable.get_entries(slot).await?; + let cli_block = CliBlockWithEntries { + encoded_confirmed_block: EncodedConfirmedBlockWithEntries::try_from( + encoded_block, + entries, + )?, + slot, + }; + println!("{}", output_format.formatted_string(&cli_block)); + } else { + let cli_block = CliBlock { + encoded_confirmed_block: encoded_block, + slot, + }; + println!("{}", output_format.formatted_string(&cli_block)); + } + Ok(()) +} - let cli_block = CliBlock { - encoded_confirmed_block: encoded_block.into(), +async fn entries( + slot: Slot, + output_format: OutputFormat, + config: solana_storage_bigtable::LedgerStorageConfig, +) -> Result<(), Box<dyn std::error::Error>> { + let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) + .await + .map_err(|err| format!("Failed to connect to storage: {err:?}"))?; + + let entries = bigtable.get_entries(slot).await?; + let cli_entries = CliEntries { + entries: entries.map(Into::into).collect(), slot, }; - println!("{}", output_format.formatted_string(&cli_block)); + println!("{}", output_format.formatted_string(&cli_entries)); Ok(()) } @@ -165,16 +200,6 @@ async fn compare_blocks( config: solana_storage_bigtable::LedgerStorageConfig, ref_config: solana_storage_bigtable::LedgerStorageConfig, ) -> Result<(), Box<dyn std::error::Error>> { - let owned_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) - .await - .map_err(|err| format!("failed to connect to owned bigtable: {err:?}"))?; - let owned_bigtable_slots = owned_bigtable - .get_confirmed_blocks(starting_slot, limit) - .await?; - info!( - "owned bigtable {} blocks found ", - owned_bigtable_slots.len() - ); let reference_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(ref_config) .await .map_err(|err| format!("failed to connect to reference bigtable: {err:?}"))?; @@ -187,13 +212,38 @@ async fn compare_blocks( reference_bigtable_slots.len(), ); + if reference_bigtable_slots.is_empty() { + println!("Reference bigtable is empty after {starting_slot}. 
Aborting."); + return Ok(()); + } + + let owned_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) + .await + .map_err(|err| format!("failed to connect to owned bigtable: {err:?}"))?; + let owned_bigtable_slots = owned_bigtable + .get_confirmed_blocks(starting_slot, limit) + .await?; + info!( + "owned bigtable {} blocks found ", + owned_bigtable_slots.len() + ); + + let MissingBlocksData { + last_block_checked, + missing_blocks, + superfluous_blocks, + num_reference_blocks, + num_owned_blocks, + } = missing_blocks(&reference_bigtable_slots, &owned_bigtable_slots); + println!( "{}", json!({ - "num_reference_slots": json!(reference_bigtable_slots.len()), - "num_owned_slots": json!(owned_bigtable_slots.len()), - "reference_last_block": json!(reference_bigtable_slots.len().checked_sub(1).map(|i| reference_bigtable_slots[i])), - "missing_blocks": json!(missing_blocks(&reference_bigtable_slots, &owned_bigtable_slots)), + "num_reference_slots": json!(num_reference_blocks), + "num_owned_slots": json!(num_owned_blocks), + "reference_last_block": json!(last_block_checked), + "missing_blocks": json!(missing_blocks), + "superfluous_blocks": json!(superfluous_blocks), }) ); @@ -453,7 +503,10 @@ async fn copy(args: CopyArgs) -> Result<(), Box> { debug!("worker {}: received slot {}", i, slot); if !args.force { - match destination_bigtable_clone.confirmed_block_exists(slot).await { + match destination_bigtable_clone + .confirmed_block_exists(slot) + .await + { Ok(exist) => { if exist { skip_slots_clone.lock().unwrap().push(slot); @@ -461,7 +514,11 @@ async fn copy(args: CopyArgs) -> Result<(), Box> { } } Err(err) => { - error!("confirmed_block_exists() failed from the destination Bigtable, slot: {}, err: {}", slot, err); + error!( + "confirmed_block_exists() failed from the destination \ + Bigtable, slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } @@ -481,33 +538,44 @@ async fn copy(args: CopyArgs) -> Result<(), Box> { } } Err(err) => { - error!("failed to get a confirmed block from the source Bigtable, slot: {}, err: {}", slot, err); + error!( + "failed to get a confirmed block from the source Bigtable, \ + slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } }; } else { let confirmed_block = - match source_bigtable_clone.get_confirmed_block(slot).await { - Ok(block) => match VersionedConfirmedBlock::try_from(block) { - Ok(block) => block, + match source_bigtable_clone.get_confirmed_block(slot).await { + Ok(block) => match VersionedConfirmedBlock::try_from(block) { + Ok(block) => block, + Err(err) => { + error!( + "failed to convert confirmed block to versioned \ + confirmed block, slot: {}, err: {}", + slot, err + ); + failed_slots_clone.lock().unwrap().push(slot); + continue; + } + }, + Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => { + debug!("block not found, slot: {}", slot); + block_not_found_slots_clone.lock().unwrap().push(slot); + continue; + } Err(err) => { - error!("failed to convert confirmed block to versioned confirmed block, slot: {}, err: {}", slot, err); + error!( + "failed to get confirmed block, slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } - }, - Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => { - debug!("block not found, slot: {}", slot); - block_not_found_slots_clone.lock().unwrap().push(slot); - continue; - } - Err(err) => { - error!("failed to get confirmed block, slot: {}, err: {}", slot, err); 
- failed_slots_clone.lock().unwrap().push(slot); - continue; - } - }; + }; match destination_bigtable_clone .upload_confirmed_block(slot, confirmed_block) @@ -609,7 +677,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Name of the target Bigtable instance") + .help("Name of the target Bigtable instance"), ) .arg( Arg::with_name("rpc_bigtable_app_profile_id") @@ -618,7 +686,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Bigtable application profile id to use in requests") + .help("Bigtable application profile id to use in requests"), ) .subcommand( SubCommand::with_name("upload") @@ -648,9 +716,9 @@ impl BigTableSubCommand for App<'_, '_> { .long("force") .takes_value(false) .help( - "Force reupload of any blocks already present in BigTable instance\ - Note: reupload will *not* delete any data from the tx-by-addr table;\ - Use with care.", + "Force reupload of any blocks already present in BigTable \ + instance. Note: reupload will *not* delete any data from the \ + tx-by-addr table; Use with care.", ), ), ) @@ -658,24 +726,25 @@ impl BigTableSubCommand for App<'_, '_> { SubCommand::with_name("delete-slots") .about("Delete ledger information from BigTable") .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to delete"), - ) - .arg( - Arg::with_name("force") - .long("force") - .takes_value(false) - .help( - "Deletions are only performed when the force flag is enabled. \ - If force is not enabled, show stats about what ledger data \ - will be deleted in a real deletion. "), - ), + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to delete"), ) + .arg( + Arg::with_name("force") + .long("force") + .takes_value(false) + .help( + "Deletions are only performed when the force flag is enabled. \ + If force is not enabled, show stats about what ledger data \ + will be deleted in a real deletion. 
", + ), + ), + ) .subcommand( SubCommand::with_name("first-available-block") .about("Get the first available block in the storage"), @@ -708,8 +777,10 @@ impl BigTableSubCommand for App<'_, '_> { ) .subcommand( SubCommand::with_name("compare-blocks") - .about("Find the missing confirmed blocks of an owned bigtable for a given range \ - by comparing to a reference bigtable") + .about( + "Find the missing confirmed blocks of an owned bigtable for a given \ + range by comparing to a reference bigtable", + ) .arg( Arg::with_name("starting_slot") .validator(is_slot) @@ -745,7 +816,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Name of the reference Bigtable instance to compare to") + .help("Name of the reference Bigtable instance to compare to"), ) .arg( Arg::with_name("reference_app_profile_id") @@ -753,12 +824,33 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Reference Bigtable application profile id to use in requests") + .help( + "Reference Bigtable application profile id to use in requests", + ), ), ) .subcommand( SubCommand::with_name("block") .about("Get a confirmed block") + .arg( + Arg::with_name("slot") + .long("slot") + .validator(is_slot) + .value_name("SLOT") + .takes_value(true) + .index(1) + .required(true), + ) + .arg( + Arg::with_name("show_entries") + .long("show-entries") + .required(false) + .help("Display the transactions in their entries"), + ), + ) + .subcommand( + SubCommand::with_name("entries") + .about("Get the entry data for a block") .arg( Arg::with_name("slot") .long("slot") @@ -785,8 +877,8 @@ impl BigTableSubCommand for App<'_, '_> { .subcommand( SubCommand::with_name("transaction-history") .about( - "Show historical transactions affecting the given address \ - from newest to oldest", + "Show historical transactions affecting the given address from newest \ + to oldest", ) .arg( Arg::with_name("address") @@ -815,8 +907,8 @@ impl BigTableSubCommand for App<'_, '_> { .default_value("1000") .help( "Number of transaction signatures to query at once. \ - Smaller: more responsive/lower throughput. \ - Larger: less responsive/higher throughput", + Smaller: more responsive/lower throughput. 
\ + Larger: less responsive/higher throughput", ), ) .arg( @@ -850,7 +942,8 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .conflicts_with("emulated_source") .help( - "Source Bigtable credential filepath (credential may be readonly)", + "Source Bigtable credential filepath (credential may be \ + readonly)", ), ) .arg( @@ -859,9 +952,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("EMULATED_SOURCE") .takes_value(true) .conflicts_with("source_credential_path") - .help( - "Source Bigtable emulated source", - ), + .help("Source Bigtable emulated source"), ) .arg( Arg::with_name("source_instance_name") @@ -869,7 +960,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("SOURCE_INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Source Bigtable instance name") + .help("Source Bigtable instance name"), ) .arg( Arg::with_name("source_app_profile_id") @@ -877,7 +968,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("SOURCE_APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Source Bigtable app profile id") + .help("Source Bigtable app profile id"), ) .arg( Arg::with_name("destination_credential_path") @@ -886,7 +977,8 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .conflicts_with("emulated_destination") .help( - "Destination Bigtable credential filepath (credential must have Bigtable write permissions)", + "Destination Bigtable credential filepath (credential must \ + have Bigtable write permissions)", ), ) .arg( @@ -895,9 +987,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("EMULATED_DESTINATION") .takes_value(true) .conflicts_with("destination_credential_path") - .help( - "Destination Bigtable emulated destination", - ), + .help("Destination Bigtable emulated destination"), ) .arg( Arg::with_name("destination_instance_name") @@ -905,7 +995,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("DESTINATION_INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Destination Bigtable instance name") + .help("Destination Bigtable instance name"), ) .arg( Arg::with_name("destination_app_profile_id") @@ -913,7 +1003,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("DESTINATION_APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Destination Bigtable app profile id") + .help("Destination Bigtable app profile id"), ) .arg( Arg::with_name("starting_slot") @@ -922,9 +1012,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("START_SLOT") .takes_value(true) .required(true) - .help( - "Start copying at this slot", - ), + .help("Start copying at this slot (inclusive)"), ) .arg( Arg::with_name("ending_slot") @@ -932,26 +1020,25 @@ impl BigTableSubCommand for App<'_, '_> { .validator(is_slot) .value_name("END_SLOT") .takes_value(true) - .help("Stop copying at this slot (inclusive, START_SLOT ..= END_SLOT)"), + .help("Stop copying at this slot (inclusive)"), ) .arg( Arg::with_name("force") - .long("force") - .value_name("FORCE") - .takes_value(false) - .help( - "Force copy of blocks already present in destination Bigtable instance", - ), + .long("force") + .value_name("FORCE") + .takes_value(false) + .help( + "Force copy of blocks already present in destination Bigtable \ + instance", + ), ) .arg( Arg::with_name("dry_run") - .long("dry-run") - .value_name("DRY_RUN") - .takes_value(false) - .help( - "Dry 
run. It won't upload any blocks", - ), - ) + .long("dry-run") + .value_name("DRY_RUN") + .takes_value(false) + .help("Dry run. It won't upload any blocks"), + ), ), ) } @@ -988,9 +1075,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let runtime = tokio::runtime::Runtime::new().unwrap(); let verbose = matches.is_present("verbose"); - let force_update_to_open = matches.is_present("force_update_to_open"); let output_format = OutputFormat::from_matches(matches, "output_format", verbose); - let enforce_ulimit_nofile = !matches.is_present("ignore_ulimit_nofile_error"); let (subcommand, sub_matches) = matches.subcommand(); let instance_name = get_global_subcommand_arg( @@ -1013,10 +1098,8 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let force_reupload = arg_matches.is_present("force_reupload"); let blockstore = crate::open_blockstore( &canonicalize_ledger_path(ledger_path), + arg_matches, AccessType::Secondary, - None, - force_update_to_open, - enforce_ulimit_nofile, ); let config = solana_storage_bigtable::LedgerStorageConfig { read_only: false, @@ -1053,19 +1136,30 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { } ("block", Some(arg_matches)) => { let slot = value_t_or_exit!(arg_matches, "slot", Slot); + let show_entries = arg_matches.is_present("show_entries"); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() }; - runtime.block_on(block(slot, output_format, config)) + runtime.block_on(block(slot, output_format, show_entries, config)) + } + ("entries", Some(arg_matches)) => { + let slot = value_t_or_exit!(arg_matches, "slot", Slot); + let config = solana_storage_bigtable::LedgerStorageConfig { + read_only: true, + instance_name, + app_profile_id, + ..solana_storage_bigtable::LedgerStorageConfig::default() + }; + runtime.block_on(entries(slot, output_format, config)) } ("blocks", Some(arg_matches)) => { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1077,7 +1171,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1094,7 +1188,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let ref_app_profile_id = value_t_or_exit!(arg_matches, "reference_app_profile_id", String); let ref_config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, credential_type: CredentialType::Filepath(credential_path), instance_name: ref_instance_name, app_profile_id: ref_app_profile_id, @@ -1110,7 +1204,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { .parse() .expect("Invalid signature"); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, 
..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1077,7 +1171,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1094,7 +1188,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let ref_app_profile_id = value_t_or_exit!(arg_matches, "reference_app_profile_id", String); let ref_config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, credential_type: CredentialType::Filepath(credential_path), instance_name: ref_instance_name, app_profile_id: ref_app_profile_id, @@ -1110,7 +1204,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { .parse() .expect("Invalid signature"); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1157,21 +1251,80 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { }); } -fn missing_blocks(reference: &[Slot], owned: &[Slot]) -> Vec<Slot> { +#[derive(Debug, PartialEq)] +struct MissingBlocksData { + last_block_checked: Slot, + missing_blocks: Vec<Slot>, + superfluous_blocks: Vec<Slot>, + num_reference_blocks: usize, + num_owned_blocks: usize, +} + +fn missing_blocks(reference: &[Slot], owned: &[Slot]) -> MissingBlocksData { + // Generally, callers should return early and not bother calling + // `missing_blocks()` when the reference set is empty. This code block is + // included for completeness, to prevent panics. + if reference.is_empty() { + return MissingBlocksData { + last_block_checked: owned.last().cloned().unwrap_or_default(), + missing_blocks: vec![], + superfluous_blocks: owned.to_owned(), + num_reference_blocks: 0, + num_owned_blocks: owned.len(), + }; + } + + // Because the owned bigtable may include superfluous slots, stop checking + // the reference set at owned.last() or else the remaining reference slots + // will show up as missing. + let last_reference_block = reference + .last() + .expect("already returned if reference is empty"); + let last_block_checked = owned + .last() + .map(|last_owned_block| min(last_owned_block, last_reference_block)) + .unwrap_or(last_reference_block); + if owned.is_empty() && !reference.is_empty() { - return reference.to_owned(); - } else if owned.is_empty() { - return vec![]; + return MissingBlocksData { + last_block_checked: *last_block_checked, + missing_blocks: reference.to_owned(), + superfluous_blocks: vec![], + num_reference_blocks: reference.len(), + num_owned_blocks: 0, + }; } - let owned_hashset: HashSet<_> = owned.iter().collect(); - let mut missing_slots = vec![]; - for slot in reference { - if !owned_hashset.contains(slot) { - missing_slots.push(slot.to_owned()); - } + let owned_hashset: HashSet<_> = owned + .iter() + .take_while(|&slot| slot <= last_block_checked) + .cloned() + .collect(); + let reference_hashset: HashSet<_> = reference + .iter() + .take_while(|&slot| slot <= last_block_checked) + .cloned() + .collect(); + + let mut missing_blocks: Vec<_> = reference_hashset + .difference(&owned_hashset) + .cloned() + .collect(); + missing_blocks.sort_unstable(); // Unstable sort is fine, as we've already ensured no duplicates + + let mut superfluous_blocks: Vec<_> = owned_hashset + .difference(&reference_hashset) + .cloned() + .collect(); + superfluous_blocks.sort_unstable(); // Unstable sort is fine, as we've already ensured no duplicates + + MissingBlocksData { + last_block_checked: *last_block_checked, + missing_blocks, + superfluous_blocks, + num_reference_blocks: reference_hashset.len(), + num_owned_blocks: owned_hashset.len(), } - missing_slots }
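A small worked illustration of the new return shape (hypothetical slot values, mirroring the unit tests that follow): comparison stops at the shorter of the two lists, and owned slots absent from the reference come back as superfluous.

// Hedged usage sketch for missing_blocks(), assuming the types above are in scope.
fn demo() {
    let reference: [u64; 5] = [1, 2, 3, 5, 8];
    let owned: [u64; 4] = [1, 2, 4, 5];
    let data = missing_blocks(&reference, &owned);
    // Checking stops at slot 5, the last owned slot, so 8 is not "missing".
    assert_eq!(data.last_block_checked, 5);
    assert_eq!(data.missing_blocks, vec![3]);
    assert_eq!(data.superfluous_blocks, vec![4]);
    assert_eq!(data.num_reference_blocks, 4); // 1, 2, 3, 5
    assert_eq!(data.num_owned_blocks, 4);
}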
#[cfg(test)] @@ -1185,25 +1338,66 @@ mod tests { let owned_slots_leftshift = vec![0, 25, 26, 27, 28, 29, 30, 31, 32]; let owned_slots_rightshift = vec![0, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54]; let missing_slots = vec![37, 41, 42]; - let missing_slots_leftshift = vec![37, 38, 39, 40, 41, 42, 43, 44, 45]; let missing_slots_rightshift = vec![37, 38, 39, 40, 41, 42, 43, 45]; - assert!(missing_blocks(&[], &[]).is_empty()); - assert!(missing_blocks(&[], &owned_slots).is_empty()); + assert_eq!( + missing_blocks(&[], &[]), + MissingBlocksData { + last_block_checked: 0, + missing_blocks: vec![], + superfluous_blocks: vec![], + num_reference_blocks: 0, + num_owned_blocks: 0, + } + ); + assert_eq!( + missing_blocks(&[], &owned_slots), + MissingBlocksData { + last_block_checked: *owned_slots.last().unwrap(), + missing_blocks: vec![], + superfluous_blocks: owned_slots.clone(), + num_reference_blocks: 0, + num_owned_blocks: owned_slots.len(), + } + ); assert_eq!( missing_blocks(&reference_slots, &[]), - reference_slots.to_owned() + MissingBlocksData { + last_block_checked: *reference_slots.last().unwrap(), + missing_blocks: reference_slots.clone(), + superfluous_blocks: vec![], + num_reference_blocks: reference_slots.len(), + num_owned_blocks: 0, + } ); assert_eq!( missing_blocks(&reference_slots, &owned_slots), - missing_slots + MissingBlocksData { + last_block_checked: *reference_slots.last().unwrap(), // reference_slots.last() < owned_slots.last() + missing_blocks: missing_slots.clone(), + superfluous_blocks: vec![], + num_reference_blocks: reference_slots.len(), + num_owned_blocks: owned_slots.len() - 2, + } ); assert_eq!( missing_blocks(&reference_slots, &owned_slots_leftshift), - missing_slots_leftshift + MissingBlocksData { + last_block_checked: *owned_slots_leftshift.last().unwrap(), + missing_blocks: vec![], + superfluous_blocks: owned_slots_leftshift[1..].to_vec(), + num_reference_blocks: 1, + num_owned_blocks: owned_slots_leftshift.len(), + } ); assert_eq!( missing_blocks(&reference_slots, &owned_slots_rightshift), - missing_slots_rightshift + MissingBlocksData { + last_block_checked: *reference_slots.last().unwrap(), // reference_slots.last() < missing_slots_rightshift.last() + missing_blocks: missing_slots_rightshift.clone(), + superfluous_blocks: vec![], + num_reference_blocks: reference_slots.len(), + num_owned_blocks: owned_slots_rightshift.len() - 9, + } ); } } diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 6514312bc5d43d..9de68a95857e11 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -7,10 +7,12 @@ use { solana_core::{ accounts_hash_verifier::AccountsHashVerifier, validator::BlockVerificationMethod, }, - solana_geyser_plugin_manager::geyser_plugin_service::GeyserPluginService, + solana_geyser_plugin_manager::geyser_plugin_service::{ + GeyserPluginService, GeyserPluginServiceError, + }, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::{ - bank_forks_utils, + bank_forks_utils::{self, BankForksUtilsError}, blockstore::{Blockstore, BlockstoreError}, blockstore_options::{ AccessType, BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions, @@ -28,18 +30,20 @@ use { PrunedBanksRequestHandler, SnapshotRequestHandler, }, bank_forks::BankForks, + prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, snapshot_utils::{ self, clean_orphaned_account_snapshot_dirs, create_all_accounts_run_and_snapshot_dirs, - move_and_async_delete_path_contents, + move_and_async_delete_path_contents, SnapshotError, }, }, solana_sdk::{ - genesis_config::GenesisConfig, signature::Signer, signer::keypair::Keypair, - timing::timestamp, + clock::Slot, genesis_config::GenesisConfig, pubkey::Pubkey, signature::Signer, + signer::keypair::Keypair, timing::timestamp, transaction::VersionedTransaction, }, solana_streamer::socket::SocketAddrSpace, + solana_unified_scheduler_pool::DefaultSchedulerPool, std::{ path::{Path, PathBuf}, process::exit, @@ -48,20 +52,68 @@ use { Arc, RwLock, }, }, + thiserror::Error, }; -pub fn get_shred_storage_type(ledger_path: &Path, message: &str) -> ShredStorageType { - // TODO: the following
shred_storage_type inference must be updated once - the rocksdb options can be constructed via load_options_file() as the - value picked by passing None for `max_shred_storage_size` could affect - the persisted rocksdb options file. - match ShredStorageType::from_ledger_path(ledger_path, None) { - Some(s) => s, - None => { - info!("{}", message); - ShredStorageType::RocksLevel - } - } +const PROCESS_SLOTS_HELP_STRING: &str = + "The starting slot is either the latest found snapshot slot, or genesis (slot 0) if the \ + --no-snapshot flag was specified or if no snapshots were found. \ + The ending slot is the snapshot creation slot for create-snapshot, the value for \ + --halt-at-slot if specified, or the highest slot in the blockstore."; + +#[derive(Error, Debug)] +pub(crate) enum LoadAndProcessLedgerError { + #[error("failed to clean orphaned account snapshot directories: {0}")] + CleanOrphanedAccountSnapshotDirectories(#[source] SnapshotError), + + #[error("failed to create all run and snapshot directories: {0}")] + CreateAllAccountsRunAndSnapshotDirectories(#[source] SnapshotError), + + #[error("custom accounts path is not supported with secondary blockstore access")] + CustomAccountsPathUnsupported(#[source] BlockstoreError), + + #[error( + "failed to process blockstore from starting slot {0} to ending slot {1}; the ending slot \ + is less than the starting slot. {2}" + )] + EndingSlotLessThanStartingSlot(Slot, Slot, String), + + #[error( + "failed to process blockstore from starting slot {0} to ending slot {1}; the blockstore \ + does not contain a replayable sequence of blocks between these slots. {2}" + )] + EndingSlotNotReachableFromStartingSlot(Slot, Slot, String), + + #[error("failed to setup geyser service: {0}")] + GeyserServiceSetup(#[source] GeyserPluginServiceError), + + #[error("failed to load bank forks: {0}")] + LoadBankForks(#[source] BankForksUtilsError), + + #[error("failed to process blockstore from root: {0}")] + ProcessBlockstoreFromRoot(#[source] BlockstoreProcessorError), +} + +pub fn load_and_process_ledger_or_exit( + arg_matches: &ArgMatches, + genesis_config: &GenesisConfig, + blockstore: Arc<Blockstore>, + process_options: ProcessOptions, + snapshot_archive_path: Option<PathBuf>, + incremental_snapshot_archive_path: Option<PathBuf>, +) -> (Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>) { + load_and_process_ledger( + arg_matches, + genesis_config, + blockstore, + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ) + .unwrap_or_else(|err| { + eprintln!("Exiting. Failed to load and process ledger: {err}"); + exit(1); + }) } pub fn load_and_process_ledger( @@ -71,7 +123,7 @@ pub fn load_and_process_ledger( process_options: ProcessOptions, snapshot_archive_path: Option<PathBuf>, incremental_snapshot_archive_path: Option<PathBuf>, -) -> Result<(Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>), BlockstoreProcessorError> { +) -> Result<(Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>), LoadAndProcessLedgerError> { let bank_snapshots_dir = if blockstore.is_primary_access() { blockstore.ledger_path().join("snapshot") } else { @@ -109,8 +161,6 @@ pub fn load_and_process_ledger( }) }; - let start_slot_msg = "The starting slot will be the latest snapshot slot, or genesis if \ - the --no-snapshot flag is specified or if no snapshots are found."; match process_options.halt_at_slot { // Skip the following checks for sentinel values of Some(0) and None. // For Some(0), no slots will be replayed after starting_slot.
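For callers that want to branch on failures instead of exiting, the new error enum composes in the usual thiserror style. A hedged sketch only (assuming LoadAndProcessLedgerError is in scope; the variants are exactly those declared above):

// Illustrative only: how a caller might inspect LoadAndProcessLedgerError
// rather than using load_and_process_ledger_or_exit().
fn describe(err: &LoadAndProcessLedgerError) -> String {
    match err {
        LoadAndProcessLedgerError::EndingSlotLessThanStartingSlot(start, end, help) => {
            format!("bad slot range {start}..{end}: {help}")
        }
        LoadAndProcessLedgerError::GeyserServiceSetup(source) => {
            format!("geyser setup failed: {source}")
        }
        // Every variant implements Display via thiserror's #[error(...)].
        other => other.to_string(),
    }
}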
@@ -118,20 +168,21 @@ pub fn load_and_process_ledger( None | Some(0) => {} Some(halt_slot) => { if halt_slot < starting_slot { - eprintln!( - "Unable to process blockstore from starting slot {starting_slot} to \ - {halt_slot}; the ending slot is less than the starting slot. {start_slot_msg}" - ); - exit(1); + return Err(LoadAndProcessLedgerError::EndingSlotLessThanStartingSlot( + starting_slot, + halt_slot, + PROCESS_SLOTS_HELP_STRING.to_string(), + )); } // Check if we have the slot data necessary to replay from starting_slot to >= halt_slot. if !blockstore.slot_range_connected(starting_slot, halt_slot) { - eprintln!( - "Unable to process blockstore from starting slot {starting_slot} to \ - {halt_slot}; the blockstore does not contain a replayable chain between these \ - slots. {start_slot_msg}" + return Err( + LoadAndProcessLedgerError::EndingSlotNotReachableFromStartingSlot( + starting_slot, + halt_slot, + PROCESS_SLOTS_HELP_STRING.to_string(), + ), ); - exit(1); } } } @@ -149,19 +200,15 @@ pub fn load_and_process_ledger( "Checking if another process currently holding Primary access to {:?}", blockstore.ledger_path() ); - if Blockstore::open_with_options( + Blockstore::open_with_options( blockstore.ledger_path(), BlockstoreOptions { access_type: AccessType::PrimaryForMaintenance, ..BlockstoreOptions::default() }, ) - .is_err() - { - // Couldn't get Primary access, error out to be defensive. - eprintln!("Error: custom accounts path is not supported under secondary access"); - exit(1); - } + // Couldn't get Primary access, error out to be defensive. + .map_err(LoadAndProcessLedgerError::CustomAccountsPathUnsupported)?; } account_paths.split(',').map(PathBuf::from).collect() } else if blockstore.is_primary_access() { @@ -179,11 +226,8 @@ pub fn load_and_process_ledger( }; let (account_run_paths, account_snapshot_paths) = - create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap_or_else(|err| { - eprintln!("Error: {err}"); - exit(1); - }); - + create_all_accounts_run_and_snapshot_dirs(&account_paths) + .map_err(LoadAndProcessLedgerError::CreateAllAccountsRunAndSnapshotDirectories)?; // From now on, use run/ paths in the same way as the previous account_paths. 
let account_paths = account_run_paths; @@ -201,12 +245,8 @@ pub fn load_and_process_ledger( snapshot_utils::purge_incomplete_bank_snapshots(&bank_snapshots_dir); info!("Cleaning contents of account snapshot paths: {account_snapshot_paths:?}"); - if let Err(err) = - clean_orphaned_account_snapshot_dirs(&bank_snapshots_dir, &account_snapshot_paths) - { - eprintln!("Failed to clean orphaned account snapshot dirs: {err}"); - exit(1); - } + clean_orphaned_account_snapshot_dirs(&bank_snapshots_dir, &account_snapshot_paths) + .map_err(LoadAndProcessLedgerError::CleanOrphanedAccountSnapshotDirectories)?; let geyser_plugin_active = arg_matches.is_present("geyser_plugin_config"); let (accounts_update_notifier, transaction_notifier) = if geyser_plugin_active { @@ -218,12 +258,8 @@ pub fn load_and_process_ledger( let (confirmed_bank_sender, confirmed_bank_receiver) = unbounded(); drop(confirmed_bank_sender); let geyser_service = - GeyserPluginService::new(confirmed_bank_receiver, &geyser_config_files).unwrap_or_else( - |err| { - eprintln!("Failed to setup Geyser service: {err}"); - exit(1); - }, - ); + GeyserPluginService::new(confirmed_bank_receiver, &geyser_config_files) + .map_err(LoadAndProcessLedgerError::GeyserServiceSetup)?; ( geyser_service.get_accounts_update_notifier(), geyser_service.get_transaction_notifier(), @@ -245,7 +281,8 @@ pub fn load_and_process_ledger( None, // Maybe support this later, though accounts_update_notifier, exit.clone(), - ); + ) + .map_err(LoadAndProcessLedgerError::LoadBankForks)?; let block_verification_method = value_t!( arg_matches, "block_verification_method", BlockVerificationMethod ) .unwrap_or_default(); info!( "Using: block-verification-method: {}", block_verification_method, ); + match block_verification_method { + BlockVerificationMethod::BlockstoreProcessor => { + info!("no scheduler pool is installed for block verification..."); + } + BlockVerificationMethod::UnifiedScheduler => { + let no_transaction_status_sender = None; + let no_replay_vote_sender = None; + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + bank_forks + .write() + .unwrap() + .install_scheduler_pool(DefaultSchedulerPool::new_dyn( + process_options.runtime_config.log_messages_bytes_limit, + no_transaction_status_sender, + no_replay_vote_sender, + ignored_prioritization_fee_cache, + )); + } + } let node_id = Arc::new(Keypair::new()); let cluster_info = Arc::new(ClusterInfo::new( @@ -306,10 +362,8 @@ pub fn load_and_process_ledger( let tss_blockstore = if enable_rpc_transaction_history { Arc::new(open_blockstore( blockstore.ledger_path(), + arg_matches, AccessType::PrimaryForMaintenance, - None, - false, - false, )) } else { blockstore.clone() @@ -345,7 +399,8 @@ pub fn load_and_process_ledger( None, // Maybe support this later, though &accounts_background_request_sender, ) - .map(|_| (bank_forks, starting_snapshot_hashes)); + .map(|_| (bank_forks, starting_snapshot_hashes)) + .map_err(LoadAndProcessLedgerError::ProcessBlockstoreFromRoot); exit.store(true, Ordering::Relaxed); accounts_background_service.join().unwrap(); @@ -359,16 +414,19 @@ pub fn load_and_process_ledger( pub fn open_blockstore( ledger_path: &Path, + matches: &ArgMatches, access_type: AccessType, - wal_recovery_mode: Option<BlockstoreRecoveryMode>, - force_update_to_open: bool, - enforce_ulimit_nofile: bool, ) -> Blockstore { + let wal_recovery_mode = matches + .value_of("wal_recovery_mode") + .map(BlockstoreRecoveryMode::from); + let force_update_to_open = matches.is_present("force_update_to_open"); + let
enforce_ulimit_nofile = !matches.is_present("ignore_ulimit_nofile_error"); let shred_storage_type = get_shred_storage_type( ledger_path, &format!( - "Shred storage type cannot be inferred for ledger at {ledger_path:?}, \ - using default RocksLevel", + "Shred storage type cannot be inferred for ledger at {ledger_path:?}, using default \ + RocksLevel", ), ); @@ -401,13 +459,13 @@ pub fn open_blockstore( if missing_blockstore && is_secondary { eprintln!( - "Failed to open blockstore at {ledger_path:?}, it \ - is missing at least one critical file: {err:?}" + "Failed to open blockstore at {ledger_path:?}, it is missing at least one \ + critical file: {err:?}" ); } else if missing_column && is_secondary { eprintln!( - "Failed to open blockstore at {ledger_path:?}, it \ - does not have all necessary columns: {err:?}" + "Failed to open blockstore at {ledger_path:?}, it does not have all necessary \ + columns: {err:?}" ); } else { eprintln!("Failed to open blockstore at {ledger_path:?}: {err:?}"); @@ -437,6 +495,20 @@ pub fn open_blockstore( } } +pub fn get_shred_storage_type(ledger_path: &Path, message: &str) -> ShredStorageType { + // TODO: the following shred_storage_type inference must be updated once + // the rocksdb options can be constructed via load_options_file() as the + // value picked by passing None for `max_shred_storage_size` could affect + // the persisted rocksdb options file. + match ShredStorageType::from_ledger_path(ledger_path, None) { + Some(s) => s, + None => { + info!("{}", message); + ShredStorageType::RocksLevel + } + } +} + /// Open blockstore with temporary primary access to allow necessary, /// persistent changes to be made to the blockstore (such as creation of new /// column family(s)). Then, continue opening with `original_access_type` @@ -480,3 +552,13 @@ pub fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> G value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64); open_genesis_config(ledger_path, max_genesis_archive_unpacked_size) } + +pub fn get_program_ids(tx: &VersionedTransaction) -> impl Iterator<Item = &Pubkey> + '_ { + let message = &tx.message; + let account_keys = message.static_account_keys(); + + message + .instructions() + .iter() + .map(|ix| ix.program_id(account_keys)) +} diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 33031e9d14a0a5..47b5cc0024400d 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -35,15 +35,11 @@ use { validator::BlockVerificationMethod, }, solana_cost_model::{cost_model::CostModel, cost_tracker::CostTracker}, - solana_entry::entry::Entry, solana_ledger::{ ancestor_iterator::AncestorIterator, blockstore::{create_new_ledger, Blockstore, PurgeType}, blockstore_db::{self, columns as cf, Column, ColumnName, Database}, - blockstore_options::{ - AccessType, BlockstoreRecoveryMode, LedgerColumnOptions, - BLOCKSTORE_DIRECTORY_ROCKS_FIFO, - }, + blockstore_options::{AccessType, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_FIFO}, blockstore_processor::ProcessOptions, shred::Shred, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, @@ -76,9 +72,7 @@ use { shred_version::compute_shred_version, stake::{self, state::StakeStateV2}, system_program, - transaction::{ - MessageHash, SanitizedTransaction, SimpleAddressLoader, VersionedTransaction, - }, + transaction::{MessageHash, SanitizedTransaction, SimpleAddressLoader}, }, solana_stake_program::stake_state::{self, PointValue}, solana_vote_program::{ @@ -109,22 +103,6 @@ mod ledger_utils; mod output; mod program;
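With the refactor above, call sites no longer thread the WAL-recovery, force-update and ulimit flags individually; they hand the parsed clap matches straight to the helper. A hedged sketch of the new call shape (the wrapper function is hypothetical; the three-argument call mirrors the updated call sites in this diff):

// Hypothetical call site for the new open_blockstore() signature.
use {
    clap::ArgMatches,
    solana_ledger::{blockstore::Blockstore, blockstore_options::AccessType},
    std::path::Path,
};

fn open_for_maintenance(ledger_path: &Path, arg_matches: &ArgMatches) -> Blockstore {
    // Recovery mode, force-update and ulimit handling are now read from
    // the ArgMatches inside open_blockstore() itself.
    open_blockstore(ledger_path, arg_matches, AccessType::PrimaryForMaintenance)
}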
-#[derive(PartialEq, Eq)] -enum LedgerOutputMethod { - Print, - Json, -} - -fn get_program_ids(tx: &VersionedTransaction) -> impl Iterator<Item = &Pubkey> + '_ { - let message = &tx.message; - let account_keys = message.static_account_keys(); - - message - .instructions() - .iter() - .map(|ix| ix.program_id(account_keys)) -} - fn parse_encoding_format(matches: &ArgMatches<'_>) -> UiAccountEncoding { match matches.value_of("encoding") { Some("jsonParsed") => UiAccountEncoding::JsonParsed, @@ -134,238 +112,6 @@ fn parse_encoding_format(matches: &ArgMatches<'_>) -> UiAccountEncoding { } } -fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &LedgerOutputMethod) { - // Note: rewards are not output in JSON yet - if *method == LedgerOutputMethod::Print { - if let Ok(Some(rewards)) = blockstore.read_rewards(slot) { - if !rewards.is_empty() { - println!(" Rewards:"); - println!( - " {:<44} {:^15} {:<15} {:<20} {:>10}", - "Address", "Type", "Amount", "New Balance", "Commission", - ); - - for reward in rewards { - let sign = if reward.lamports < 0 { "-" } else { "" }; - println!( - " {:<44} {:^15} {}◎{:<14.9} ◎{:<18.9} {}", - reward.pubkey, - if let Some(reward_type) = reward.reward_type { - format!("{reward_type}") - } else { - "-".to_string() - }, - sign, - lamports_to_sol(reward.lamports.unsigned_abs()), - lamports_to_sol(reward.post_balance), - reward - .commission - .map(|commission| format!("{commission:>9}%")) - .unwrap_or_else(|| " -".to_string()) - ); - } - } - } - } -} - -fn output_entry( - blockstore: &Blockstore, - method: &LedgerOutputMethod, - slot: Slot, - entry_index: usize, - entry: Entry, -) { - match method { - LedgerOutputMethod::Print => { - println!( - " Entry {} - num_hashes: {}, hash: {}, transactions: {}", - entry_index, - entry.num_hashes, - entry.hash, - entry.transactions.len() - ); - for (transactions_index, transaction) in entry.transactions.into_iter().enumerate() { - println!(" Transaction {transactions_index}"); - let tx_signature = transaction.signatures[0]; - let tx_status_meta = blockstore - .read_transaction_status((tx_signature, slot)) - .unwrap_or_else(|err| { - eprintln!( - "Failed to read transaction status for {} at slot {}: {}", - transaction.signatures[0], slot, err - ); - None - }) - .map(|meta| meta.into()); - - solana_cli_output::display::println_transaction( - &transaction, - tx_status_meta.as_ref(), - " ", - None, - None, - ); - } - } - LedgerOutputMethod::Json => { - // Note: transaction status is not output in JSON yet - serde_json::to_writer(stdout(), &entry).expect("serialize entry"); - stdout().write_all(b",\n").expect("newline"); - } - } -} -
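The removed output_slot logic below (relocated to the output module in this PR) tallies program invocations per slot with a plain HashMap before printing them sorted by count. The pattern in isolation, with hypothetical string keys standing in for Pubkeys:

use std::collections::HashMap;

// Standalone sketch of the per-slot program-id tally that output_slot performs.
fn tally<'a>(program_ids: impl Iterator<Item = &'a str>) -> HashMap<&'a str, u64> {
    let mut counts = HashMap::new();
    for id in program_ids {
        *counts.entry(id).or_insert(0) += 1;
    }
    counts
}

fn main() {
    let counts = tally(["vote", "vote", "stake"].into_iter());
    assert_eq!(counts["vote"], 2);
    assert_eq!(counts["stake"], 1);
    // output_sorted_program_ids then prints these, sorted descending by count.
}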
-fn output_slot( - blockstore: &Blockstore, - slot: Slot, - allow_dead_slots: bool, - method: &LedgerOutputMethod, - verbose_level: u64, - all_program_ids: &mut HashMap<Pubkey, u64>, -) -> Result<(), String> { - if blockstore.is_dead(slot) { - if allow_dead_slots { - if *method == LedgerOutputMethod::Print { - println!(" Slot is dead"); - } - } else { - return Err("Dead slot".to_string()); - } - } - - let (entries, num_shreds, is_full) = blockstore - .get_slot_entries_with_shred_info(slot, 0, allow_dead_slots) - .map_err(|err| format!("Failed to load entries for slot {slot}: {err:?}"))?; - - if *method == LedgerOutputMethod::Print { - if let Ok(Some(meta)) = blockstore.meta(slot) { - if verbose_level >= 1 { - println!(" {meta:?} is_full: {is_full}"); - } else { - println!( - " num_shreds: {}, parent_slot: {:?}, next_slots: {:?}, num_entries: {}, is_full: {}", - num_shreds, - meta.parent_slot, - meta.next_slots, - entries.len(), - is_full, - ); - } - } - } - - if verbose_level >= 2 { - for (entry_index, entry) in entries.into_iter().enumerate() { - output_entry(blockstore, method, slot, entry_index, entry); - } - - output_slot_rewards(blockstore, slot, method); - } else if verbose_level >= 1 { - let mut transactions = 0; - let mut num_hashes = 0; - let mut program_ids = HashMap::new(); - let blockhash = if let Some(entry) = entries.last() { - entry.hash - } else { - Hash::default() - }; - - for entry in entries { - transactions += entry.transactions.len(); - num_hashes += entry.num_hashes; - for transaction in entry.transactions { - for program_id in get_program_ids(&transaction) { - *program_ids.entry(*program_id).or_insert(0) += 1; - } - } - } - - println!(" Transactions: {transactions}, hashes: {num_hashes}, block_hash: {blockhash}",); - for (pubkey, count) in program_ids.iter() { - *all_program_ids.entry(*pubkey).or_insert(0) += count; - } - println!(" Programs:"); - output_sorted_program_ids(program_ids); - } - Ok(()) -} - -fn output_ledger( - blockstore: Blockstore, - starting_slot: Slot, - ending_slot: Slot, - allow_dead_slots: bool, - method: LedgerOutputMethod, - num_slots: Option<Slot>, - verbose_level: u64, - only_rooted: bool, -) { - let slot_iterator = blockstore - .slot_meta_iterator(starting_slot) - .unwrap_or_else(|err| { - eprintln!("Failed to load entries starting from slot {starting_slot}: {err:?}"); - exit(1); - }); - - if method == LedgerOutputMethod::Json { - stdout().write_all(b"{\"ledger\":[\n").expect("open array"); - } - - let num_slots = num_slots.unwrap_or(Slot::MAX); - let mut num_printed = 0; - let mut all_program_ids = HashMap::new(); - for (slot, slot_meta) in slot_iterator { - if only_rooted && !blockstore.is_root(slot) { - continue; - } - if slot > ending_slot { - break; - } - - match method { - LedgerOutputMethod::Print => { - println!("Slot {} root?: {}", slot, blockstore.is_root(slot)) - } - LedgerOutputMethod::Json => { - serde_json::to_writer(stdout(), &slot_meta).expect("serialize slot_meta"); - stdout().write_all(b",\n").expect("newline"); - } - } - - if let Err(err) = output_slot( - &blockstore, - slot, - allow_dead_slots, - &method, - verbose_level, - &mut all_program_ids, - ) { - eprintln!("{err}"); - } - num_printed += 1; - if num_printed >= num_slots as usize { - break; - } - } - - if method == LedgerOutputMethod::Json { - stdout().write_all(b"\n]}\n").expect("close array"); - } else { - println!("Summary of Programs:"); - output_sorted_program_ids(all_program_ids); - } -} - -fn output_sorted_program_ids(program_ids: HashMap<Pubkey, u64>) { - let mut program_ids_array: Vec<_> = program_ids.into_iter().collect(); - // Sort descending by count of program id - program_ids_array.sort_by(|a, b| b.1.cmp(&a.1)); - for (program_id, count) in program_ids_array.iter() { - println!("{:<44}: {}", program_id.to_string(), count); - } -} - fn output_account( pubkey: &Pubkey, account: &AccountSharedData, @@ -743,17 +489,23 @@ fn analyze_column< db: &Database, name: &str, ) { + let mut key_len: u64 = 0; let mut key_tot: u64 = 0; let mut val_hist = histogram::Histogram::new(); let mut val_tot: u64 = 0; let mut row_hist = histogram::Histogram::new(); - let a = C::key_size() as u64; - for (_x, y) in db.iter::<C>(blockstore_db::IteratorMode::Start).unwrap() { - let b = y.len() as u64; - key_tot += a; - val_hist.increment(b).unwrap(); - val_tot += b; - row_hist.increment(a + b).unwrap(); + for (key, val) in db.iter::<C>(blockstore_db::IteratorMode::Start).unwrap() { + // Key length is fixed, only
need to calculate it once + if key_len == 0 { + key_len = C::key(key).len() as u64; + } + let val_len = val.len() as u64; + + key_tot += key_len; + val_hist.increment(val_len).unwrap(); + val_tot += val_len; + + row_hist.increment(key_len + val_len).unwrap(); } let json_result = if val_hist.entries() > 0 { @@ -761,7 +513,7 @@ fn analyze_column< "column":name, "entries":val_hist.entries(), "key_stats":{ - "max":a, + "max":key_len, "total_bytes":key_tot, }, "val_stats":{ @@ -790,7 +542,7 @@ fn analyze_column< "column":name, "entries":val_hist.entries(), "key_stats":{ - "max":a, + "max":key_len, "total_bytes":0, }, "val_stats":{ @@ -874,7 +626,8 @@ fn print_blockstore_file_metadata( for file in live_files { if sst_file_name.is_none() || file.name.eq(sst_file_name.as_ref().unwrap()) { println!( - "[{}] cf_name: {}, level: {}, start_slot: {:?}, end_slot: {:?}, size: {}, num_entries: {}", + "[{}] cf_name: {}, level: {}, start_slot: {:?}, end_slot: {:?}, size: {}, \ + num_entries: {}", file.name, file.column_family_name, file.level, @@ -936,7 +689,8 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> let result = cost_tracker.try_add(&tx_cost); if result.is_err() { println!( - "Slot: {slot}, CostModel rejected transaction {transaction:?}, reason {result:?}", + "Slot: {slot}, CostModel rejected transaction {transaction:?}, reason \ + {result:?}", ); } for (program_id, _instruction) in transaction.message().program_instructions_iter() @@ -947,7 +701,8 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> } println!( - "Slot: {slot}, Entries: {num_entries}, Transactions: {num_transactions}, Programs {num_programs}", + "Slot: {slot}, Entries: {num_entries}, Transactions: {num_transactions}, Programs \ + {num_programs}", ); println!(" Programs: {program_ids:?}"); @@ -1005,7 +760,7 @@ fn get_latest_optimistic_slots( if hash_and_timestamp_opt.is_none() { warn!( "Slot {slot} is an ancestor of latest optimistically confirmed slot \ - {latest_slot}, but was not marked as optimistically confirmed in blockstore." + {latest_slot}, but was not marked as optimistically confirmed in blockstore." ); } (slot, hash_and_timestamp_opt, contains_nonvote_tx) @@ -1111,16 +866,22 @@ fn main() { .value_name("MEGABYTES") .validator(is_parsable::) .takes_value(true) - .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk."); + .help( + "How much memory the accounts index can consume. If this is exceeded, some account \ + index entries will be stored on disk.", + ); let disable_disk_index = Arg::with_name("disable_accounts_disk_index") .long("disable-accounts-disk-index") - .help("Disable the disk-based accounts index. It is enabled by default. The entire accounts index will be kept in memory.") + .help( + "Disable the disk-based accounts index. It is enabled by default. The entire accounts \ + index will be kept in memory.", + ) .conflicts_with("accounts_index_memory_limit_mb"); let accountsdb_skip_shrink = Arg::with_name("accounts_db_skip_shrink") .long("accounts-db-skip-shrink") .help( - "Enables faster starting of ledger-tool by skipping shrink. \ - This option is for use during testing.", + "Enables faster starting of ledger-tool by skipping shrink. 
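// The analyze_column change above measures the serialized key length from the
// first row (keys within a column family are fixed-size) instead of using the
// static C::key_size(), and feeds value sizes into a histogram. The same
// pattern over a plain iterator, using the `histogram` crate this file already
// depends on:
//
//     fn size_stats<'a>(rows: impl Iterator<Item = (&'a [u8], &'a [u8])>) {
//         let mut key_len: u64 = 0;
//         let mut val_hist = histogram::Histogram::new();
//         let mut val_tot: u64 = 0;
//         for (key, val) in rows {
//             if key_len == 0 {
//                 key_len = key.len() as u64; // fixed per column family, sample once
//             }
//             let val_len = val.len() as u64;
//             val_hist.increment(val_len).unwrap();
//             val_tot += val_len;
//         }
//         if val_hist.entries() > 0 {
//             println!(
//                 "entries: {}, key size: {}, total value bytes: {}, p50 value size: {}",
//                 val_hist.entries(),
//                 key_len,
//                 val_tot,
//                 val_hist.percentile(50.0).unwrap(),
//             );
//         }
//     }
//
//     fn main() {
//         let rows = [(&[0u8; 8][..], &[1u8, 2, 3][..]), (&[0u8; 8][..], &[4u8][..])];
//         size_stats(rows.iter().copied());
//     }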
This option is for use \ + during testing.", ); let accountsdb_verify_refcounts = Arg::with_name("accounts_db_verify_refcounts") .long("accounts-db-verify-refcounts") @@ -1128,27 +889,14 @@ fn main() { "Debug option to scan all AppendVecs and verify account index refcounts prior to clean", ) .hidden(hidden_unless_forced()); - let accounts_db_test_skip_rewrites_but_include_in_bank_hash = Arg::with_name("accounts_db_test_skip_rewrites") - .long("accounts-db-test-skip-rewrites") - .help( - "Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation", - ) - .hidden(hidden_unless_forced()); - let accounts_filler_count = Arg::with_name("accounts_filler_count") - .long("accounts-filler-count") - .value_name("COUNT") - .validator(is_parsable::) - .takes_value(true) - .default_value("0") - .help("How many accounts to add to stress the system. Accounts are ignored in operations related to correctness."); - let accounts_filler_size = Arg::with_name("accounts_filler_size") - .long("accounts-filler-size") - .value_name("BYTES") - .validator(is_parsable::) - .takes_value(true) - .default_value("0") - .requires("accounts_filler_count") - .help("Size per filler account in bytes."); + let accounts_db_test_skip_rewrites_but_include_in_bank_hash = + Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help( + "Debug option to skip rewrites for rent-exempt accounts but still add them in \ + bank delta hash calculation", + ) + .hidden(hidden_unless_forced()); let account_paths_arg = Arg::with_name("account_paths") .long("accounts") .value_name("PATHS") @@ -1165,9 +913,8 @@ fn main() { .takes_value(true) .multiple(true) .help( - "Persistent accounts-index location. \ - May be specified multiple times. \ - [default: [ledger]/accounts_index]", + "Persistent accounts-index location. May be specified multiple times. 
[default: \ + [ledger]/accounts_index]", ); let accounts_db_test_hash_calculation_arg = Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") @@ -1178,9 +925,9 @@ fn main() { .validator(is_slot) .takes_value(true) .help("Halt processing at the given slot"); - let no_os_memory_stats_reporting_arg = Arg::with_name("no_os_memory_stats_reporting") - .long("no-os-memory-stats-reporting") - .help("Disable reporting of OS memory statistics."); + let os_memory_stats_reporting_arg = Arg::with_name("os_memory_stats_reporting") + .long("os-memory-stats-reporting") + .help("Enable reporting of OS memory statistics."); let accounts_db_skip_initial_hash_calc_arg = Arg::with_name("accounts_db_skip_initial_hash_calculation") .long("accounts-db-skip-initial-hash-calculation") @@ -1196,19 +943,26 @@ fn main() { ) .hidden(hidden_unless_forced()); let halt_at_slot_store_hash_raw_data = Arg::with_name("halt_at_slot_store_hash_raw_data") - .long("halt-at-slot-store-hash-raw-data") - .help("After halting at slot, run an accounts hash calculation and store the raw hash data for debugging.") - .hidden(hidden_unless_forced()); + .long("halt-at-slot-store-hash-raw-data") + .help( + "After halting at slot, run an accounts hash calculation and store the raw hash data \ + for debugging.", + ) + .hidden(hidden_unless_forced()); let verify_index_arg = Arg::with_name("verify_accounts_index") .long("verify-accounts-index") .takes_value(false) .help("For debugging and tests on accounts index."); - let limit_load_slot_count_from_snapshot_arg = Arg::with_name("limit_load_slot_count_from_snapshot") - .long("limit-load-slot-count-from-snapshot") - .value_name("SLOT") - .validator(is_slot) - .takes_value(true) - .help("For debugging and profiling with large snapshots, artificially limit how many slots are loaded from a snapshot."); + let limit_load_slot_count_from_snapshot_arg = + Arg::with_name("limit_load_slot_count_from_snapshot") + .long("limit-load-slot-count-from-snapshot") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .help( + "For debugging and profiling with large snapshots, artificially limit how many \ + slots are loaded from a snapshot.", + ); let hard_forks_arg = Arg::with_name("hard_forks") .long("hard-fork") .value_name("SLOT") @@ -1232,9 +986,8 @@ fn main() { .value_name("NUM_HASHES|\"sleep\"") .takes_value(true) .help( - "How many PoH hashes to roll before emitting the next tick. \ - If \"sleep\", for development \ - sleep for the target tick duration instead of hashing", + "How many PoH hashes to roll before emitting the next tick. 
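// The hunk above flips an opt-out flag (--no-os-memory-stats-reporting) into an
// opt-in one (--os-memory-stats-reporting), so downstream reads lose their
// negation. A hypothetical clap 2.x sketch of the consuming side:
//
//     use clap::{App, Arg};
//
//     fn main() {
//         let matches = App::new("demo")
//             .arg(
//                 Arg::with_name("os_memory_stats_reporting")
//                     .long("os-memory-stats-reporting")
//                     .help("Enable reporting of OS memory statistics."),
//             )
//             .get_matches_from(vec!["demo"]);
//
//         // Before: !matches.is_present("no_os_memory_stats_reporting")
//         // After: absence of the flag now means "disabled"
//         let report_os_memory_stats = matches.is_present("os_memory_stats_reporting");
//         println!("report_os_memory_stats = {report_os_memory_stats}");
//     }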
If \"sleep\", for \ + development sleep for the target tick duration instead of hashing", ); let snapshot_version_arg = Arg::with_name("snapshot_version") .long("snapshot-version") @@ -1255,36 +1008,38 @@ fn main() { .long(use_snapshot_archives_at_startup::cli::LONG_ARG) .takes_value(true) .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) - .default_value(use_snapshot_archives_at_startup::cli::default_value()) + .default_value(use_snapshot_archives_at_startup::cli::default_value_for_ledger_tool()) .help(use_snapshot_archives_at_startup::cli::HELP) .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP); let default_max_full_snapshot_archives_to_retain = &DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(); - let maximum_full_snapshot_archives_to_retain = Arg::with_name( - "maximum_full_snapshots_to_retain", - ) - .long("maximum-full-snapshots-to-retain") - .alias("maximum-snapshots-to-retain") - .value_name("NUMBER") - .takes_value(true) - .default_value(default_max_full_snapshot_archives_to_retain) - .validator(validate_maximum_full_snapshot_archives_to_retain) - .help( - "The maximum number of full snapshot archives to hold on to when purging older snapshots.", - ); + let maximum_full_snapshot_archives_to_retain = + Arg::with_name("maximum_full_snapshots_to_retain") + .long("maximum-full-snapshots-to-retain") + .alias("maximum-snapshots-to-retain") + .value_name("NUMBER") + .takes_value(true) + .default_value(default_max_full_snapshot_archives_to_retain) + .validator(validate_maximum_full_snapshot_archives_to_retain) + .help( + "The maximum number of full snapshot archives to hold on to when purging older \ + snapshots.", + ); let default_max_incremental_snapshot_archives_to_retain = &DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(); - let maximum_incremental_snapshot_archives_to_retain = Arg::with_name( - "maximum_incremental_snapshots_to_retain", - ) - .long("maximum-incremental-snapshots-to-retain") - .value_name("NUMBER") - .takes_value(true) - .default_value(default_max_incremental_snapshot_archives_to_retain) - .validator(validate_maximum_incremental_snapshot_archives_to_retain) - .help("The maximum number of incremental snapshot archives to hold on to when purging older snapshots."); + let maximum_incremental_snapshot_archives_to_retain = + Arg::with_name("maximum_incremental_snapshots_to_retain") + .long("maximum-incremental-snapshots-to-retain") + .value_name("NUMBER") + .takes_value(true) + .default_value(default_max_incremental_snapshot_archives_to_retain) + .validator(validate_maximum_incremental_snapshot_archives_to_retain) + .help( + "The maximum number of incremental snapshot archives to hold on to when purging \ + older snapshots.", + ); let geyser_plugin_args = Arg::with_name("geyser_plugin_config") .long("geyser-plugin-config") @@ -1337,27 +1092,30 @@ fn main() { "tolerate_corrupted_tail_records", "absolute_consistency", "point_in_time", - "skip_any_corrupted_record"]) - .help( - "Mode to recovery the ledger db write ahead log" - ), + "skip_any_corrupted_record", + ]) + .help("Mode to recovery the ledger db write ahead log"), ) .arg( Arg::with_name("force_update_to_open") .long("force-update-to-open") .takes_value(false) .global(true) - .help("Allow commands that would otherwise not alter the \ - blockstore to make necessary updates in order to open it"), + .help( + "Allow commands that would otherwise not alter the blockstore to make \ + necessary updates in order to open it", + ), ) .arg( 
Arg::with_name("ignore_ulimit_nofile_error") .long("ignore-ulimit-nofile-error") .value_name("FORMAT") .global(true) - .help("Allow opening the blockstore to succeed even if the desired open file \ - descriptor limit cannot be configured. Use with caution as some commands may \ - run fine with a reduced file descriptor limit while others will not"), + .help( + "Allow opening the blockstore to succeed even if the desired open file \ + descriptor limit cannot be configured. Use with caution as some commands may \ + run fine with a reduced file descriptor limit while others will not", + ), ) .arg( Arg::with_name("snapshot_archive_path") @@ -1392,8 +1150,10 @@ fn main() { .global(true) .takes_value(true) .possible_values(&["json", "json-compact"]) - .help("Return information in specified output format, \ - currently only available for bigtable and program subcommands"), + .help( + "Return information in specified output format, currently only available for \ + bigtable and program subcommands", + ), ) .arg( Arg::with_name("verbose") @@ -1407,737 +1167,791 @@ fn main() { .bigtable_subcommand() .subcommand( SubCommand::with_name("print") - .about("Print the ledger") - .arg(&starting_slot_arg) - .arg(&allow_dead_slots_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("num_slots") - .long("num-slots") - .value_name("SLOT") - .validator(is_slot) - .takes_value(true) - .help("Number of slots to print"), - ) - .arg( - Arg::with_name("only_rooted") - .long("only-rooted") - .takes_value(false) - .help("Only print root slots"), - ) + .about("Print the ledger") + .arg(&starting_slot_arg) + .arg(&allow_dead_slots_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("num_slots") + .long("num-slots") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .help("Number of slots to print"), + ) + .arg( + Arg::with_name("only_rooted") + .long("only-rooted") + .takes_value(false) + .help("Only print root slots"), + ), ) .subcommand( SubCommand::with_name("copy") - .about("Copy the ledger") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("target_db") - .long("target-db") - .value_name("DIR") - .takes_value(true) - .help("Target db"), - ) + .about("Copy the ledger") + .arg(&starting_slot_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("target_db") + .long("target-db") + .value_name("DIR") + .takes_value(true) + .help("Target db"), + ), ) .subcommand( SubCommand::with_name("slot") - .about("Print the contents of one or more slots") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to print"), - ) - .arg(&allow_dead_slots_arg) + .about("Print the contents of one or more slots") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to print"), + ) + .arg(&allow_dead_slots_arg), ) .subcommand( SubCommand::with_name("dead-slots") - .arg(&starting_slot_arg) - .about("Print all the dead slots in the ledger") + .arg(&starting_slot_arg) + .about("Print all the dead slots in the ledger"), ) .subcommand( SubCommand::with_name("duplicate-slots") - .arg(&starting_slot_arg) - .about("Print all the duplicate slots in the ledger") + .arg(&starting_slot_arg) + .about("Print all the duplicate slots in the ledger"), ) .subcommand( SubCommand::with_name("set-dead-slot") - .about("Mark one or more slots dead") - .arg( - Arg::with_name("slots") - .index(1) - 
.value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to mark dead"), - ) + .about("Mark one or more slots dead") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to mark dead"), + ), ) .subcommand( SubCommand::with_name("remove-dead-slot") - .about("Remove the dead flag for a slot") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to mark as not dead"), - ) + .about("Remove the dead flag for a slot") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to mark as not dead"), + ), ) .subcommand( SubCommand::with_name("genesis") - .about("Prints the ledger's genesis config") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg( - Arg::with_name("accounts") - .long("accounts") - .takes_value(false) - .help("Print the ledger's genesis accounts"), - ) - .arg( - Arg::with_name("no_account_data") - .long("no-account-data") - .takes_value(false) - .requires("accounts") - .help("Do not print account data when printing account contents."), - ) - .arg(&accounts_data_encoding_arg) + .about("Prints the ledger's genesis config") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg( + Arg::with_name("accounts") + .long("accounts") + .takes_value(false) + .help("Print the ledger's genesis accounts"), + ) + .arg( + Arg::with_name("no_account_data") + .long("no-account-data") + .takes_value(false) + .requires("accounts") + .help("Do not print account data when printing account contents."), + ) + .arg(&accounts_data_encoding_arg), ) .subcommand( SubCommand::with_name("genesis-hash") - .about("Prints the ledger's genesis hash") - .arg(&max_genesis_archive_unpacked_size_arg) + .about("Prints the ledger's genesis hash") + .arg(&max_genesis_archive_unpacked_size_arg), ) .subcommand( SubCommand::with_name("parse_full_frozen") - .about("Parses log for information about critical events about \ - ancestors of the given `ending_slot`") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("log_path") - .long("log-path") - .value_name("PATH") - .takes_value(true) - .help("path to log file to parse"), - ) + .about( + "Parses log for information about critical events about ancestors of the \ + given `ending_slot`", + ) + .arg(&starting_slot_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("log_path") + .long("log-path") + .value_name("PATH") + .takes_value(true) + .help("path to log file to parse"), + ), ) .subcommand( SubCommand::with_name("modify-genesis") - .about("Modifies genesis parameters") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&hashes_per_tick) - .arg( - Arg::with_name("cluster_type") - .long("cluster-type") - .possible_values(&ClusterType::STRINGS) - .takes_value(true) - .help( - "Selects the features that will be enabled for the cluster" - ), - ) - .arg( - Arg::with_name("output_directory") - .index(1) - .value_name("DIR") - .takes_value(true) - .help("Output directory for the modified genesis config"), - ) + .about("Modifies genesis parameters") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&hashes_per_tick) + .arg( + Arg::with_name("cluster_type") + .long("cluster-type") + .possible_values(&ClusterType::STRINGS) + .takes_value(true) + .help("Selects the features 
that will be enabled for the cluster"), + ) + .arg( + Arg::with_name("output_directory") + .index(1) + .value_name("DIR") + .takes_value(true) + .help("Output directory for the modified genesis config"), + ), ) .subcommand( SubCommand::with_name("shred-version") - .about("Prints the ledger's shred hash") - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .about("Prints the ledger's shred hash") + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash), ) .subcommand( SubCommand::with_name("shred-meta") - .about("Prints raw shred metadata") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) + .about("Prints raw shred metadata") + .arg(&starting_slot_arg) + .arg(&ending_slot_arg), ) .subcommand( SubCommand::with_name("bank-hash") - .about("Prints the hash of the working bank after reading the ledger") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&halt_at_slot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .about("Prints the hash of the working bank after reading the ledger") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&halt_at_slot_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash), ) .subcommand( SubCommand::with_name("bounds") - .about( - "Print lowest and highest non-empty slots. \ - Note that there may be empty slots within the bounds", - ) - .arg( - Arg::with_name("all") - .long("all") - .takes_value(false) - .required(false) - .help("Additionally print all the non-empty slots within the bounds"), - ) + .about( + "Print lowest and highest non-empty slots. 
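// shred-version, bank-hash, and friends above attach the same prebuilt Args by
// reference (.arg(&accounts_index_bins), ...). clap 2.x permits this because
// &Arg converts into an owned Arg via From/Into; a reduced sketch:
//
//     use clap::{App, Arg, SubCommand};
//
//     fn main() {
//         // Define shared args once...
//         let halt_at_slot = Arg::with_name("halt_at_slot")
//             .long("halt-at-slot")
//             .value_name("SLOT")
//             .takes_value(true);
//         // ...then attach them to any number of subcommands by reference.
//         let matches = App::new("demo")
//             .subcommand(SubCommand::with_name("shred-version").arg(&halt_at_slot))
//             .subcommand(SubCommand::with_name("bank-hash").arg(&halt_at_slot))
//             .get_matches_from(vec!["demo", "bank-hash", "--halt-at-slot", "42"]);
//         if let ("bank-hash", Some(arg_matches)) = matches.subcommand() {
//             println!("halt at: {:?}", arg_matches.value_of("halt_at_slot"));
//         }
//     }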
Note that there may be empty slots \ + within the bounds", + ) + .arg( + Arg::with_name("all") + .long("all") + .takes_value(false) + .required(false) + .help("Additionally print all the non-empty slots within the bounds"), + ), ) .subcommand( SubCommand::with_name("json") - .about("Print the ledger in JSON format") - .arg(&starting_slot_arg) - .arg(&allow_dead_slots_arg) + .about("Print the ledger in JSON format") + .arg(&starting_slot_arg) + .arg(&allow_dead_slots_arg), ) .subcommand( SubCommand::with_name("verify") - .about("Verify the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_path_arg) - .arg(&halt_at_slot_arg) - .arg(&limit_load_slot_count_from_snapshot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_skip_shrink) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_filler_count) - .arg(&accounts_filler_size) - .arg(&verify_index_arg) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&ancient_append_vecs) - .arg(&halt_at_slot_store_hash_raw_data) - .arg(&hard_forks_arg) - .arg(&accounts_db_test_hash_calculation_arg) - .arg(&no_os_memory_stats_reporting_arg) - .arg(&allow_dead_slots_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&debug_key_arg) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("skip_poh_verify") - .long("skip-poh-verify") - .takes_value(false) - .help( - "Deprecated, please use --skip-verification.\n\ - Skip ledger PoH and transaction verification." - ), - ) - .arg( - Arg::with_name("skip_verification") - .long("skip-verification") - .takes_value(false) - .help("Skip ledger PoH and transaction verification."), - ) - .arg( - Arg::with_name("enable_rpc_transaction_history") - .long("enable-rpc-transaction-history") - .takes_value(false) - .help("Store transaction info for processed slots into local ledger"), - ) - .arg( - Arg::with_name("run_final_hash_calc") - .long("run-final-accounts-hash-calculation") - .takes_value(false) - .help("After 'verify' completes, run a final accounts hash calculation. Final hash calculation could race with accounts background service tasks and assert."), - ) - .arg( - Arg::with_name("partitioned_epoch_rewards_compare_calculation") - .long("partitioned-epoch-rewards-compare-calculation") - .takes_value(false) - .help("Do normal epoch rewards distribution, but also calculate rewards using the partitioned rewards code path and compare the resulting vote and stake accounts") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") - .long("partitioned-epoch-rewards-force-enable-single-slot") - .takes_value(false) - .help("Force the partitioned rewards distribution, but distribute all rewards in the first slot in the epoch. This should match consensus with the normal rewards distribution.") - .conflicts_with("partitioned_epoch_rewards_compare_calculation") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("print_accounts_stats") - .long("print-accounts-stats") - .takes_value(false) - .help("After verifying the ledger, print some information about the account stores"), - ) - .arg( - Arg::with_name("write_bank_file") - .long("write-bank-file") - .takes_value(false) - .help("After verifying the ledger, write a file that contains the information \ - that went into computing the completed bank's bank hash. 
The file will be \ - written within /bank_hash_details/"), - ) - ).subcommand( - SubCommand::with_name("graph") - .about("Create a Graphviz rendering of the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("include_all_votes") - .long("include-all-votes") - .help("Include all votes in the graph"), - ) - .arg( - Arg::with_name("graph_filename") - .index(1) - .value_name("FILENAME") - .takes_value(true) - .help("Output file"), - ) - .arg( - Arg::with_name("vote_account_mode") - .long("vote-account-mode") - .takes_value(true) - .value_name("MODE") - .default_value(default_graph_vote_account_mode.as_ref()) - .possible_values(GraphVoteAccountMode::ALL_MODE_STRINGS) - .help("Specify if and how to graph vote accounts. Enabling will incur significant rendering overhead, especially `with-history`") - ) - ).subcommand( - SubCommand::with_name("create-snapshot") - .about("Create a new ledger snapshot") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accountsdb_skip_shrink) - .arg(&ancient_append_vecs) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&snapshot_version_arg) - .arg(&maximum_full_snapshot_archives_to_retain) - .arg(&maximum_incremental_snapshot_archives_to_retain) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("snapshot_slot") - .index(1) - .value_name("SLOT") - .validator(|value| { - if value.parse::().is_ok() - || value == "ROOT" - { - Ok(()) - } else { - Err(format!( - "Unable to parse as a number or the keyword ROOT, provided: {value}" - )) - } - }) - .takes_value(true) - .help("Slot at which to create the snapshot; accepts keyword ROOT for the highest root"), - ) - .arg( - Arg::with_name("output_directory") - .index(2) - .value_name("DIR") - .takes_value(true) - .help("Output directory for the snapshot [default: --snapshot-archive-path if present else --ledger directory]"), - ) - .arg( - Arg::with_name("warp_slot") - .required(false) - .long("warp-slot") - .takes_value(true) - .value_name("WARP_SLOT") - .validator(is_slot) - .help("After loading the snapshot slot warp the ledger to WARP_SLOT, \ - which could be a slot in a galaxy far far away"), - ) - .arg( - Arg::with_name("faucet_lamports") - .short("t") - .long("faucet-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .requires("faucet_pubkey") - .help("Number of lamports to assign to the faucet"), - ) - .arg( - Arg::with_name("faucet_pubkey") - .short("m") - .long("faucet-pubkey") - .value_name("PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .requires("faucet_lamports") - .help("Path to file containing the faucet's pubkey"), - ) - .arg( - Arg::with_name("bootstrap_validator") - .short("b") - .long("bootstrap-validator") - .value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY") - .takes_value(true) - 
.validator(is_pubkey_or_keypair) - .number_of_values(3) - .multiple(true) - .help("The bootstrap validator's identity, vote and stake pubkeys"), - ) - .arg( - Arg::with_name("bootstrap_stake_authorized_pubkey") - .long("bootstrap-stake-authorized-pubkey") - .value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .help( - "Path to file containing the pubkey authorized to manage the bootstrap \ - validator's stake [default: --bootstrap-validator IDENTITY_PUBKEY]", - ), - ) - .arg( - Arg::with_name("bootstrap_validator_lamports") - .long("bootstrap-validator-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .default_value(default_bootstrap_validator_lamports) - .help("Number of lamports to assign to the bootstrap validator"), - ) - .arg( - Arg::with_name("bootstrap_validator_stake_lamports") - .long("bootstrap-validator-stake-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .default_value(default_bootstrap_validator_stake_lamports) - .help("Number of lamports to assign to the bootstrap validator's stake account"), - ) - .arg( - Arg::with_name("rent_burn_percentage") - .long("rent-burn-percentage") - .value_name("NUMBER") - .takes_value(true) - .help("Adjust percentage of collected rent to burn") - .validator(is_valid_percentage), - ) - .arg(&hashes_per_tick) - .arg( - Arg::with_name("accounts_to_remove") - .required(false) - .long("remove-account") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of accounts to remove while creating the snapshot"), - ) - .arg( - Arg::with_name("feature_gates_to_deactivate") - .required(false) - .long("deactivate-feature-gate") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of feature gates to deactivate while creating the snapshot") - ) - .arg( - Arg::with_name("vote_accounts_to_destake") - .required(false) - .long("destake-vote-account") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of validator vote accounts to destake") - ) - .arg( - Arg::with_name("remove_stake_accounts") - .required(false) - .long("remove-stake-accounts") - .takes_value(false) - .help("Remove all existing stake accounts from the new snapshot") - ) - .arg( - Arg::with_name("incremental") - .long("incremental") - .takes_value(false) - .help("Create an incremental snapshot instead of a full snapshot. This requires \ - that the ledger is loaded from a full snapshot, which will be used as the \ - base for the incremental snapshot.") - .conflicts_with("no_snapshot") - ) - .arg( - Arg::with_name("minimized") - .long("minimized") - .takes_value(false) - .help("Create a minimized snapshot instead of a full snapshot. 
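// --bootstrap-validator here takes its pubkeys in groups of three
// (number_of_values(3) plus multiple(true)); consumers read the flat list back
// and chunk it. Hypothetical sketch, with strings standing in for real pubkeys:
//
//     use clap::{App, Arg};
//
//     fn main() {
//         let matches = App::new("demo")
//             .arg(
//                 Arg::with_name("bootstrap_validator")
//                     .long("bootstrap-validator")
//                     .takes_value(true)
//                     .number_of_values(3)
//                     .multiple(true),
//             )
//             .get_matches_from(vec![
//                 "demo", "--bootstrap-validator", "ID1", "VOTE1", "STAKE1",
//             ]);
//         let values: Vec<&str> = matches.values_of("bootstrap_validator").unwrap().collect();
//         for triple in values.chunks(3) {
//             println!("identity={} vote={} stake={}", triple[0], triple[1], triple[2]);
//         }
//     }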
This snapshot \ - will only include information needed to replay the ledger from the \ - snapshot slot to the ending slot.") - .conflicts_with("incremental") - .requires("ending_slot") - ) - .arg( - Arg::with_name("ending_slot") - .long("ending-slot") - .takes_value(true) - .value_name("ENDING_SLOT") - .help("Ending slot for minimized snapshot creation") - ) - .arg( - Arg::with_name("snapshot_archive_format") - .long("snapshot-archive-format") - .possible_values(SUPPORTED_ARCHIVE_COMPRESSION) - .default_value(DEFAULT_ARCHIVE_COMPRESSION) - .value_name("ARCHIVE_TYPE") - .takes_value(true) - .help("Snapshot archive format to use.") - .conflicts_with("no_snapshot") - ) - ).subcommand( - SubCommand::with_name("accounts") - .about("Print account stats and contents after processing the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&geyser_plugin_args) - .arg(&accounts_data_encoding_arg) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("include_sysvars") - .long("include-sysvars") - .takes_value(false) - .help("Include sysvars too"), - ) - .arg( - Arg::with_name("no_account_contents") - .long("no-account-contents") - .takes_value(false) - .help("Do not print contents of each account, which is very slow with lots of accounts."), - ) - .arg(Arg::with_name("no_account_data") - .long("no-account-data") - .takes_value(false) - .help("Do not print account data when printing account contents."), - ) - .arg(&max_genesis_archive_unpacked_size_arg) - ).subcommand( - SubCommand::with_name("capitalization") - .about("Print capitalization (aka, total supply) while checksumming it") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("warp_epoch") - .required(false) - .long("warp-epoch") - .takes_value(true) - .value_name("WARP_EPOCH") - .help("After loading the snapshot warp the ledger to WARP_EPOCH, \ - which could be an epoch in a galaxy far far away"), - ) - .arg( - Arg::with_name("inflation") - .required(false) - .long("inflation") - .takes_value(true) - .possible_values(&["pico", "full", "none"]) - .help("Overwrite inflation when warping"), - ) - .arg( - Arg::with_name("enable_credits_auto_rewind") - .required(false) - .long("enable-credits-auto-rewind") - .takes_value(false) - .help("Enable credits auto rewind"), - ) - .arg( - Arg::with_name("recalculate_capitalization") - .required(false) - .long("recalculate-capitalization") - .takes_value(false) - .help("Recalculate capitalization before warping; circumvents \ - bank's out-of-sync capitalization"), - ) - .arg( - Arg::with_name("csv_filename") - .long("csv-filename") - .value_name("FILENAME") - .takes_value(true) - .help("Output file in the csv format"), - ) - ).subcommand( - SubCommand::with_name("purge") - .about("Delete a range of 
slots from the ledger") - .arg( - Arg::with_name("start_slot") - .index(1) - .value_name("SLOT") - .takes_value(true) - .required(true) - .help("Start slot to purge from (inclusive)"), - ) - .arg( - Arg::with_name("end_slot") - .index(2) - .value_name("SLOT") - .help("Ending slot to stop purging (inclusive) \ - [default: the highest slot in the ledger]"), - ) - .arg( - Arg::with_name("batch_size") - .long("batch-size") - .value_name("NUM") - .takes_value(true) - .default_value("1000") - .help("Removes at most BATCH_SIZE slots while purging in loop"), - ) - .arg( - Arg::with_name("no_compaction") - .long("no-compaction") - .required(false) - .takes_value(false) - .help("--no-compaction is deprecated, ledger compaction \ - after purge is disabled by default") - .conflicts_with("enable_compaction") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("enable_compaction") - .long("enable-compaction") - .required(false) - .takes_value(false) - .help("Perform ledger compaction after purge. Compaction \ - will optimize storage space, but may take a long \ - time to complete.") - .conflicts_with("no_compaction") - ) - .arg( - Arg::with_name("dead_slots_only") - .long("dead-slots-only") - .required(false) - .takes_value(false) - .help("Limit purging to dead slots only") - ) - ) - .subcommand( - SubCommand::with_name("list-roots") - .about("Output up to last root hashes and their \ - heights starting at the given block height") - .arg( - Arg::with_name("max_height") - .long("max-height") - .value_name("NUM") - .takes_value(true) - .help("Maximum block height") - ) - .arg( - Arg::with_name("start_root") - .long("start-root") - .value_name("NUM") - .takes_value(true) - .help("First root to start searching from") - ) - .arg( - Arg::with_name("slot_list") - .long("slot-list") - .value_name("FILENAME") - .required(false) - .takes_value(true) - .help("The location of the output YAML file. 
A list of \ - rollback slot heights and hashes will be written to the file") - ) - .arg( - Arg::with_name("num_roots") - .long("num-roots") - .value_name("NUM") - .takes_value(true) - .default_value(DEFAULT_ROOT_COUNT) - .required(false) - .help("Number of roots in the output"), - ) - ) - .subcommand( - SubCommand::with_name("latest-optimistic-slots") - .about("Output up to the most recent optimistic \ - slots with their hashes and timestamps.") + .about("Verify the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_path_arg) + .arg(&halt_at_slot_arg) + .arg(&limit_load_slot_count_from_snapshot_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_skip_shrink) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&verify_index_arg) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&ancient_append_vecs) + .arg(&halt_at_slot_store_hash_raw_data) + .arg(&hard_forks_arg) + .arg(&accounts_db_test_hash_calculation_arg) + .arg(&os_memory_stats_reporting_arg) + .arg(&allow_dead_slots_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&debug_key_arg) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) .arg( - Arg::with_name("num_slots") - .long("num-slots") - .value_name("NUM") - .takes_value(true) - .default_value(DEFAULT_LATEST_OPTIMISTIC_SLOTS_COUNT) - .required(false) - .help("Number of slots in the output"), + Arg::with_name("skip_poh_verify") + .long("skip-poh-verify") + .takes_value(false) + .help( + "Deprecated, please use --skip-verification. Skip ledger PoH and \ + transaction verification.", + ), ) .arg( - Arg::with_name("exclude_vote_only_slots") - .long("exclude-vote-only-slots") + Arg::with_name("skip_verification") + .long("skip-verification") + .takes_value(false) + .help("Skip ledger PoH and transaction verification."), + ) + .arg( + Arg::with_name("enable_rpc_transaction_history") + .long("enable-rpc-transaction-history") + .takes_value(false) + .help("Store transaction info for processed slots into local ledger"), + ) + .arg( + Arg::with_name("run_final_hash_calc") + .long("run-final-accounts-hash-calculation") + .takes_value(false) + .help( + "After 'verify' completes, run a final accounts hash calculation. \ + Final hash calculation could race with accounts background service \ + tasks and assert.", + ), + ) + .arg( + Arg::with_name("partitioned_epoch_rewards_compare_calculation") + .long("partitioned-epoch-rewards-compare-calculation") + .takes_value(false) + .help( + "Do normal epoch rewards distribution, but also calculate rewards \ + using the partitioned rewards code path and compare the resulting \ + vote and stake accounts", + ) + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") + .long("partitioned-epoch-rewards-force-enable-single-slot") + .takes_value(false) + .help( + "Force the partitioned rewards distribution, but distribute all \ + rewards in the first slot in the epoch. 
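// verify keeps the deprecated --skip-poh-verify alongside its replacement
// --skip-verification (see the args above). A hypothetical sketch of honoring
// either flag while nudging users forward; the real file reads these with
// is_present on the subcommand matches:
//
//     use clap::{App, Arg};
//
//     fn main() {
//         let matches = App::new("demo")
//             .arg(Arg::with_name("skip_poh_verify").long("skip-poh-verify"))
//             .arg(Arg::with_name("skip_verification").long("skip-verification"))
//             .get_matches_from(vec!["demo", "--skip-poh-verify"]);
//
//         if matches.is_present("skip_poh_verify") {
//             eprintln!("--skip-poh-verify is deprecated, use --skip-verification");
//         }
//         let skip_verification =
//             matches.is_present("skip_verification") || matches.is_present("skip_poh_verify");
//         println!("skip_verification = {skip_verification}");
//     }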
This should match consensus \ + with the normal rewards distribution.", + ) + .conflicts_with("partitioned_epoch_rewards_compare_calculation") + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("print_accounts_stats") + .long("print-accounts-stats") + .takes_value(false) + .help( + "After verifying the ledger, print some information about the account \ + stores", + ), + ) + .arg( + Arg::with_name("write_bank_file") + .long("write-bank-file") + .takes_value(false) + .help( + "After verifying the ledger, write a file that contains the \ + information that went into computing the completed bank's bank hash. \ + The file will be written within /bank_hash_details/", + ), + ), + ) + .subcommand( + SubCommand::with_name("graph") + .about("Create a Graphviz rendering of the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("include_all_votes") + .long("include-all-votes") + .help("Include all votes in the graph"), + ) + .arg( + Arg::with_name("graph_filename") + .index(1) + .value_name("FILENAME") + .takes_value(true) + .help("Output file"), + ) + .arg( + Arg::with_name("vote_account_mode") + .long("vote-account-mode") + .takes_value(true) + .value_name("MODE") + .default_value(default_graph_vote_account_mode.as_ref()) + .possible_values(GraphVoteAccountMode::ALL_MODE_STRINGS) + .help( + "Specify if and how to graph vote accounts. 
Enabling will incur \ + significant rendering overhead, especially `with-history`", + ), + ), + ) + .subcommand( + SubCommand::with_name("create-snapshot") + .about("Create a new ledger snapshot") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accountsdb_skip_shrink) + .arg(&ancient_append_vecs) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&snapshot_version_arg) + .arg(&maximum_full_snapshot_archives_to_retain) + .arg(&maximum_incremental_snapshot_archives_to_retain) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("snapshot_slot") + .index(1) + .value_name("SLOT") + .validator(|value| { + if value.parse::().is_ok() || value == "ROOT" { + Ok(()) + } else { + Err(format!( + "Unable to parse as a number or the keyword ROOT, provided: \ + {value}" + )) + } + }) + .takes_value(true) + .help( + "Slot at which to create the snapshot; accepts keyword ROOT for the \ + highest root", + ), + ) + .arg( + Arg::with_name("output_directory") + .index(2) + .value_name("DIR") + .takes_value(true) + .help( + "Output directory for the snapshot \ + [default: --snapshot-archive-path if present else --ledger directory]", + ), + ) + .arg( + Arg::with_name("warp_slot") .required(false) - .help("Exclude slots that contain only votes from output"), + .long("warp-slot") + .takes_value(true) + .value_name("WARP_SLOT") + .validator(is_slot) + .help( + "After loading the snapshot slot warp the ledger to WARP_SLOT, which \ + could be a slot in a galaxy far far away", + ), + ) + .arg( + Arg::with_name("faucet_lamports") + .short("t") + .long("faucet-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .requires("faucet_pubkey") + .help("Number of lamports to assign to the faucet"), + ) + .arg( + Arg::with_name("faucet_pubkey") + .short("m") + .long("faucet-pubkey") + .value_name("PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .requires("faucet_lamports") + .help("Path to file containing the faucet's pubkey"), + ) + .arg( + Arg::with_name("bootstrap_validator") + .short("b") + .long("bootstrap-validator") + .value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .number_of_values(3) + .multiple(true) + .help("The bootstrap validator's identity, vote and stake pubkeys"), ) + .arg( + Arg::with_name("bootstrap_stake_authorized_pubkey") + .long("bootstrap-stake-authorized-pubkey") + .value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .help( + "Path to file containing the pubkey authorized to manage the \ + bootstrap validator's stake + [default: --bootstrap-validator IDENTITY_PUBKEY]", + ), + ) + .arg( + Arg::with_name("bootstrap_validator_lamports") + .long("bootstrap-validator-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .default_value(default_bootstrap_validator_lamports) + .help("Number of lamports to assign to the bootstrap validator"), + ) + .arg( + Arg::with_name("bootstrap_validator_stake_lamports") + .long("bootstrap-validator-stake-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .default_value(default_bootstrap_validator_stake_lamports) + .help( + "Number of lamports to assign to the 
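// The snapshot_slot validator just below accepts a numeric slot or the literal
// keyword ROOT (the flattened diff dropped the turbofish: "value.parse::()"
// reads value.parse::<Slot>() in the source). Standalone sketch:
//
//     type Slot = u64; // solana_sdk::clock::Slot is an alias for u64
//
//     fn validate_slot_or_root(value: String) -> Result<(), String> {
//         if value.parse::<Slot>().is_ok() || value == "ROOT" {
//             Ok(())
//         } else {
//             Err(format!(
//                 "Unable to parse as a number or the keyword ROOT, provided: {value}"
//             ))
//         }
//     }
//
//     fn main() {
//         assert!(validate_slot_or_root("12345".into()).is_ok());
//         assert!(validate_slot_or_root("ROOT".into()).is_ok());
//         assert!(validate_slot_or_root("latest".into()).is_err());
//     }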
bootstrap validator's stake \ + account", + ), + ) + .arg( + Arg::with_name("rent_burn_percentage") + .long("rent-burn-percentage") + .value_name("NUMBER") + .takes_value(true) + .help("Adjust percentage of collected rent to burn") + .validator(is_valid_percentage), + ) + .arg(&hashes_per_tick) + .arg( + Arg::with_name("accounts_to_remove") + .required(false) + .long("remove-account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of accounts to remove while creating the snapshot"), + ) + .arg( + Arg::with_name("feature_gates_to_deactivate") + .required(false) + .long("deactivate-feature-gate") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of feature gates to deactivate while creating the snapshot"), + ) + .arg( + Arg::with_name("vote_accounts_to_destake") + .required(false) + .long("destake-vote-account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of validator vote accounts to destake"), + ) + .arg( + Arg::with_name("remove_stake_accounts") + .required(false) + .long("remove-stake-accounts") + .takes_value(false) + .help("Remove all existing stake accounts from the new snapshot"), + ) + .arg( + Arg::with_name("incremental") + .long("incremental") + .takes_value(false) + .help( + "Create an incremental snapshot instead of a full snapshot. This \ + requires that the ledger is loaded from a full snapshot, which will \ + be used as the base for the incremental snapshot.", + ) + .conflicts_with("no_snapshot"), + ) + .arg( + Arg::with_name("minimized") + .long("minimized") + .takes_value(false) + .help( + "Create a minimized snapshot instead of a full snapshot. This \ + snapshot will only include information needed to replay the ledger \ + from the snapshot slot to the ending slot.", + ) + .conflicts_with("incremental") + .requires("ending_slot"), + ) + .arg( + Arg::with_name("ending_slot") + .long("ending-slot") + .takes_value(true) + .value_name("ENDING_SLOT") + .help("Ending slot for minimized snapshot creation"), + ) + .arg( + Arg::with_name("snapshot_archive_format") + .long("snapshot-archive-format") + .possible_values(SUPPORTED_ARCHIVE_COMPRESSION) + .default_value(DEFAULT_ARCHIVE_COMPRESSION) + .value_name("ARCHIVE_TYPE") + .takes_value(true) + .help("Snapshot archive format to use.") + .conflicts_with("no_snapshot"), + ), + ) + .subcommand( + SubCommand::with_name("accounts") + .about("Print account stats and contents after processing the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&geyser_plugin_args) + .arg(&accounts_data_encoding_arg) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("include_sysvars") + .long("include-sysvars") + .takes_value(false) + .help("Include sysvars too"), + ) + .arg( + Arg::with_name("no_account_contents") + .long("no-account-contents") + .takes_value(false) + .help( + "Do not print contents of each account, which is very slow with lots \ + of accounts.", + ), + ) + .arg( + Arg::with_name("no_account_data") + .long("no-account-data") + .takes_value(false) + .help("Do not print account data when printing account contents."), + ) + 
.arg(&max_genesis_archive_unpacked_size_arg), + ) + .subcommand( + SubCommand::with_name("capitalization") + .about("Print capitalization (aka, total supply) while checksumming it") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("warp_epoch") + .required(false) + .long("warp-epoch") + .takes_value(true) + .value_name("WARP_EPOCH") + .help( + "After loading the snapshot warp the ledger to WARP_EPOCH, which \ + could be an epoch in a galaxy far far away", + ), + ) + .arg( + Arg::with_name("inflation") + .required(false) + .long("inflation") + .takes_value(true) + .possible_values(&["pico", "full", "none"]) + .help("Overwrite inflation when warping"), + ) + .arg( + Arg::with_name("enable_credits_auto_rewind") + .required(false) + .long("enable-credits-auto-rewind") + .takes_value(false) + .help("Enable credits auto rewind"), + ) + .arg( + Arg::with_name("recalculate_capitalization") + .required(false) + .long("recalculate-capitalization") + .takes_value(false) + .help( + "Recalculate capitalization before warping; circumvents bank's \ + out-of-sync capitalization", + ), + ) + .arg( + Arg::with_name("csv_filename") + .long("csv-filename") + .value_name("FILENAME") + .takes_value(true) + .help("Output file in the csv format"), + ), + ) + .subcommand( + SubCommand::with_name("purge") + .about("Delete a range of slots from the ledger") + .arg( + Arg::with_name("start_slot") + .index(1) + .value_name("SLOT") + .takes_value(true) + .required(true) + .help("Start slot to purge from (inclusive)"), + ) + .arg(Arg::with_name("end_slot").index(2).value_name("SLOT").help( + "Ending slot to stop purging (inclusive) \ + [default: the highest slot in the ledger]", + )) + .arg( + Arg::with_name("batch_size") + .long("batch-size") + .value_name("NUM") + .takes_value(true) + .default_value("1000") + .help("Removes at most BATCH_SIZE slots while purging in loop"), + ) + .arg( + Arg::with_name("no_compaction") + .long("no-compaction") + .required(false) + .takes_value(false) + .help( + "--no-compaction is deprecated, ledger compaction after purge is \ + disabled by default", + ) + .conflicts_with("enable_compaction") + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("enable_compaction") + .long("enable-compaction") + .required(false) + .takes_value(false) + .help( + "Perform ledger compaction after purge. 
Compaction will optimize \ + storage space, but may take a long time to complete.", + ) + .conflicts_with("no_compaction"), + ) + .arg( + Arg::with_name("dead_slots_only") + .long("dead-slots-only") + .required(false) + .takes_value(false) + .help("Limit purging to dead slots only"), + ), + ) + .subcommand( + SubCommand::with_name("list-roots") + .about( + "Output up to last root hashes and their heights starting at the \ + given block height", + ) + .arg( + Arg::with_name("max_height") + .long("max-height") + .value_name("NUM") + .takes_value(true) + .help("Maximum block height"), + ) + .arg( + Arg::with_name("start_root") + .long("start-root") + .value_name("NUM") + .takes_value(true) + .help("First root to start searching from"), + ) + .arg( + Arg::with_name("slot_list") + .long("slot-list") + .value_name("FILENAME") + .required(false) + .takes_value(true) + .help( + "The location of the output YAML file. A list of rollback slot \ + heights and hashes will be written to the file", + ), + ) + .arg( + Arg::with_name("num_roots") + .long("num-roots") + .value_name("NUM") + .takes_value(true) + .default_value(DEFAULT_ROOT_COUNT) + .required(false) + .help("Number of roots in the output"), + ), + ) + .subcommand( + SubCommand::with_name("latest-optimistic-slots") + .about( + "Output up to the most recent optimistic slots with their hashes \ + and timestamps.", + ) + .arg( + Arg::with_name("num_slots") + .long("num-slots") + .value_name("NUM") + .takes_value(true) + .default_value(DEFAULT_LATEST_OPTIMISTIC_SLOTS_COUNT) + .required(false) + .help("Number of slots in the output"), + ) + .arg( + Arg::with_name("exclude_vote_only_slots") + .long("exclude-vote-only-slots") + .required(false) + .help("Exclude slots that contain only votes from output"), + ), ) .subcommand( SubCommand::with_name("repair-roots") - .about("Traverses the AncestorIterator backward from a last known root \ - to restore missing roots to the Root column") + .about( + "Traverses the AncestorIterator backward from a last known root to restore \ + missing roots to the Root column", + ) .arg( Arg::with_name("start_root") .long("before") .value_name("NUM") .takes_value(true) - .help("Recent root after the range to repair") + .help("Recent root after the range to repair"), ) .arg( Arg::with_name("end_root") .long("until") .value_name("NUM") .takes_value(true) - .help("Earliest slot to check for root repair") + .help("Earliest slot to check for root repair"), ) .arg( Arg::with_name("max_slots") @@ -2146,40 +1960,47 @@ fn main() { .takes_value(true) .default_value(DEFAULT_MAX_SLOTS_ROOT_REPAIR) .required(true) - .help("Override the maximum number of slots to check for root repair") - ) - ) - .subcommand( - SubCommand::with_name("analyze-storage") - .about("Output statistics in JSON format about \ - all column families in the ledger rocksdb") + .help("Override the maximum number of slots to check for root repair"), + ), ) + .subcommand(SubCommand::with_name("analyze-storage").about( + "Output statistics in JSON format about all column families in the ledger rocksdb", + )) .subcommand( SubCommand::with_name("compute-slot-cost") - .about("runs cost_model over the block at the given slots, \ - computes how expensive a block was based on cost_model") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .multiple(true) - .takes_value(true) - .help("Slots that their blocks are computed for cost, default to all slots in ledger"), - ) + .about( + "runs cost_model over the block at the given slots, 
computes how expensive a \ + block was based on cost_model", + ) + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .multiple(true) + .takes_value(true) + .help( + "Slots that their blocks are computed for cost, default to all slots \ + in ledger", + ), + ), ) .subcommand( SubCommand::with_name("print-file-metadata") - .about("Print the metadata of the specified ledger-store file. \ - If no file name is specified, it will print the metadata of all ledger files.") - .arg( - Arg::with_name("file_name") - .long("file-name") - .takes_value(true) - .value_name("SST_FILE_NAME") - .help("The ledger file name (e.g. 011080.sst.) \ - If no file name is specified, it will print the metadata of all ledger files.") - ) + .about( + "Print the metadata of the specified ledger-store file. If no file name is \ + specified, it will print the metadata of all ledger files.", + ) + .arg( + Arg::with_name("file_name") + .long("file-name") + .takes_value(true) + .value_name("SST_FILE_NAME") + .help( + "The ledger file name (e.g. 011080.sst.) If no file name is \ + specified, it will print the metadata of all ledger files.", + ), + ), ) .program_subcommand() .get_matches(); @@ -2195,2047 +2016,1917 @@ fn main() { .ok() .map(PathBuf::from); - let wal_recovery_mode = matches - .value_of("wal_recovery_mode") - .map(BlockstoreRecoveryMode::from); - let force_update_to_open = matches.is_present("force_update_to_open"); - let enforce_ulimit_nofile = !matches.is_present("ignore_ulimit_nofile_error"); let verbose_level = matches.occurrences_of("verbose"); - if let ("bigtable", Some(arg_matches)) = matches.subcommand() { - bigtable_process_command(&ledger_path, arg_matches) - } else if let ("program", Some(arg_matches)) = matches.subcommand() { - program(&ledger_path, arg_matches) - } else { - let ledger_path = canonicalize_ledger_path(&ledger_path); - - match matches.subcommand() { - ("print", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); - let num_slots = value_t!(arg_matches, "num_slots", Slot).ok(); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - let only_rooted = arg_matches.is_present("only_rooted"); - output_ledger( - open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ), - starting_slot, - ending_slot, - allow_dead_slots, - LedgerOutputMethod::Print, - num_slots, - verbose_level, - only_rooted, - ); - } - ("copy", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); - - let source = open_blockstore( - &ledger_path, - AccessType::Secondary, - None, - force_update_to_open, - enforce_ulimit_nofile, - ); - - // Check if shred storage type can be inferred; if not, a new - // ledger is being created. open_blockstore() will attempt to - // to infer shred storage type as well, but this check provides - // extra insight to user on how to create a FIFO ledger. - let _ = get_shred_storage_type( - &target_db, - &format!( - "No --target-db ledger at {:?} was detected, default \ - compaction (RocksLevel) will be used. 
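// The hunk around here replaces the if/else-if chain over matches.subcommand()
// with a single match: bigtable and program get their own arms, and everything
// else falls through to a nested match after the ledger path is canonicalized.
// A reduced sketch of that clap 2.x shape:
//
//     use clap::{App, SubCommand};
//
//     fn main() {
//         let matches = App::new("demo")
//             .subcommand(SubCommand::with_name("bigtable"))
//             .subcommand(SubCommand::with_name("program"))
//             .subcommand(SubCommand::with_name("print"))
//             .get_matches_from(vec!["demo", "print"]);
//
//         match matches.subcommand() {
//             ("bigtable", Some(_arg_matches)) => println!("bigtable path"),
//             ("program", Some(_arg_matches)) => println!("program path"),
//             _ => {
//                 // ledger-path-based subcommands handled in a nested match
//                 match matches.subcommand() {
//                     ("print", Some(_arg_matches)) => println!("print path"),
//                     _ => println!("unrecognized"),
//                 }
//             }
//         }
//     }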
Fifo compaction \ - can be enabled for a new ledger by manually creating \ - {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory within \ - the specified --target_db directory.", - &target_db - ), - ); - let target = open_blockstore( - &target_db, - AccessType::Primary, - None, - force_update_to_open, - enforce_ulimit_nofile, - ); - for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { - if slot > ending_slot { - break; - } - if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { - if target.insert_shreds(shreds, None, true).is_err() { - warn!("error inserting shreds for slot {}", slot); + match matches.subcommand() { + ("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches), + ("program", Some(arg_matches)) => program(&ledger_path, arg_matches), + _ => { + let ledger_path = canonicalize_ledger_path(&ledger_path); + + match matches.subcommand() { + ("print", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = + value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); + let num_slots = value_t!(arg_matches, "num_slots", Slot).ok(); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let only_rooted = arg_matches.is_present("only_rooted"); + output_ledger( + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary), + starting_slot, + ending_slot, + allow_dead_slots, + OutputFormat::Display, + num_slots, + verbose_level, + only_rooted, + ); + } + ("copy", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + let target_db = + PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); + + let source = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + + // Check if shred storage type can be inferred; if not, a new + // ledger is being created. open_blockstore() will attempt to + // to infer shred storage type as well, but this check provides + // extra insight to user on how to create a FIFO ledger. + let _ = get_shred_storage_type( + &target_db, + &format!( + "No --target-db ledger at {:?} was detected, default compaction \ + (RocksLevel) will be used. 
Fifo compaction can be enabled for a new \ + ledger by manually creating {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory \ + within the specified --target_db directory.", + &target_db + ), + ); + let target = open_blockstore(&target_db, arg_matches, AccessType::Primary); + for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { + if slot > ending_slot { + break; + } + if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { + if target.insert_shreds(shreds, None, true).is_err() { + warn!("error inserting shreds for slot {}", slot); + } } } } - } - ("genesis", Some(arg_matches)) => { - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let print_accounts = arg_matches.is_present("accounts"); - if print_accounts { - let print_account_data = !arg_matches.is_present("no_account_data"); - let print_encoding_format = parse_encoding_format(arg_matches); - for (pubkey, account) in genesis_config.accounts { - output_account( - &pubkey, - &AccountSharedData::from(account), - None, - print_account_data, - print_encoding_format, - ); + ("genesis", Some(arg_matches)) => { + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let print_accounts = arg_matches.is_present("accounts"); + if print_accounts { + let print_account_data = !arg_matches.is_present("no_account_data"); + let print_encoding_format = parse_encoding_format(arg_matches); + for (pubkey, account) in genesis_config.accounts { + output_account( + &pubkey, + &AccountSharedData::from(account), + None, + print_account_data, + print_encoding_format, + ); + } + } else { + println!("{genesis_config}"); } - } else { - println!("{genesis_config}"); } - } - ("genesis-hash", Some(arg_matches)) => { - println!( - "{}", - open_genesis_config_by(&ledger_path, arg_matches).hash() - ); - } - ("modify-genesis", Some(arg_matches)) => { - let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let output_directory = - PathBuf::from(arg_matches.value_of("output_directory").unwrap()); - - if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { - genesis_config.cluster_type = cluster_type; + ("genesis-hash", Some(arg_matches)) => { + println!( + "{}", + open_genesis_config_by(&ledger_path, arg_matches).hash() + ); } + ("modify-genesis", Some(arg_matches)) => { + let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let output_directory = + PathBuf::from(arg_matches.value_of("output_directory").unwrap()); - if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") { - genesis_config.poh_config.hashes_per_tick = match hashes_per_tick { - // Note: Unlike `solana-genesis`, "auto" is not supported here. 
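// The match that follows (identical on the old and new sides of the diff)
// maps the raw --hashes-per-tick string onto PohConfig's Option<u64>. A
// hedged reading of the tri-state, restated with the branches annotated:
if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") {
    genesis_config.poh_config.hashes_per_tick = match hashes_per_tick {
        // "sleep": no hashing; tick cadence comes from sleeping. ("auto" is
        // accepted by solana-genesis but, per the note above, not here.)
        "sleep" => None,
        // Anything else must parse as a u64 hash count, or the tool exits.
        _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)),
    };
}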
- "sleep" => None, - _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { + genesis_config.cluster_type = cluster_type; } - } - create_new_ledger( - &output_directory, - &genesis_config, - solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, - LedgerColumnOptions::default(), - ) - .unwrap_or_else(|err| { - eprintln!("Failed to write genesis config: {err:?}"); - exit(1); - }); - - println!("{}", open_genesis_config_by(&output_directory, arg_matches)); - } - ("shred-version", Some(arg_matches)) => { - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot: Some(0), - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - match load_and_process_ledger( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - println!( - "{}", - compute_shred_version( - &genesis_config.hash(), - Some(&bank_forks.read().unwrap().working_bank().hard_forks()) - ) - ); + if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") { + genesis_config.poh_config.hashes_per_tick = match hashes_per_tick { + // Note: Unlike `solana-genesis`, "auto" is not supported here. + "sleep" => None, + _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + } } - Err(err) => { - eprintln!("Failed to load ledger: {err:?}"); + + create_new_ledger( + &output_directory, + &genesis_config, + solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + LedgerColumnOptions::default(), + ) + .unwrap_or_else(|err| { + eprintln!("Failed to write genesis config: {err:?}"); exit(1); - } + }); + + println!("{}", open_genesis_config_by(&output_directory, arg_matches)); } - } - ("shred-meta", Some(arg_matches)) => { - #[derive(Debug)] - #[allow(dead_code)] - struct ShredMeta<'a> { - slot: Slot, - full_slot: bool, - shred_index: usize, - data: bool, - code: bool, - last_in_slot: bool, - data_complete: bool, - shred: &'a Shred, + ("shred-version", Some(arg_matches)) => { + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot: Some(0), + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + + println!( + "{}", + compute_shred_version( + &genesis_config.hash(), + Some(&bank_forks.read().unwrap().working_bank().hard_forks()) + ) + ); } - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); - let ledger = open_blockstore( - &ledger_path, - AccessType::Secondary, - None, - force_update_to_open, 
- enforce_ulimit_nofile, - ); - for (slot, _meta) in ledger - .slot_meta_iterator(starting_slot) - .unwrap() - .take_while(|(slot, _)| *slot <= ending_slot) - { - let full_slot = ledger.is_full(slot); - if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) { - for (shred_index, shred) in shreds.iter().enumerate() { - println!( - "{:#?}", - ShredMeta { - slot, - full_slot, - shred_index, - data: shred.is_data(), - code: shred.is_code(), - data_complete: shred.data_complete(), - last_in_slot: shred.last_in_slot(), - shred, - } - ); + ("shred-meta", Some(arg_matches)) => { + #[derive(Debug)] + #[allow(dead_code)] + struct ShredMeta<'a> { + slot: Slot, + full_slot: bool, + shred_index: usize, + data: bool, + code: bool, + last_in_slot: bool, + data_complete: bool, + shred: &'a Shred, + } + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = + value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); + let ledger = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + for (slot, _meta) in ledger + .slot_meta_iterator(starting_slot) + .unwrap() + .take_while(|(slot, _)| *slot <= ending_slot) + { + let full_slot = ledger.is_full(slot); + if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) { + for (shred_index, shred) in shreds.iter().enumerate() { + println!( + "{:#?}", + ShredMeta { + slot, + full_slot, + shred_index, + data: shred.is_data(), + code: shred.is_code(), + data_complete: shred.data_complete(), + last_in_slot: shred.last_in_slot(), + shred, + } + ); + } } } } - } - ("bank-hash", Some(arg_matches)) => { - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - match load_and_process_ledger( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - println!("{}", &bank_forks.read().unwrap().working_bank().hash()); - } - Err(err) => { - eprintln!("Failed to load ledger: {err:?}"); - exit(1); - } + ("bank-hash", Some(arg_matches)) => { + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + println!("{}", &bank_forks.read().unwrap().working_bank().hash()); } - } - ("slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - let blockstore = open_blockstore( - &ledger_path, - 
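// Why ShredMeta above derives Debug but also allows dead_code (an inference
// from the pattern, not stated in the patch): its fields are only ever read
// by the derived Debug impl when printed with {:#?}, and the compiler does
// not count that as a use. Minimal demonstration:
#[derive(Debug)]
#[allow(dead_code)] // silence "field is never read" for Debug-only fields
struct Demo {
    only_seen_via_debug: u64,
}

fn main() {
    println!("{:#?}", Demo { only_seen_via_debug: 7 });
}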
AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - for slot in slots { - println!("Slot {slot}"); - if let Err(err) = output_slot( - &blockstore, - slot, - allow_dead_slots, - &LedgerOutputMethod::Print, - verbose_level, - &mut HashMap::new(), - ) { - eprintln!("{err}"); + ("slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + for slot in slots { + println!("Slot {slot}"); + if let Err(err) = output_slot( + &blockstore, + slot, + allow_dead_slots, + &OutputFormat::Display, + verbose_level, + &mut HashMap::new(), + ) { + eprintln!("{err}"); + } } } - } - ("json", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - output_ledger( - open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ), - starting_slot, - Slot::MAX, - allow_dead_slots, - LedgerOutputMethod::Json, - None, - std::u64::MAX, - true, - ); - } - ("dead-slots", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { - println!("{slot}"); + ("json", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + output_ledger( + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary), + starting_slot, + Slot::MAX, + allow_dead_slots, + OutputFormat::Json, + None, + std::u64::MAX, + true, + ); } - } - ("duplicate-slots", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { - println!("{slot}"); + ("dead-slots", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { + println!("{slot}"); + } } - } - ("set-dead-slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let blockstore = open_blockstore( - &ledger_path, - AccessType::Primary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - for slot in slots { - match blockstore.set_dead_slot(slot) { - Ok(_) => println!("Slot {slot} dead"), - Err(err) => eprintln!("Failed to set slot {slot} dead slot: {err:?}"), + ("duplicate-slots", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { + println!("{slot}"); } } - } - ("remove-dead-slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let blockstore 
= open_blockstore( - &ledger_path, - AccessType::Primary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - for slot in slots { - match blockstore.remove_dead_slot(slot) { - Ok(_) => println!("Slot {slot} not longer marked dead"), - Err(err) => { - eprintln!("Failed to remove dead flag for slot {slot}, {err:?}") + ("set-dead-slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Primary); + for slot in slots { + match blockstore.set_dead_slot(slot) { + Ok(_) => println!("Slot {slot} dead"), + Err(err) => eprintln!("Failed to set slot {slot} dead slot: {err:?}"), } } } - } - ("parse_full_frozen", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - let mut ancestors = BTreeSet::new(); - assert!( - blockstore.meta(ending_slot).unwrap().is_some(), - "Ending slot doesn't exist" - ); - for a in AncestorIterator::new(ending_slot, &blockstore) { - ancestors.insert(a); - if a <= starting_slot { - break; + ("remove-dead-slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Primary); + for slot in slots { + match blockstore.remove_dead_slot(slot) { + Ok(_) => println!("Slot {slot} not longer marked dead"), + Err(err) => { + eprintln!("Failed to remove dead flag for slot {slot}, {err:?}") + } + } } } - println!("ancestors: {:?}", ancestors.iter()); - - let mut frozen = BTreeMap::new(); - let mut full = BTreeMap::new(); - let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap(); - let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); - - let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); - let f = BufReader::new(File::open(log_file).unwrap()); - println!("Reading log file"); - for line in f.lines().flatten() { - let parse_results = { - if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { - Some((slot_string, &mut frozen)) - } else { - full_regex - .captures_iter(&line) - .next() - .map(|slot_string| (slot_string, &mut full)) + ("parse_full_frozen", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let mut ancestors = BTreeSet::new(); + assert!( + blockstore.meta(ending_slot).unwrap().is_some(), + "Ending slot doesn't exist" + ); + for a in AncestorIterator::new(ending_slot, &blockstore) { + ancestors.insert(a); + if a <= starting_slot { + break; } - }; + } + println!("ancestors: {:?}", ancestors.iter()); + + let mut frozen = BTreeMap::new(); + let mut full = BTreeMap::new(); + let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap(); + let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); + + let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); + let f = BufReader::new(File::open(log_file).unwrap()); + println!("Reading log file"); + for line in f.lines().map_while(Result::ok) { + let parse_results = { + if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { + 
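// The loop above changes .lines().flatten() to .lines().map_while(Result::ok).
// The difference: flatten() skips Err lines and keeps reading, while
// map_while() stops at the first read error. A self-contained sketch of the
// scan (the regex is copied from the patch, but anchored with \d+ so an
// empty capture can never reach parse()):
use regex::Regex;
use std::{
    fs::File,
    io::{BufRead, BufReader},
};

fn scan_log(path: &str) -> std::io::Result<()> {
    let frozen_regex = Regex::new(r"bank frozen: (\d+)").unwrap();
    let f = BufReader::new(File::open(path)?);
    for line in f.lines().map_while(Result::ok) {
        if let Some(caps) = frozen_regex.captures(&line) {
            let slot: u64 = caps[1].parse().unwrap();
            println!("frozen slot: {slot}");
        }
    }
    Ok(())
}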
Some((slot_string, &mut frozen))
+                            } else {
+                                full_regex
+                                    .captures_iter(&line)
+                                    .next()
+                                    .map(|slot_string| (slot_string, &mut full))
+                            }
+                        };

-                    if let Some((slot_string, map)) = parse_results {
-                        let slot = slot_string
-                            .get(1)
-                            .expect("Only one match group")
-                            .as_str()
-                            .parse::<u64>()
-                            .unwrap();
-                        if ancestors.contains(&slot) && !map.contains_key(&slot) {
-                            map.insert(slot, line);
-                        }
-                        if slot == ending_slot
-                            && frozen.contains_key(&slot)
-                            && full.contains_key(&slot)
-                        {
-                            break;
+                        if let Some((slot_string, map)) = parse_results {
+                            let slot = slot_string
+                                .get(1)
+                                .expect("Only one match group")
+                                .as_str()
+                                .parse::<u64>()
+                                .unwrap();
+                            if ancestors.contains(&slot) && !map.contains_key(&slot) {
+                                map.insert(slot, line);
+                            }
+                            if slot == ending_slot
+                                && frozen.contains_key(&slot)
+                                && full.contains_key(&slot)
+                            {
+                                break;
+                            }
                         }
                     }
-                }
-                for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) {
-                    assert_eq!(slot1, slot2);
-                    println!("Slot: {slot1}\n, full: {full_log}\n, frozen: {frozen_log}");
+                    for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) {
+                        assert_eq!(slot1, slot2);
+                        println!("Slot: {slot1}\n, full: {full_log}\n, frozen: {frozen_log}");
+                    }
                 }
-            }
-            ("verify", Some(arg_matches)) => {
-                let exit_signal = Arc::new(AtomicBool::new(false));
-                let no_os_memory_stats_reporting =
-                    arg_matches.is_present("no_os_memory_stats_reporting");
-                let system_monitor_service = SystemMonitorService::new(
-                    Arc::clone(&exit_signal),
-                    SystemMonitorStatsReportConfig {
-                        report_os_memory_stats: !no_os_memory_stats_reporting,
-                        report_os_network_stats: false,
-                        report_os_cpu_stats: false,
-                        report_os_disk_stats: false,
-                    },
-                );
+                ("verify", Some(arg_matches)) => {
+                    let exit_signal = Arc::new(AtomicBool::new(false));
+                    let report_os_memory_stats =
+                        arg_matches.is_present("os_memory_stats_reporting");
+                    let system_monitor_service = SystemMonitorService::new(
+                        Arc::clone(&exit_signal),
+                        SystemMonitorStatsReportConfig {
+                            report_os_memory_stats,
+                            report_os_network_stats: false,
+                            report_os_cpu_stats: false,
+                            report_os_disk_stats: false,
+                        },
+                    );
+
+                    let debug_keys = pubkeys_of(arg_matches, "debug_key")
+                        .map(|pubkeys| Arc::new(pubkeys.into_iter().collect::<HashSet<_>>()));

-                let debug_keys = pubkeys_of(arg_matches, "debug_key")
-                    .map(|pubkeys| Arc::new(pubkeys.into_iter().collect::<HashSet<_>>()));
+                    if arg_matches.is_present("skip_poh_verify") {
+                        eprintln!(
+                            "--skip-poh-verify is deprecated. Replace with --skip-verification."
+                        );
+                    }

-                if arg_matches.is_present("skip_poh_verify") {
-                    eprintln!(
-                        "--skip-poh-verify is deprecated. Replace with --skip-verification."
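// Every arm that used to match on load_and_process_ledger's Result and exit
// on Err now calls load_and_process_ledger_or_exit instead. The generic
// shape of that wrapper, as a sketch of the pattern (the real helper's
// signature and message may differ):
fn or_exit<T, E: std::fmt::Debug>(result: Result<T, E>, what: &str) -> T {
    result.unwrap_or_else(|err| {
        eprintln!("Failed to {what}: {err:?}");
        std::process::exit(1);
    })
}
// Usage: let (bank_forks, hashes) = or_exit(load_and_process_ledger(/* ... */), "load ledger");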
+ let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + run_verification: !(arg_matches.is_present("skip_poh_verify") + || arg_matches.is_present("skip_verification")), + on_halt_store_hash_raw_data_for_debug: arg_matches + .is_present("halt_at_slot_store_hash_raw_data"), + run_final_accounts_hash_calc: arg_matches.is_present("run_final_hash_calc"), + halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + debug_keys, + limit_load_slot_count_from_snapshot: value_t!( + arg_matches, + "limit_load_slot_count_from_snapshot", + usize + ) + .ok(), + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + verify_index: arg_matches.is_present("verify_accounts_index"), + allow_dead_slots: arg_matches.is_present("allow_dead_slots"), + accounts_db_test_hash_calculation: arg_matches + .is_present("accounts_db_test_hash_calculation"), + accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), + runtime_config: RuntimeConfig::default(), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; + let print_accounts_stats = arg_matches.is_present("print_accounts_stats"); + let write_bank_file = arg_matches.is_present("write_bank_file"); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + info!("genesis hash: {}", genesis_config.hash()); + + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, ); + + if print_accounts_stats { + let working_bank = bank_forks.read().unwrap().working_bank(); + working_bank.print_accounts_stats(); + } + if write_bank_file { + let working_bank = bank_forks.read().unwrap().working_bank(); + bank_hash_details::write_bank_hash_details_file(&working_bank) + .map_err(|err| { + warn!("Unable to write bank hash_details file: {err}"); + }) + .ok(); + } + exit_signal.store(true, Ordering::Relaxed); + system_monitor_service.join().unwrap(); } + ("graph", Some(arg_matches)) => { + let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); + let graph_config = GraphConfig { + include_all_votes: arg_matches.is_present("include_all_votes"), + vote_account_mode: value_t_or_exit!( + arg_matches, + "vote_account_mode", + GraphVoteAccountMode + ), + }; + + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - run_verification: !(arg_matches.is_present("skip_poh_verify") - || arg_matches.is_present("skip_verification")), - on_halt_store_hash_raw_data_for_debug: arg_matches - .is_present("halt_at_slot_store_hash_raw_data"), - run_final_accounts_hash_calc: arg_matches.is_present("run_final_hash_calc"), - halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - debug_keys, - 
limit_load_slot_count_from_snapshot: value_t!( + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, arg_matches, - "limit_load_slot_count_from_snapshot", - usize - ) - .ok(), - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - verify_index: arg_matches.is_present("verify_accounts_index"), - allow_dead_slots: arg_matches.is_present("allow_dead_slots"), - accounts_db_test_hash_calculation: arg_matches - .is_present("accounts_db_test_hash_calculation"), - accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), - runtime_config: RuntimeConfig::default(), - use_snapshot_archives_at_startup: value_t_or_exit!( + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let print_accounts_stats = arg_matches.is_present("print_accounts_stats"); - let write_bank_file = arg_matches.is_present("write_bank_file"); - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - info!("genesis hash: {}", genesis_config.hash()); - - let blockstore = open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - let (bank_forks, ..) = load_and_process_ledger( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) - .unwrap_or_else(|err| { - eprintln!("Ledger verification failed: {err:?}"); - exit(1); - }); - if print_accounts_stats { - let working_bank = bank_forks.read().unwrap().working_bank(); - working_bank.print_accounts_stats(); - } - if write_bank_file { - let working_bank = bank_forks.read().unwrap().working_bank(); - let _ = bank_hash_details::write_bank_hash_details_file(&working_bank); + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + + let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config); + let extension = Path::new(&output_file).extension(); + let result = if extension == Some(OsStr::new("pdf")) { + render_dot(dot, &output_file, "pdf") + } else if extension == Some(OsStr::new("png")) { + render_dot(dot, &output_file, "png") + } else { + File::create(&output_file) + .and_then(|mut file| file.write_all(&dot.into_bytes())) + }; + + match result { + Ok(_) => println!("Wrote {output_file}"), + Err(err) => eprintln!("Unable to write {output_file}: {err}"), + } } - exit_signal.store(true, Ordering::Relaxed); - system_monitor_service.join().unwrap(); - } - ("graph", Some(arg_matches)) => { - let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); - let graph_config = GraphConfig { - include_all_votes: arg_matches.is_present("include_all_votes"), - vote_account_mode: value_t_or_exit!( - arg_matches, - "vote_account_mode", - GraphVoteAccountMode - ), - }; + ("create-snapshot", Some(arg_matches)) => { + let is_incremental = arg_matches.is_present("incremental"); + let is_minimized = arg_matches.is_present("minimized"); + let output_directory = value_t!(arg_matches, "output_directory", PathBuf) + .unwrap_or_else(|_| { + match ( + is_incremental, + &snapshot_archive_path, + &incremental_snapshot_archive_path, + ) { + (true, _, Some(incremental_snapshot_archive_path)) => { + incremental_snapshot_archive_path.clone() + } + (_, 
Some(snapshot_archive_path), _) => { + snapshot_archive_path.clone() + } + (_, _, _) => ledger_path.clone(), + } + }); + let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok(); + let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); + let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); + + let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); + let faucet_lamports = + value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); + + let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); + let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); + + let bootstrap_stake_authorized_pubkey = + pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); + let bootstrap_validator_lamports = + value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); + let bootstrap_validator_stake_lamports = + value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64); + let minimum_stake_lamports = rent.minimum_balance(StakeStateV2::size_of()); + if bootstrap_validator_stake_lamports < minimum_stake_lamports { + eprintln!( + "Error: insufficient --bootstrap-validator-stake-lamports. Minimum amount \ + is {minimum_stake_lamports}" + ); + exit(1); + } + let bootstrap_validator_pubkeys = + pubkeys_of(arg_matches, "bootstrap_validator"); + let accounts_to_remove = + pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); + let feature_gates_to_deactivate = + pubkeys_of(arg_matches, "feature_gates_to_deactivate").unwrap_or_default(); + let vote_accounts_to_destake: HashSet<_> = + pubkeys_of(arg_matches, "vote_accounts_to_destake") + .unwrap_or_default() + .into_iter() + .collect(); + let snapshot_version = arg_matches.value_of("snapshot_version").map_or( + SnapshotVersion::default(), + |s| { + s.parse::().unwrap_or_else(|e| { + eprintln!("Error: {e}"); + exit(1) + }) + }, + ); - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - use_snapshot_archives_at_startup: value_t_or_exit!( + let snapshot_archive_format = { + let archive_format_str = + value_t_or_exit!(arg_matches, "snapshot_archive_format", String); + ArchiveFormat::from_cli_arg(&archive_format_str).unwrap_or_else(|| { + panic!("Archive format not recognized: {archive_format_str}") + }) + }; + + let maximum_full_snapshot_archives_to_retain = value_t_or_exit!( arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; + "maximum_full_snapshots_to_retain", + NonZeroUsize + ); + let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( + arg_matches, + "maximum_incremental_snapshots_to_retain", + NonZeroUsize + ); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let mut process_options = ProcessOptions { + new_hard_forks, + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; + let blockstore = Arc::new(open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + )); - let blockstore = 
open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - match load_and_process_ledger( - arg_matches, - &open_genesis_config_by(&ledger_path, arg_matches), - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config); - - let extension = Path::new(&output_file).extension(); - let result = if extension == Some(OsStr::new("pdf")) { - render_dot(dot, &output_file, "pdf") - } else if extension == Some(OsStr::new("png")) { - render_dot(dot, &output_file, "png") - } else { - File::create(&output_file) - .and_then(|mut file| file.write_all(&dot.into_bytes())) - }; + let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { + blockstore + .rooted_slot_iterator(0) + .expect("Failed to get rooted slot iterator") + .last() + .expect("Failed to get root") + } else { + value_t_or_exit!(arg_matches, "snapshot_slot", Slot) + }; - match result { - Ok(_) => println!("Wrote {output_file}"), - Err(err) => eprintln!("Unable to write {output_file}: {err}"), - } - } - Err(err) => { - eprintln!("Failed to load ledger: {err:?}"); + if blockstore + .meta(snapshot_slot) + .unwrap() + .filter(|m| m.is_full()) + .is_none() + { + eprintln!( + "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is \ + not full.", + ); exit(1); } - } - } - ("create-snapshot", Some(arg_matches)) => { - let is_incremental = arg_matches.is_present("incremental"); - let is_minimized = arg_matches.is_present("minimized"); - let output_directory = value_t!(arg_matches, "output_directory", PathBuf) - .unwrap_or_else(|_| { - match ( - is_incremental, - &snapshot_archive_path, - &incremental_snapshot_archive_path, - ) { - (true, _, Some(incremental_snapshot_archive_path)) => { - incremental_snapshot_archive_path.clone() - } - (_, Some(snapshot_archive_path), _) => snapshot_archive_path.clone(), - (_, _, _) => ledger_path.clone(), + process_options.halt_at_slot = Some(snapshot_slot); + + let ending_slot = if is_minimized { + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + if ending_slot <= snapshot_slot { + eprintln!( + "Error: ending_slot ({ending_slot}) must be greater than \ + snapshot_slot ({snapshot_slot})" + ); + exit(1); } - }); - let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok(); - let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); - let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); - - let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); - let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); - - let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); - let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); - - let bootstrap_stake_authorized_pubkey = - pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); - let bootstrap_validator_lamports = - value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); - let bootstrap_validator_stake_lamports = - value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64); - let minimum_stake_lamports = rent.minimum_balance(StakeStateV2::size_of()); - if bootstrap_validator_stake_lamports < minimum_stake_lamports { - eprintln!( - "Error: insufficient --bootstrap-validator-stake-lamports. 
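// Unpacking the snapshot-slot guard above (hedged: the type shapes are
// inferred from the calls): blockstore.meta(slot) yields a
// Result<Option<SlotMeta>>, so the chain reads "the blockstore read must
// succeed, a meta must exist, and it must report the slot full (all data
// shreds present)"; anything else aborts the command.
let slot_is_usable = blockstore
    .meta(snapshot_slot)
    .unwrap() // the blockstore read itself must succeed
    .filter(|meta| meta.is_full()) // keep the meta only if the slot is full
    .is_some();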
\ - Minimum amount is {minimum_stake_lamports}" - ); - exit(1); - } - let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator"); - let accounts_to_remove = - pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); - let feature_gates_to_deactivate = - pubkeys_of(arg_matches, "feature_gates_to_deactivate").unwrap_or_default(); - let vote_accounts_to_destake: HashSet<_> = - pubkeys_of(arg_matches, "vote_accounts_to_destake") - .unwrap_or_default() - .into_iter() - .collect(); - let snapshot_version = arg_matches.value_of("snapshot_version").map_or( - SnapshotVersion::default(), - |s| { - s.parse::().unwrap_or_else(|e| { - eprintln!("Error: {e}"); - exit(1) - }) - }, - ); - let snapshot_archive_format = { - let archive_format_str = - value_t_or_exit!(arg_matches, "snapshot_archive_format", String); - ArchiveFormat::from_cli_arg(&archive_format_str).unwrap_or_else(|| { - panic!("Archive format not recognized: {archive_format_str}") - }) - }; + Some(ending_slot) + } else { + None + }; - let maximum_full_snapshot_archives_to_retain = value_t_or_exit!( - arg_matches, - "maximum_full_snapshots_to_retain", - NonZeroUsize - ); - let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( - arg_matches, - "maximum_incremental_snapshots_to_retain", - NonZeroUsize - ); - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let mut process_options = ProcessOptions { - new_hard_forks, - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), - use_snapshot_archives_at_startup: value_t_or_exit!( - arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let blockstore = Arc::new(open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - )); + let snapshot_type_str = if is_incremental { + "incremental " + } else if is_minimized { + "minimized " + } else { + "" + }; - let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { - blockstore - .rooted_slot_iterator(0) - .expect("Failed to get rooted slot iterator") - .last() - .expect("Failed to get root") - } else { - value_t_or_exit!(arg_matches, "snapshot_slot", Slot) - }; + info!( + "Creating {}snapshot of slot {} in {}", + snapshot_type_str, + snapshot_slot, + output_directory.display() + ); - if blockstore - .meta(snapshot_slot) - .unwrap() - .filter(|m| m.is_full()) - .is_none() - { - eprintln!( - "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is not full.", + let (bank_forks, starting_snapshot_hashes) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + blockstore.clone(), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, ); - exit(1); - } - process_options.halt_at_slot = Some(snapshot_slot); + let mut bank = bank_forks + .read() + .unwrap() + .get(snapshot_slot) + .unwrap_or_else(|| { + eprintln!("Error: Slot {snapshot_slot} is not available"); + exit(1); + }); - let ending_slot = if is_minimized { - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - if ending_slot <= snapshot_slot { - eprintln!( - "Error: ending_slot ({ending_slot}) must be greater than snapshot_slot ({snapshot_slot})" + let child_bank_required = rent_burn_percentage.is_ok() + || hashes_per_tick.is_some() + || 
remove_stake_accounts + || !accounts_to_remove.is_empty() + || !feature_gates_to_deactivate.is_empty() + || !vote_accounts_to_destake.is_empty() + || faucet_pubkey.is_some() + || bootstrap_validator_pubkeys.is_some(); + + if child_bank_required { + let mut child_bank = Bank::new_from_parent( + bank.clone(), + bank.collector_id(), + bank.slot() + 1, ); - exit(1); - } - Some(ending_slot) - } else { - None - }; + if let Ok(rent_burn_percentage) = rent_burn_percentage { + child_bank.set_rent_burn_percentage(rent_burn_percentage); + } - let snapshot_type_str = if is_incremental { - "incremental " - } else if is_minimized { - "minimized " - } else { - "" - }; + if let Some(hashes_per_tick) = hashes_per_tick { + child_bank.set_hashes_per_tick(match hashes_per_tick { + // Note: Unlike `solana-genesis`, "auto" is not supported here. + "sleep" => None, + _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + }); + } + bank = Arc::new(child_bank); + } - info!( - "Creating {}snapshot of slot {} in {}", - snapshot_type_str, - snapshot_slot, - output_directory.display() - ); + if let Some(faucet_pubkey) = faucet_pubkey { + bank.store_account( + &faucet_pubkey, + &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), + ); + } - match load_and_process_ledger( - arg_matches, - &genesis_config, - blockstore.clone(), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) { - Ok((bank_forks, starting_snapshot_hashes)) => { - let mut bank = bank_forks - .read() + if remove_stake_accounts { + for (address, mut account) in bank + .get_program_accounts(&stake::program::id(), &ScanConfig::default()) .unwrap() - .get(snapshot_slot) - .unwrap_or_else(|| { - eprintln!("Error: Slot {snapshot_slot} is not available"); - exit(1); - }); + .into_iter() + { + account.set_lamports(0); + bank.store_account(&address, &account); + } + } - let child_bank_required = rent_burn_percentage.is_ok() - || hashes_per_tick.is_some() - || remove_stake_accounts - || !accounts_to_remove.is_empty() - || !feature_gates_to_deactivate.is_empty() - || !vote_accounts_to_destake.is_empty() - || faucet_pubkey.is_some() - || bootstrap_validator_pubkeys.is_some(); - - if child_bank_required { - let mut child_bank = Bank::new_from_parent( - bank.clone(), - bank.collector_id(), - bank.slot() + 1, + for address in accounts_to_remove { + let mut account = bank.get_account(&address).unwrap_or_else(|| { + eprintln!( + "Error: Account does not exist, unable to remove it: {address}" ); + exit(1); + }); - if let Ok(rent_burn_percentage) = rent_burn_percentage { - child_bank.set_rent_burn_percentage(rent_burn_percentage); - } + account.set_lamports(0); + bank.store_account(&address, &account); + debug!("Account removed: {address}"); + } - if let Some(hashes_per_tick) = hashes_per_tick { - child_bank.set_hashes_per_tick(match hashes_per_tick { - // Note: Unlike `solana-genesis`, "auto" is not supported here. 
- "sleep" => None, - _ => { - Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)) - } - }); + for address in feature_gates_to_deactivate { + let mut account = bank.get_account(&address).unwrap_or_else(|| { + eprintln!( + "Error: Feature-gate account does not exist, unable to \ + deactivate it: {address}" + ); + exit(1); + }); + + match feature::from_account(&account) { + Some(feature) => { + if feature.activated_at.is_none() { + warn!("Feature gate is not yet activated: {address}"); + } + } + None => { + eprintln!("Error: Account is not a `Feature`: {address}"); + exit(1); } - bank = Arc::new(child_bank); } - if let Some(faucet_pubkey) = faucet_pubkey { - bank.store_account( - &faucet_pubkey, - &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), - ); - } + account.set_lamports(0); + bank.store_account(&address, &account); + debug!("Feature gate deactivated: {address}"); + } - if remove_stake_accounts { - for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { - account.set_lamports(0); - bank.store_account(&address, &account); + if !vote_accounts_to_destake.is_empty() { + for (address, mut account) in bank + .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .unwrap() + .into_iter() + { + if let Ok(StakeStateV2::Stake(meta, stake, _)) = account.state() { + if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) + { + if verbose_level > 0 { + warn!( + "Undelegating stake account {} from {}", + address, stake.delegation.voter_pubkey, + ); + } + account.set_state(&StakeStateV2::Initialized(meta)).unwrap(); + bank.store_account(&address, &account); + } } } + } + + if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys { + assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0); - for address in accounts_to_remove { - let mut account = bank.get_account(&address).unwrap_or_else(|| { + // Ensure there are no duplicated pubkeys in the --bootstrap-validator list + { + let mut v = bootstrap_validator_pubkeys.clone(); + v.sort(); + v.dedup(); + if v.len() != bootstrap_validator_pubkeys.len() { eprintln!( - "Error: Account does not exist, unable to remove it: {address}" + "Error: --bootstrap-validator pubkeys cannot be duplicated" ); exit(1); - }); + } + } + // Delete existing vote accounts + for (address, mut account) in bank + .get_program_accounts( + &solana_vote_program::id(), + &ScanConfig::default(), + ) + .unwrap() + .into_iter() + { account.set_lamports(0); bank.store_account(&address, &account); - debug!("Account removed: {address}"); } - for address in feature_gates_to_deactivate { - let mut account = bank.get_account(&address).unwrap_or_else(|| { - eprintln!( - "Error: Feature-gate account does not exist, unable to deactivate it: {address}" - ); - exit(1); - }); + // Add a new identity/vote/stake account for each of the provided bootstrap + // validators + let mut bootstrap_validator_pubkeys_iter = + bootstrap_validator_pubkeys.iter(); + loop { + let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() + else { + break; + }; + let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); + let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); - match feature::from_account(&account) { - Some(feature) => { - if feature.activated_at.is_none() { - warn!("Feature gate is not yet activated: {address}"); - } - } - None => { - eprintln!("Error: Account is not a `Feature`: {address}"); - exit(1); - } - } + 
bank.store_account( + identity_pubkey, + &AccountSharedData::new( + bootstrap_validator_lamports, + 0, + &system_program::id(), + ), + ); - account.set_lamports(0); - bank.store_account(&address, &account); - debug!("Feature gate deactivated: {address}"); - } + let vote_account = vote_state::create_account_with_authorized( + identity_pubkey, + identity_pubkey, + identity_pubkey, + 100, + VoteState::get_rent_exempt_reserve(&rent).max(1), + ); - if !vote_accounts_to_destake.is_empty() { - for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { - if let Ok(StakeStateV2::Stake(meta, stake, _)) = account.state() { - if vote_accounts_to_destake - .contains(&stake.delegation.voter_pubkey) - { - if verbose_level > 0 { - warn!( - "Undelegating stake account {} from {}", - address, stake.delegation.voter_pubkey, - ); - } - account - .set_state(&StakeStateV2::Initialized(meta)) - .unwrap(); - bank.store_account(&address, &account); - } - } - } + bank.store_account( + stake_pubkey, + &stake_state::create_account( + bootstrap_stake_authorized_pubkey + .as_ref() + .unwrap_or(identity_pubkey), + vote_pubkey, + &vote_account, + &rent, + bootstrap_validator_stake_lamports, + ), + ); + bank.store_account(vote_pubkey, &vote_account); } - if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys { - assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0); - - // Ensure there are no duplicated pubkeys in the --bootstrap-validator list - { - let mut v = bootstrap_validator_pubkeys.clone(); - v.sort(); - v.dedup(); - if v.len() != bootstrap_validator_pubkeys.len() { - eprintln!( - "Error: --bootstrap-validator pubkeys cannot be duplicated" - ); - exit(1); - } - } + // Warp ahead at least two epochs to ensure that the leader schedule will be + // updated to reflect the new bootstrap validator(s) + let minimum_warp_slot = + genesis_config.epoch_schedule.get_first_slot_in_epoch( + genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2, + ); - // Delete existing vote accounts - for (address, mut account) in bank - .get_program_accounts( - &solana_vote_program::id(), - &ScanConfig::default(), - ) - .unwrap() - .into_iter() - { - account.set_lamports(0); - bank.store_account(&address, &account); + if let Some(warp_slot) = warp_slot { + if warp_slot < minimum_warp_slot { + eprintln!( + "Error: --warp-slot too close. 
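// Worked example for the two-epoch warp rule above, assuming (hypothetically)
// fixed 432_000-slot epochs with no warmup: a snapshot at slot 1_000_000 lies
// in epoch 2 (since 2 * 432_000 = 864_000 <= 1_000_000 < 1_296_000), so
//   minimum_warp_slot = get_first_slot_in_epoch(2 + 2) = 4 * 432_000 = 1_728_000.
// Leader schedules are fixed in advance of an epoch, so jumping two epochs
// out guarantees the newly injected bootstrap validators appear in the
// schedule by the time the warped bank is live.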
Must be >= \ + {minimum_warp_slot}" + ); + exit(1); } + } else { + warn!("Warping to slot {}", minimum_warp_slot); + warp_slot = Some(minimum_warp_slot); + } + } - // Add a new identity/vote/stake account for each of the provided bootstrap - // validators - let mut bootstrap_validator_pubkeys_iter = - bootstrap_validator_pubkeys.iter(); - loop { - let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() - else { - break; - }; - let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); - let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); - - bank.store_account( - identity_pubkey, - &AccountSharedData::new( - bootstrap_validator_lamports, - 0, - &system_program::id(), - ), - ); + if child_bank_required { + while !bank.is_complete() { + bank.register_unique_tick(); + } + } - let vote_account = vote_state::create_account_with_authorized( - identity_pubkey, - identity_pubkey, - identity_pubkey, - 100, - VoteState::get_rent_exempt_reserve(&rent).max(1), - ); + bank.set_capitalization(); + + let bank = if let Some(warp_slot) = warp_slot { + // need to flush the write cache in order to use Storages to calculate + // the accounts hash, and need to root `bank` before flushing the cache + bank.rc.accounts.accounts_db.add_root(bank.slot()); + bank.force_flush_accounts_cache(); + Arc::new(Bank::warp_from_parent( + bank.clone(), + bank.collector_id(), + warp_slot, + CalcAccountsHashDataSource::Storages, + )) + } else { + bank + }; - bank.store_account( - stake_pubkey, - &stake_state::create_account( - bootstrap_stake_authorized_pubkey - .as_ref() - .unwrap_or(identity_pubkey), - vote_pubkey, - &vote_account, - &rent, - bootstrap_validator_stake_lamports, - ), - ); - bank.store_account(vote_pubkey, &vote_account); - } + let minimize_snapshot_possibly_incomplete = if is_minimized { + minimize_bank_for_snapshot( + &blockstore, + &bank, + snapshot_slot, + ending_slot.unwrap(), + ) + } else { + false + }; - // Warp ahead at least two epochs to ensure that the leader schedule will be - // updated to reflect the new bootstrap validator(s) - let minimum_warp_slot = - genesis_config.epoch_schedule.get_first_slot_in_epoch( - genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2, - ); + println!( + "Creating a version {} {}snapshot of slot {}", + snapshot_version, + snapshot_type_str, + bank.slot(), + ); - if let Some(warp_slot) = warp_slot { - if warp_slot < minimum_warp_slot { - eprintln!( - "Error: --warp-slot too close. Must be >= {minimum_warp_slot}" - ); - exit(1); - } - } else { - warn!("Warping to slot {}", minimum_warp_slot); - warp_slot = Some(minimum_warp_slot); - } + if is_incremental { + if starting_snapshot_hashes.is_none() { + eprintln!( + "Unable to create incremental snapshot without a base full \ + snapshot" + ); + exit(1); } - - if child_bank_required { - while !bank.is_complete() { - bank.register_unique_tick(); - } + let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.0 .0; + if bank.slot() <= full_snapshot_slot { + eprintln!( + "Unable to create incremental snapshot: Slot must be greater \ + than full snapshot slot. 
slot: {}, full snapshot slot: {}", + bank.slot(), + full_snapshot_slot, + ); + exit(1); } - bank.set_capitalization(); - - let bank = if let Some(warp_slot) = warp_slot { - // need to flush the write cache in order to use Storages to calculate - // the accounts hash, and need to root `bank` before flushing the cache - bank.rc.accounts.accounts_db.add_root(bank.slot()); - bank.force_flush_accounts_cache(); - Arc::new(Bank::warp_from_parent( - bank.clone(), - bank.collector_id(), - warp_slot, - CalcAccountsHashDataSource::Storages, - )) - } else { - bank - }; + let incremental_snapshot_archive_info = + snapshot_bank_utils::bank_to_incremental_snapshot_archive( + ledger_path, + &bank, + full_snapshot_slot, + Some(snapshot_version), + output_directory.clone(), + output_directory, + snapshot_archive_format, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap_or_else(|err| { + eprintln!("Unable to create incremental snapshot: {err}"); + exit(1); + }); - let minimize_snapshot_possibly_incomplete = if is_minimized { - minimize_bank_for_snapshot( - &blockstore, + println!( + "Successfully created incremental snapshot for slot {}, hash {}, \ + base slot: {}: {}", + bank.slot(), + bank.hash(), + full_snapshot_slot, + incremental_snapshot_archive_info.path().display(), + ); + } else { + let full_snapshot_archive_info = + snapshot_bank_utils::bank_to_full_snapshot_archive( + ledger_path, &bank, - snapshot_slot, - ending_slot.unwrap(), + Some(snapshot_version), + output_directory.clone(), + output_directory, + snapshot_archive_format, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, ) - } else { - false - }; + .unwrap_or_else(|err| { + eprintln!("Unable to create snapshot: {err}"); + exit(1); + }); println!( - "Creating a version {} {}snapshot of slot {}", - snapshot_version, - snapshot_type_str, + "Successfully created snapshot for slot {}, hash {}: {}", bank.slot(), + bank.hash(), + full_snapshot_archive_info.path().display(), ); - if is_incremental { - if starting_snapshot_hashes.is_none() { - eprintln!("Unable to create incremental snapshot without a base full snapshot"); - exit(1); - } - let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.0 .0; - if bank.slot() <= full_snapshot_slot { - eprintln!( - "Unable to create incremental snapshot: Slot must be greater than full snapshot slot. slot: {}, full snapshot slot: {}", - bank.slot(), - full_snapshot_slot, + if is_minimized { + let starting_epoch = bank.epoch_schedule().get_epoch(snapshot_slot); + let ending_epoch = + bank.epoch_schedule().get_epoch(ending_slot.unwrap()); + if starting_epoch != ending_epoch { + warn!( + "Minimized snapshot range crosses epoch boundary ({} to \ + {}). 
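// The incremental branch above enforces that the new snapshot's slot is
// strictly later than its base full snapshot; an incremental archive only
// stores accounts changed since the base, so an equal-or-earlier slot would
// be meaningless. The invariant, isolated:
fn validate_incremental_slots(base_full_slot: u64, bank_slot: u64) -> Result<(), String> {
    if bank_slot <= base_full_slot {
        return Err(format!(
            "bank slot {bank_slot} must be greater than full snapshot slot {base_full_slot}"
        ));
    }
    Ok(())
}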
Bank hashes after {} will not match replays from a \ + full snapshot", + starting_epoch, + ending_epoch, + bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch) ); - exit(1); } - let incremental_snapshot_archive_info = - snapshot_bank_utils::bank_to_incremental_snapshot_archive( - ledger_path, - &bank, - full_snapshot_slot, - Some(snapshot_version), - output_directory.clone(), - output_directory, - snapshot_archive_format, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, - ) - .unwrap_or_else(|err| { - eprintln!("Unable to create incremental snapshot: {err}"); - exit(1); - }); - - println!( - "Successfully created incremental snapshot for slot {}, hash {}, base slot: {}: {}", - bank.slot(), - bank.hash(), - full_snapshot_slot, - incremental_snapshot_archive_info.path().display(), - ); - } else { - let full_snapshot_archive_info = - snapshot_bank_utils::bank_to_full_snapshot_archive( - ledger_path, - &bank, - Some(snapshot_version), - output_directory.clone(), - output_directory, - snapshot_archive_format, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, - ) - .unwrap_or_else(|err| { - eprintln!("Unable to create snapshot: {err}"); - exit(1); - }); - - println!( - "Successfully created snapshot for slot {}, hash {}: {}", - bank.slot(), - bank.hash(), - full_snapshot_archive_info.path().display(), - ); - - if is_minimized { - let starting_epoch = bank.epoch_schedule().get_epoch(snapshot_slot); - let ending_epoch = - bank.epoch_schedule().get_epoch(ending_slot.unwrap()); - if starting_epoch != ending_epoch { - warn!("Minimized snapshot range crosses epoch boundary ({} to {}). Bank hashes after {} will not match replays from a full snapshot", - starting_epoch, ending_epoch, bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch)); - } - - if minimize_snapshot_possibly_incomplete { - warn!("Minimized snapshot may be incomplete due to missing accounts from CPI'd address lookup table extensions. This may lead to mismatched bank hashes while replaying."); - } + if minimize_snapshot_possibly_incomplete { + warn!( + "Minimized snapshot may be incomplete due to missing \ + accounts from CPI'd address lookup table extensions. \ + This may lead to mismatched bank hashes while replaying." + ); } } - - println!( - "Shred version: {}", - compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())) - ); - } - Err(err) => { - eprintln!("Failed to load ledger: {err:?}"); - exit(1); } + + println!( + "Shred version: {}", + compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())) + ); } - } - ("accounts", Some(arg_matches)) => { - let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot, - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - use_snapshot_archives_at_startup: value_t_or_exit!( - arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let include_sysvars = arg_matches.is_present("include_sysvars"); - let blockstore = open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - let (bank_forks, ..) 
= load_and_process_ledger( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) - .unwrap_or_else(|err| { - eprintln!("Failed to load ledger: {err:?}"); - exit(1); - }); - - let bank = bank_forks.read().unwrap().working_bank(); - let mut serializer = serde_json::Serializer::new(stdout()); - let (summarize, mut json_serializer) = - match OutputFormat::from_matches(arg_matches, "output_format", false) { - OutputFormat::Json | OutputFormat::JsonCompact => { - (false, Some(serializer.serialize_seq(None).unwrap())) - } - _ => (true, None), + ("accounts", Some(arg_matches)) => { + let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot, + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() }; - let mut total_accounts_stats = TotalAccountsStats::default(); - let rent_collector = bank.rent_collector(); - let print_account_contents = !arg_matches.is_present("no_account_contents"); - let print_account_data = !arg_matches.is_present("no_account_data"); - let data_encoding = parse_encoding_format(arg_matches); - let cli_account_new_config = CliAccountNewConfig { - data_encoding, - ..CliAccountNewConfig::default() - }; - let scan_func = |some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>| { - if let Some((pubkey, account, slot)) = some_account_tuple - .filter(|(_, account, _)| Accounts::is_loadable(account.lamports())) - { - if !include_sysvars && solana_sdk::sysvar::is_sysvar_id(pubkey) { - return; - } + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let include_sysvars = arg_matches.is_present("include_sysvars"); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); - total_accounts_stats.accumulate_account(pubkey, &account, rent_collector); + let bank = bank_forks.read().unwrap().working_bank(); + let mut serializer = serde_json::Serializer::new(stdout()); + let (summarize, mut json_serializer) = + match OutputFormat::from_matches(arg_matches, "output_format", false) { + OutputFormat::Json | OutputFormat::JsonCompact => { + (false, Some(serializer.serialize_seq(None).unwrap())) + } + _ => (true, None), + }; + let mut total_accounts_stats = TotalAccountsStats::default(); + let rent_collector = bank.rent_collector(); + let print_account_contents = !arg_matches.is_present("no_account_contents"); + let print_account_data = !arg_matches.is_present("no_account_data"); + let data_encoding = parse_encoding_format(arg_matches); + let cli_account_new_config = CliAccountNewConfig { + data_encoding, + ..CliAccountNewConfig::default() + }; + let scan_func = + |some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>| { + if let Some((pubkey, account, slot)) = some_account_tuple + .filter(|(_, account, _)| Accounts::is_loadable(account.lamports())) + { + if !include_sysvars && solana_sdk::sysvar::is_sysvar_id(pubkey) { + return; + } - if print_account_contents { - if let 
Some(json_serializer) = json_serializer.as_mut() { - let cli_account = CliAccount::new_with_config( - pubkey, - &account, - &cli_account_new_config, - ); - json_serializer.serialize_element(&cli_account).unwrap(); - } else { - output_account( + total_accounts_stats.accumulate_account( pubkey, &account, - Some(slot), - print_account_data, - data_encoding, + rent_collector, ); + + if print_account_contents { + if let Some(json_serializer) = json_serializer.as_mut() { + let cli_account = CliAccount::new_with_config( + pubkey, + &account, + &cli_account_new_config, + ); + json_serializer.serialize_element(&cli_account).unwrap(); + } else { + output_account( + pubkey, + &account, + Some(slot), + print_account_data, + data_encoding, + ); + } + } } - } + }; + let mut measure = Measure::start("scanning accounts"); + bank.scan_all_accounts(scan_func).unwrap(); + measure.stop(); + info!("{}", measure); + if let Some(json_serializer) = json_serializer { + json_serializer.end().unwrap(); + } + if summarize { + println!("\n{total_accounts_stats:#?}"); } - }; - let mut measure = Measure::start("scanning accounts"); - bank.scan_all_accounts(scan_func).unwrap(); - measure.stop(); - info!("{}", measure); - if let Some(json_serializer) = json_serializer { - json_serializer.end().unwrap(); - } - if summarize { - println!("\n{total_accounts_stats:#?}"); } - } - ("capitalization", Some(arg_matches)) => { - let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot, - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - use_snapshot_archives_at_startup: value_t_or_exit!( + ("capitalization", Some(arg_matches)) => { + let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot, + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = open_blockstore( - &ledger_path, - get_access_type(&process_options), - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - match load_and_process_ledger( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ) { - Ok((bank_forks, ..)) => { - let bank_forks = bank_forks.read().unwrap(); - let slot = bank_forks.working_bank().slot(); - let bank = bank_forks.get(slot).unwrap_or_else(|| { - eprintln!("Error: Slot {slot} is not available"); - exit(1); - }); + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + let bank_forks = bank_forks.read().unwrap(); + let slot = bank_forks.working_bank().slot(); + let bank = 
bank_forks.get(slot).unwrap_or_else(|| { + eprintln!("Error: Slot {slot} is not available"); + exit(1); + }); - if arg_matches.is_present("recalculate_capitalization") { - println!("Recalculating capitalization"); - let old_capitalization = bank.set_capitalization(); - if old_capitalization == bank.capitalization() { - eprintln!( - "Capitalization was identical: {}", - Sol(old_capitalization) - ); - } + if arg_matches.is_present("recalculate_capitalization") { + println!("Recalculating capitalization"); + let old_capitalization = bank.set_capitalization(); + if old_capitalization == bank.capitalization() { + eprintln!("Capitalization was identical: {}", Sol(old_capitalization)); } + } - if arg_matches.is_present("warp_epoch") { - let base_bank = bank; + if arg_matches.is_present("warp_epoch") { + let base_bank = bank; - let raw_warp_epoch = - value_t!(arg_matches, "warp_epoch", String).unwrap(); - let warp_epoch = if raw_warp_epoch.starts_with('+') { - base_bank.epoch() - + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() - } else { - value_t!(arg_matches, "warp_epoch", Epoch).unwrap() + let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap(); + let warp_epoch = if raw_warp_epoch.starts_with('+') { + base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() + } else { + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() + }; + if warp_epoch < base_bank.epoch() { + eprintln!( + "Error: can't warp epoch backwards: {} => {}", + base_bank.epoch(), + warp_epoch + ); + exit(1); + } + + if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) { + let inflation = match raw_inflation.as_str() { + "pico" => Inflation::pico(), + "full" => Inflation::full(), + "none" => Inflation::new_disabled(), + _ => unreachable!(), }; - if warp_epoch < base_bank.epoch() { - eprintln!( - "Error: can't warp epoch backwards: {} => {}", - base_bank.epoch(), - warp_epoch - ); - exit(1); - } + println!( + "Forcing to: {:?} (was: {:?})", + inflation, + base_bank.inflation() + ); + base_bank.set_inflation(inflation); + } - if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) { - let inflation = match raw_inflation.as_str() { - "pico" => Inflation::pico(), - "full" => Inflation::full(), - "none" => Inflation::new_disabled(), - _ => unreachable!(), - }; - println!( - "Forcing to: {:?} (was: {:?})", - inflation, - base_bank.inflation() + let next_epoch = base_bank + .epoch_schedule() + .get_first_slot_in_epoch(warp_epoch); + // disable eager rent collection because this creates many unrelated + // rent collection account updates + base_bank + .lazy_rent_collection + .store(true, std::sync::atomic::Ordering::Relaxed); + + let feature_account_balance = std::cmp::max( + genesis_config.rent.minimum_balance(Feature::size_of()), + 1, + ); + if arg_matches.is_present("enable_credits_auto_rewind") { + base_bank.unfreeze_for_ledger_tool(); + let mut force_enabled_count = 0; + if base_bank + .get_account(&feature_set::credits_auto_rewind::id()) + .is_none() + { + base_bank.store_account( + &feature_set::credits_auto_rewind::id(), + &feature::create_account( + &Feature { activated_at: None }, + feature_account_balance, + ), ); - base_bank.set_inflation(inflation); + force_enabled_count += 1; } - - let next_epoch = base_bank - .epoch_schedule() - .get_first_slot_in_epoch(warp_epoch); - // disable eager rent collection because this creates many unrelated - // rent collection account updates - base_bank - .lazy_rent_collection - .store(true, 
std::sync::atomic::Ordering::Relaxed);
-
-                        let feature_account_balance = std::cmp::max(
-                            genesis_config.rent.minimum_balance(Feature::size_of()),
-                            1,
-                        );
-                        if arg_matches.is_present("enable_credits_auto_rewind") {
-                            base_bank.unfreeze_for_ledger_tool();
-                            let mut force_enabled_count = 0;
+                            if force_enabled_count == 0 {
+                                warn!("Already credits_auto_rewind is activated (or scheduled)");
+                            }
+                            let mut store_failed_count = 0;
+                            if force_enabled_count >= 1 {
                                 if base_bank
-                                .get_account(&feature_set::credits_auto_rewind::id())
-                                .is_none()
+                                    .get_account(&feature_set::deprecate_rewards_sysvar::id())
+                                    .is_some()
                                 {
+                                    // steal some lamports from the pretty old feature not to affect
+                                    // capitalization, which doesn't affect inflation behavior!
                                     base_bank.store_account(
-                                    &feature_set::credits_auto_rewind::id(),
-                                    &feature::create_account(
-                                        &Feature { activated_at: None },
-                                        feature_account_balance,
-                                    ),
+                                        &feature_set::deprecate_rewards_sysvar::id(),
+                                        &AccountSharedData::default(),
                                     );
-                                force_enabled_count += 1;
+                                    force_enabled_count -= 1;
+                                } else {
+                                    store_failed_count += 1;
                                 }
-                            if force_enabled_count == 0 {
-                                warn!(
-                                    "Already credits_auto_rewind is activated (or scheduled)"
-                                );
+                            }
+                            assert_eq!(force_enabled_count, store_failed_count);
+                            if store_failed_count >= 1 {
+                                // we have no choice; maybe locally created blank cluster with
+                                // not-Development cluster type.
+                                let old_cap = base_bank.set_capitalization();
+                                let new_cap = base_bank.capitalization();
+                                warn!(
+                                    "Skewing capitalization a bit to enable \
+                                     credits_auto_rewind as requested: increasing {} from {} \
+                                     to {}",
+                                    feature_account_balance, old_cap, new_cap,
+                                );
+                                assert_eq!(
+                                    old_cap + feature_account_balance * store_failed_count,
+                                    new_cap
+                                );
+                            }
+                        }
+
+                        #[derive(Default, Debug)]
+                        struct PointDetail {
+                            epoch: Epoch,
+                            points: u128,
+                            stake: u128,
+                            credits: u128,
+                        }
+
+                        #[derive(Default, Debug)]
+                        struct CalculationDetail {
+                            epochs: usize,
+                            voter: Pubkey,
+                            voter_owner: Pubkey,
+                            current_effective_stake: u64,
+                            total_stake: u64,
+                            rent_exempt_reserve: u64,
+                            points: Vec<PointDetail>,
+                            base_rewards: u64,
+                            commission: u8,
+                            vote_rewards: u64,
+                            stake_rewards: u64,
+                            activation_epoch: Epoch,
+                            deactivation_epoch: Option<Epoch>,
+                            point_value: Option<PointValue>,
+                            old_credits_observed: Option<u64>,
+                            new_credits_observed: Option<u64>,
+                            skipped_reasons: String,
+                        }
+                        use solana_stake_program::stake_state::InflationPointCalculationEvent;
+                        let stake_calculation_details: DashMap<Pubkey, CalculationDetail> =
+                            DashMap::new();
+                        let last_point_value = Arc::new(RwLock::new(None));
+                        let tracer = |event: &RewardCalculationEvent| {
+                            // Currently RewardCalculationEvent enum has only Staking variant
+                            // because only staking tracing is supported!
+                            #[allow(irrefutable_let_patterns)]
+                            if let RewardCalculationEvent::Staking(pubkey, event) = event {
+                                let mut detail =
+                                    stake_calculation_details.entry(**pubkey).or_default();
+                                match event {
+                                    InflationPointCalculationEvent::CalculatedPoints(
+                                        epoch,
+                                        stake,
+                                        credits,
+                                        points,
+                                    ) => {
+                                        if *points > 0 {
+                                            detail.epochs += 1;
+                                            detail.points.push(PointDetail {
+                                                epoch: *epoch,
+                                                points: *points,
+                                                stake: *stake,
+                                                credits: *credits,
+                                            });
+                                        }
                                     }
-                            let mut store_failed_count = 0;
-                            if force_enabled_count >= 1 {
                                 if base_bank
-                                    .get_account(&feature_set::deprecate_rewards_sysvar::id())
-                                    .is_some()
                                 {
-                                    // steal some lamports from the pretty old feature not to affect
-                                    // capitalization, which doesn't affect inflation behavior!
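
Aside: the tracer registered above funnels every staking reward event into `stake_calculation_details`, a `DashMap` keyed by stake-account pubkey, via `entry().or_default()`. A minimal sketch of that accumulation pattern, separate from the patch itself and assuming only the `dashmap` crate the code above already uses (`Detail` is an illustrative stand-in for the real `CalculationDetail`):

    use dashmap::DashMap;

    #[derive(Default, Debug)]
    struct Detail {
        epochs: usize,
    }

    fn main() {
        let details: DashMap<u64, Detail> = DashMap::new();
        // entry().or_default() inserts Detail::default() on first sight and
        // returns a mutable guard, so each event updates its record in place.
        for stake_account in [1u64, 1, 2] {
            details.entry(stake_account).or_default().epochs += 1;
        }
        assert_eq!(details.get(&1).unwrap().epochs, 2);
        assert_eq!(details.get(&2).unwrap().epochs, 1);
    }

The guard returned by `or_default()` holds a shard lock, which is why the tracer binds it once (`let mut detail = ...`) before matching on the event variant.
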
- base_bank.store_account( - &feature_set::deprecate_rewards_sysvar::id(), - &AccountSharedData::default(), - ); - force_enabled_count -= 1; + InflationPointCalculationEvent::SplitRewards( + all, + voter, + staker, + point_value, + ) => { + detail.base_rewards = *all; + detail.vote_rewards = *voter; + detail.stake_rewards = *staker; + detail.point_value = Some(point_value.clone()); + // we have duplicate copies of `PointValue`s for possible + // miscalculation; do some minimum sanity check + let mut last_point_value = last_point_value.write().unwrap(); + if let Some(last_point_value) = last_point_value.as_ref() { + assert_eq!(last_point_value, point_value); } else { - store_failed_count += 1; + *last_point_value = Some(point_value.clone()); } } - assert_eq!(force_enabled_count, store_failed_count); - if store_failed_count >= 1 { - // we have no choice; maybe locally created blank cluster with - // not-Development cluster type. - let old_cap = base_bank.set_capitalization(); - let new_cap = base_bank.capitalization(); - warn!( - "Skewing capitalization a bit to enable credits_auto_rewind as \ - requested: increasing {} from {} to {}", - feature_account_balance, old_cap, new_cap, - ); - assert_eq!( - old_cap + feature_account_balance * store_failed_count, - new_cap - ); + InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch( + stake, + ) => { + detail.current_effective_stake = *stake; + } + InflationPointCalculationEvent::Commission(commission) => { + detail.commission = *commission; + } + InflationPointCalculationEvent::RentExemptReserve(reserve) => { + detail.rent_exempt_reserve = *reserve; + } + InflationPointCalculationEvent::CreditsObserved( + old_credits_observed, + new_credits_observed, + ) => { + detail.old_credits_observed = Some(*old_credits_observed); + detail.new_credits_observed = *new_credits_observed; + } + InflationPointCalculationEvent::Delegation(delegation, owner) => { + detail.voter = delegation.voter_pubkey; + detail.voter_owner = *owner; + detail.total_stake = delegation.stake; + detail.activation_epoch = delegation.activation_epoch; + if delegation.deactivation_epoch < Epoch::max_value() { + detail.deactivation_epoch = + Some(delegation.deactivation_epoch); + } + } + InflationPointCalculationEvent::Skipped(skipped_reason) => { + if detail.skipped_reasons.is_empty() { + detail.skipped_reasons = format!("{skipped_reason:?}"); + } else { + use std::fmt::Write; + let _ = write!( + &mut detail.skipped_reasons, + "/{skipped_reason:?}" + ); + } } } - - #[derive(Default, Debug)] - struct PointDetail { - epoch: Epoch, - points: u128, - stake: u128, - credits: u128, } + }; + let warped_bank = Bank::new_from_parent_with_tracer( + base_bank.clone(), + base_bank.collector_id(), + next_epoch, + tracer, + ); + warped_bank.freeze(); + let mut csv_writer = if arg_matches.is_present("csv_filename") { + let csv_filename = + value_t_or_exit!(arg_matches, "csv_filename", String); + let file = File::create(csv_filename).unwrap(); + Some(csv::WriterBuilder::new().from_writer(file)) + } else { + None + }; + + println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot()); + println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch()); + assert_capitalization(&base_bank); + assert_capitalization(&warped_bank); + let interest_per_epoch = ((warped_bank.capitalization() as f64) + / (base_bank.capitalization() as f64) + * 100_f64) + - 100_f64; + let interest_per_year = interest_per_epoch + / warped_bank.epoch_duration_in_years(base_bank.epoch()); + println!( + "Capitalization: {} 
=> {} (+{} {}%; annualized {}%)",
+                            Sol(base_bank.capitalization()),
+                            Sol(warped_bank.capitalization()),
+                            Sol(warped_bank.capitalization() - base_bank.capitalization()),
+                            interest_per_epoch,
+                            interest_per_year,
+                        );
+
+                        let mut overall_delta = 0;
+
+                        let modified_accounts =
+                            warped_bank.get_all_accounts_modified_since_parent();
+                        let mut rewarded_accounts = modified_accounts
+                            .iter()
+                            .map(|(pubkey, account)| {
+                                (
+                                    pubkey,
+                                    account,
+                                    base_bank
+                                        .get_account(pubkey)
+                                        .map(|a| a.lamports())
+                                        .unwrap_or_default(),
+                                )
+                            })
+                            .collect::<Vec<_>>();
+                        rewarded_accounts.sort_unstable_by_key(
+                            |(pubkey, account, base_lamports)| {
+                                (
+                                    *account.owner(),
+                                    *base_lamports,
+                                    account.lamports() - base_lamports,
+                                    *pubkey,
+                                )
+                            },
+                        );
-                        #[derive(Default, Debug)]
-                        struct CalculationDetail {
-                            epochs: usize,
-                            voter: Pubkey,
-                            voter_owner: Pubkey,
-                            current_effective_stake: u64,
-                            total_stake: u64,
-                            rent_exempt_reserve: u64,
-                            points: Vec<PointDetail>,
-                            base_rewards: u64,
-                            commission: u8,
-                            vote_rewards: u64,
-                            stake_rewards: u64,
-                            activation_epoch: Epoch,
-                            deactivation_epoch: Option<Epoch>,
-                            point_value: Option<PointValue>,
-                            old_credits_observed: Option<u64>,
-                            new_credits_observed: Option<u64>,
-                            skipped_reasons: String,
+                        let mut unchanged_accounts = stake_calculation_details
+                            .iter()
+                            .map(|entry| *entry.key())
+                            .collect::<HashSet<_>>()
+                            .difference(
+                                &rewarded_accounts
+                                    .iter()
+                                    .map(|(pubkey, ..)| **pubkey)
+                                    .collect(),
+                            )
+                            .map(|pubkey| (*pubkey, warped_bank.get_account(pubkey).unwrap()))
+                            .collect::<Vec<_>>();
+                        unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| {
+                            (*account.owner(), account.lamports(), *pubkey)
+                        });
+                        let unchanged_accounts = unchanged_accounts.into_iter();
+
+                        let rewarded_accounts = rewarded_accounts
+                            .into_iter()
+                            .map(|(pubkey, account, ..)| (*pubkey, account.clone()));
+
+                        let all_accounts = unchanged_accounts.chain(rewarded_accounts);
+                        for (pubkey, warped_account) in all_accounts {
+                            // Don't output sysvars; it's always updated but not related to
+                            // inflation.
+                            if solana_sdk::sysvar::is_sysvar_id(&pubkey) {
+                                continue;
                             }
-                        use solana_stake_program::stake_state::InflationPointCalculationEvent;
-                        let stake_calculation_details: DashMap<Pubkey, CalculationDetail> =
-                            DashMap::new();
-                        let last_point_value = Arc::new(RwLock::new(None));
-                        let tracer = |event: &RewardCalculationEvent| {
-                            // Currently RewardCalculationEvent enum has only Staking variant
-                            // because only staking tracing is supported!
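
The `+` block above partitions the warped bank's accounts into two groups before reporting: accounts actually rewarded (modified since the parent bank) and accounts the tracer saw that did not change, computed with a `HashSet` difference and then chained into a single iterator. A condensed, self-contained sketch of that shape, using only the standard library, with pubkeys simplified to integers (all names here are illustrative):

    use std::collections::{HashMap, HashSet};

    fn main() {
        // Accounts the reward tracer touched.
        let traced: HashSet<u8> = [1, 2, 3].into_iter().collect();
        // Accounts modified since the parent bank, with their new lamports.
        let rewarded: HashMap<u8, u64> = [(2, 20), (3, 30)].into_iter().collect();

        // Traced-but-unchanged accounts: set difference, as in the hunk above.
        let rewarded_keys: HashSet<u8> = rewarded.keys().copied().collect();
        let unchanged: Vec<u8> = traced.difference(&rewarded_keys).copied().collect();

        // Chain both groups so the report loop visits each account exactly once.
        let all: Vec<u8> = unchanged.into_iter().chain(rewarded.into_keys()).collect();
        assert_eq!(all.len(), 3);
    }
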
- #[allow(irrefutable_let_patterns)] - if let RewardCalculationEvent::Staking(pubkey, event) = event { - let mut detail = - stake_calculation_details.entry(**pubkey).or_default(); - match event { - InflationPointCalculationEvent::CalculatedPoints( - epoch, - stake, - credits, - points, - ) => { - if *points > 0 { - detail.epochs += 1; - detail.points.push(PointDetail {epoch: *epoch, points: *points, stake: *stake, credits: *credits}); - } - } - InflationPointCalculationEvent::SplitRewards( - all, - voter, - staker, - point_value, - ) => { - detail.base_rewards = *all; - detail.vote_rewards = *voter; - detail.stake_rewards = *staker; - detail.point_value = Some(point_value.clone()); - // we have duplicate copies of `PointValue`s for possible - // miscalculation; do some minimum sanity check - let mut last_point_value = last_point_value.write().unwrap(); - if let Some(last_point_value) = last_point_value.as_ref() { - assert_eq!(last_point_value, point_value); - } else { - *last_point_value = Some(point_value.clone()); - } - } - InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(stake) => { - detail.current_effective_stake = *stake; - } - InflationPointCalculationEvent::Commission(commission) => { - detail.commission = *commission; - } - InflationPointCalculationEvent::RentExemptReserve(reserve) => { - detail.rent_exempt_reserve = *reserve; + + if let Some(base_account) = base_bank.get_account(&pubkey) { + let delta = warped_account.lamports() - base_account.lamports(); + let detail_ref = stake_calculation_details.get(&pubkey); + let detail: Option<&CalculationDetail> = + detail_ref.as_ref().map(|detail_ref| detail_ref.value()); + println!( + "{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}", + format!("{pubkey}"), // format! is needed to pad/justify correctly. 
+                                base_account.owner(),
+                                Sol(base_account.lamports()),
+                                Sol(warped_account.lamports()),
+                                Sol(delta),
+                                ((warped_account.lamports() as f64)
+                                    / (base_account.lamports() as f64)
+                                    * 100_f64)
+                                    - 100_f64,
+                                detail,
+                            );
+                            if let Some(ref mut csv_writer) = csv_writer {
+                                #[derive(Serialize)]
+                                struct InflationRecord {
+                                    cluster_type: String,
+                                    rewarded_epoch: Epoch,
+                                    account: String,
+                                    owner: String,
+                                    old_balance: u64,
+                                    new_balance: u64,
+                                    data_size: usize,
+                                    delegation: String,
+                                    delegation_owner: String,
+                                    effective_stake: String,
+                                    delegated_stake: String,
+                                    rent_exempt_reserve: String,
+                                    activation_epoch: String,
+                                    deactivation_epoch: String,
+                                    earned_epochs: String,
+                                    epoch: String,
+                                    epoch_credits: String,
+                                    epoch_points: String,
+                                    epoch_stake: String,
+                                    old_credits_observed: String,
+                                    new_credits_observed: String,
+                                    base_rewards: String,
+                                    stake_rewards: String,
+                                    vote_rewards: String,
+                                    commission: String,
+                                    cluster_rewards: String,
+                                    cluster_points: String,
+                                    old_capitalization: u64,
+                                    new_capitalization: u64,
+                                }
+                                fn format_or_na<T: std::fmt::Display>(
+                                    data: Option<T>,
+                                ) -> String {
+                                    data.map(|data| format!("{data}"))
+                                        .unwrap_or_else(|| "N/A".to_owned())
+                                }
+                                let mut point_details = detail
+                                    .map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
+                                    .unwrap_or_default();
+
+                                // ensure to print even if there is no calculation/point detail
+                                if point_details.is_empty() {
+                                    point_details.push(None);
+                                }
+
+                                for point_detail in point_details {
+                                    let (cluster_rewards, cluster_points) = last_point_value
+                                        .read()
+                                        .unwrap()
+                                        .clone()
+                                        .map_or((None, None), |pv| {
+                                            (Some(pv.rewards), Some(pv.points))
+                                        });
+                                    let record = InflationRecord {
+                                        cluster_type: format!("{:?}", base_bank.cluster_type()),
+                                        rewarded_epoch: base_bank.epoch(),
+                                        account: format!("{pubkey}"),
+                                        owner: format!("{}", base_account.owner()),
+                                        old_balance: base_account.lamports(),
+                                        new_balance: warped_account.lamports(),
+                                        data_size: base_account.data().len(),
+                                        delegation: format_or_na(detail.map(|d| d.voter)),
+                                        delegation_owner: format_or_na(
+                                            detail.map(|d| d.voter_owner),
+                                        ),
+                                        effective_stake: format_or_na(
+                                            detail.map(|d| d.current_effective_stake),
+                                        ),
+                                        delegated_stake: format_or_na(
+                                            detail.map(|d| d.total_stake),
+                                        ),
+                                        rent_exempt_reserve: format_or_na(
+                                            detail.map(|d| d.rent_exempt_reserve),
+                                        ),
+                                        activation_epoch: format_or_na(detail.map(|d| {
+                                            if d.activation_epoch < Epoch::max_value() {
+                                                d.activation_epoch
+                                            } else {
+                                                // bootstrapped
+                                                0
+                                            }
+                                        })),
+                                        deactivation_epoch: format_or_na(
+                                            detail.and_then(|d| d.deactivation_epoch),
+                                        ),
+                                        earned_epochs: format_or_na(detail.map(|d| d.epochs)),
+                                        epoch: format_or_na(point_detail.map(|d| d.epoch)),
+                                        epoch_credits: 
format_or_na( + point_detail.map(|d| d.credits), + ), + epoch_points: format_or_na( + point_detail.map(|d| d.points), + ), + epoch_stake: format_or_na( + point_detail.map(|d| d.stake), + ), + old_credits_observed: format_or_na( + detail.and_then(|d| d.old_credits_observed), + ), + new_credits_observed: format_or_na( + detail.and_then(|d| d.new_credits_observed), + ), + base_rewards: format_or_na( + detail.map(|d| d.base_rewards), + ), + stake_rewards: format_or_na( + detail.map(|d| d.stake_rewards), + ), + vote_rewards: format_or_na( + detail.map(|d| d.vote_rewards), + ), + commission: format_or_na(detail.map(|d| d.commission)), + cluster_rewards: format_or_na(cluster_rewards), + cluster_points: format_or_na(cluster_points), + old_capitalization: base_bank.capitalization(), + new_capitalization: warped_bank.capitalization(), + }; + csv_writer.serialize(&record).unwrap(); } } - } - }; - let warped_bank = Bank::new_from_parent_with_tracer( - base_bank.clone(), - base_bank.collector_id(), - next_epoch, - tracer, - ); - warped_bank.freeze(); - let mut csv_writer = if arg_matches.is_present("csv_filename") { - let csv_filename = - value_t_or_exit!(arg_matches, "csv_filename", String); - let file = File::create(csv_filename).unwrap(); - Some(csv::WriterBuilder::new().from_writer(file)) + overall_delta += delta; } else { - None - }; - - println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot()); - println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch()); - assert_capitalization(&base_bank); - assert_capitalization(&warped_bank); - let interest_per_epoch = ((warped_bank.capitalization() as f64) - / (base_bank.capitalization() as f64) - * 100_f64) - - 100_f64; - let interest_per_year = interest_per_epoch - / warped_bank.epoch_duration_in_years(base_bank.epoch()); - println!( - "Capitalization: {} => {} (+{} {}%; annualized {}%)", - Sol(base_bank.capitalization()), - Sol(warped_bank.capitalization()), - Sol(warped_bank.capitalization() - base_bank.capitalization()), - interest_per_epoch, - interest_per_year, - ); - - let mut overall_delta = 0; - - let modified_accounts = - warped_bank.get_all_accounts_modified_since_parent(); - let mut rewarded_accounts = modified_accounts - .iter() - .map(|(pubkey, account)| { - ( - pubkey, - account, - base_bank - .get_account(pubkey) - .map(|a| a.lamports()) - .unwrap_or_default(), - ) - }) - .collect::>(); - rewarded_accounts.sort_unstable_by_key( - |(pubkey, account, base_lamports)| { - ( - *account.owner(), - *base_lamports, - account.lamports() - base_lamports, - *pubkey, - ) - }, + error!("new account!?: {}", pubkey); + } + } + if overall_delta > 0 { + println!("Sum of lamports changes: {}", Sol(overall_delta)); + } + } else { + if arg_matches.is_present("recalculate_capitalization") { + eprintln!("Capitalization isn't verified because it's recalculated"); + } + if arg_matches.is_present("inflation") { + eprintln!( + "Forcing inflation isn't meaningful because bank isn't warping" ); + } - let mut unchanged_accounts = stake_calculation_details - .iter() - .map(|entry| *entry.key()) - .collect::>() - .difference( - &rewarded_accounts - .iter() - .map(|(pubkey, ..)| **pubkey) - .collect(), - ) - .map(|pubkey| (*pubkey, warped_bank.get_account(pubkey).unwrap())) - .collect::>(); - unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| { - (*account.owner(), account.lamports(), *pubkey) - }); - let unchanged_accounts = unchanged_accounts.into_iter(); - - let rewarded_accounts = rewarded_accounts - .into_iter() - .map(|(pubkey, account, ..)| 
(*pubkey, account.clone())); - - let all_accounts = unchanged_accounts.chain(rewarded_accounts); - for (pubkey, warped_account) in all_accounts { - // Don't output sysvars; it's always updated but not related to - // inflation. - if solana_sdk::sysvar::is_sysvar_id(&pubkey) { - continue; - } - - if let Some(base_account) = base_bank.get_account(&pubkey) { - let delta = warped_account.lamports() - base_account.lamports(); - let detail_ref = stake_calculation_details.get(&pubkey); - let detail: Option<&CalculationDetail> = - detail_ref.as_ref().map(|detail_ref| detail_ref.value()); - println!( - "{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}", - format!("{pubkey}"), // format! is needed to pad/justify correctly. - base_account.owner(), - Sol(base_account.lamports()), - Sol(warped_account.lamports()), - Sol(delta), - ((warped_account.lamports() as f64) - / (base_account.lamports() as f64) - * 100_f64) - - 100_f64, - detail, - ); - if let Some(ref mut csv_writer) = csv_writer { - #[derive(Serialize)] - struct InflationRecord { - cluster_type: String, - rewarded_epoch: Epoch, - account: String, - owner: String, - old_balance: u64, - new_balance: u64, - data_size: usize, - delegation: String, - delegation_owner: String, - effective_stake: String, - delegated_stake: String, - rent_exempt_reserve: String, - activation_epoch: String, - deactivation_epoch: String, - earned_epochs: String, - epoch: String, - epoch_credits: String, - epoch_points: String, - epoch_stake: String, - old_credits_observed: String, - new_credits_observed: String, - base_rewards: String, - stake_rewards: String, - vote_rewards: String, - commission: String, - cluster_rewards: String, - cluster_points: String, - old_capitalization: u64, - new_capitalization: u64, - } - fn format_or_na( - data: Option, - ) -> String { - data.map(|data| format!("{data}")) - .unwrap_or_else(|| "N/A".to_owned()) - } - let mut point_details = detail - .map(|d| d.points.iter().map(Some).collect::>()) - .unwrap_or_default(); + assert_capitalization(&bank); + println!("Inflation: {:?}", bank.inflation()); + println!("RentCollector: {:?}", bank.rent_collector()); + println!("Capitalization: {}", Sol(bank.capitalization())); + } + } + ("purge", Some(arg_matches)) => { + let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot); + let end_slot = value_t!(arg_matches, "end_slot", Slot).ok(); + let perform_compaction = arg_matches.is_present("enable_compaction"); + if arg_matches.is_present("no_compaction") { + warn!("--no-compaction is deprecated and is now the default behavior."); + } + let dead_slots_only = arg_matches.is_present("dead_slots_only"); + let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize); - // ensure to print even if there is no calculation/point detail - if point_details.is_empty() { - point_details.push(None); - } + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + AccessType::PrimaryForMaintenance, + ); - for point_detail in point_details { - let (cluster_rewards, cluster_points) = - last_point_value - .read() - .unwrap() - .clone() - .map_or((None, None), |pv| { - (Some(pv.rewards), Some(pv.points)) - }); - let record = InflationRecord { - cluster_type: format!( - "{:?}", - base_bank.cluster_type() - ), - rewarded_epoch: base_bank.epoch(), - account: format!("{pubkey}"), - owner: format!("{}", base_account.owner()), - old_balance: base_account.lamports(), - new_balance: warped_account.lamports(), - data_size: base_account.data().len(), - delegation: format_or_na(detail.map(|d| d.voter)), - 
delegation_owner: format_or_na( - detail.map(|d| d.voter_owner), - ), - effective_stake: format_or_na( - detail.map(|d| d.current_effective_stake), - ), - delegated_stake: format_or_na( - detail.map(|d| d.total_stake), - ), - rent_exempt_reserve: format_or_na( - detail.map(|d| d.rent_exempt_reserve), - ), - activation_epoch: format_or_na(detail.map(|d| { - if d.activation_epoch < Epoch::max_value() { - d.activation_epoch - } else { - // bootstraped - 0 - } - })), - deactivation_epoch: format_or_na( - detail.and_then(|d| d.deactivation_epoch), - ), - earned_epochs: format_or_na( - detail.map(|d| d.epochs), - ), - epoch: format_or_na(point_detail.map(|d| d.epoch)), - epoch_credits: format_or_na( - point_detail.map(|d| d.credits), - ), - epoch_points: format_or_na( - point_detail.map(|d| d.points), - ), - epoch_stake: format_or_na( - point_detail.map(|d| d.stake), - ), - old_credits_observed: format_or_na( - detail.and_then(|d| d.old_credits_observed), - ), - new_credits_observed: format_or_na( - detail.and_then(|d| d.new_credits_observed), - ), - base_rewards: format_or_na( - detail.map(|d| d.base_rewards), - ), - stake_rewards: format_or_na( - detail.map(|d| d.stake_rewards), - ), - vote_rewards: format_or_na( - detail.map(|d| d.vote_rewards), - ), - commission: format_or_na( - detail.map(|d| d.commission), - ), - cluster_rewards: format_or_na(cluster_rewards), - cluster_points: format_or_na(cluster_points), - old_capitalization: base_bank.capitalization(), - new_capitalization: warped_bank.capitalization(), - }; - csv_writer.serialize(&record).unwrap(); - } - } - overall_delta += delta; - } else { - error!("new account!?: {}", pubkey); + let end_slot = match end_slot { + Some(end_slot) => end_slot, + None => match blockstore.slot_meta_iterator(start_slot) { + Ok(metas) => { + let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + if slots.is_empty() { + eprintln!("Purge range is empty"); + exit(1); } + *slots.last().unwrap() } - if overall_delta > 0 { - println!("Sum of lamports changes: {}", Sol(overall_delta)); - } - } else { - if arg_matches.is_present("recalculate_capitalization") { - eprintln!( - "Capitalization isn't verified because it's recalculated" - ); - } - if arg_matches.is_present("inflation") { - eprintln!( - "Forcing inflation isn't meaningful because bank isn't warping" - ); + Err(err) => { + eprintln!("Unable to read the Ledger: {err:?}"); + exit(1); } + }, + }; - assert_capitalization(&bank); - println!("Inflation: {:?}", bank.inflation()); - println!("RentCollector: {:?}", bank.rent_collector()); - println!("Capitalization: {}", Sol(bank.capitalization())); - } - } - Err(err) => { - eprintln!("Failed to load ledger: {err:?}"); + if end_slot < start_slot { + eprintln!("end slot {end_slot} is less than start slot {start_slot}"); exit(1); } - } - } - ("purge", Some(arg_matches)) => { - let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot); - let end_slot = value_t!(arg_matches, "end_slot", Slot).ok(); - let perform_compaction = arg_matches.is_present("enable_compaction"); - if arg_matches.is_present("no_compaction") { - warn!("--no-compaction is deprecated and is now the default behavior."); - } - let dead_slots_only = arg_matches.is_present("dead_slots_only"); - let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize); - - let blockstore = open_blockstore( - &ledger_path, - AccessType::PrimaryForMaintenance, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, + info!( + "Purging data from slots {} to {} ({} slots) (do 
compaction: {}) (dead slot \ + only: {})", + start_slot, + end_slot, + end_slot - start_slot, + perform_compaction, + dead_slots_only, ); - - let end_slot = match end_slot { - Some(end_slot) => end_slot, - None => match blockstore.slot_meta_iterator(start_slot) { - Ok(metas) => { - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); - if slots.is_empty() { - eprintln!("Purge range is empty"); - exit(1); - } - *slots.last().unwrap() + let purge_from_blockstore = |start_slot, end_slot| { + blockstore.purge_from_next_slots(start_slot, end_slot); + if perform_compaction { + blockstore.purge_and_compact_slots(start_slot, end_slot); + } else { + blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact); } - Err(err) => { - eprintln!("Unable to read the Ledger: {err:?}"); - exit(1); + }; + if !dead_slots_only { + let slots_iter = &(start_slot..=end_slot).chunks(batch_size); + for slots in slots_iter { + let slots = slots.collect::>(); + assert!(!slots.is_empty()); + + let start_slot = *slots.first().unwrap(); + let end_slot = *slots.last().unwrap(); + info!( + "Purging chunked slots from {} to {} ({} slots)", + start_slot, + end_slot, + end_slot - start_slot + ); + purge_from_blockstore(start_slot, end_slot); } - }, - }; - - if end_slot < start_slot { - eprintln!("end slot {end_slot} is less than start slot {start_slot}"); - exit(1); - } - info!( - "Purging data from slots {} to {} ({} slots) (do compaction: {}) (dead slot only: {})", - start_slot, - end_slot, - end_slot - start_slot, - perform_compaction, - dead_slots_only, - ); - let purge_from_blockstore = |start_slot, end_slot| { - blockstore.purge_from_next_slots(start_slot, end_slot); - if perform_compaction { - blockstore.purge_and_compact_slots(start_slot, end_slot); } else { - blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact); - } - }; - if !dead_slots_only { - let slots_iter = &(start_slot..=end_slot).chunks(batch_size); - for slots in slots_iter { - let slots = slots.collect::>(); - assert!(!slots.is_empty()); - - let start_slot = *slots.first().unwrap(); - let end_slot = *slots.last().unwrap(); - info!( - "Purging chunked slots from {} to {} ({} slots)", - start_slot, - end_slot, - end_slot - start_slot - ); - purge_from_blockstore(start_slot, end_slot); - } - } else { - let dead_slots_iter = blockstore - .dead_slots_iterator(start_slot) - .unwrap() - .take_while(|s| *s <= end_slot); - for dead_slot in dead_slots_iter { - info!("Purging dead slot {}", dead_slot); - purge_from_blockstore(dead_slot, dead_slot); + let dead_slots_iter = blockstore + .dead_slots_iterator(start_slot) + .unwrap() + .take_while(|s| *s <= end_slot); + for dead_slot in dead_slots_iter { + info!("Purging dead slot {}", dead_slot); + purge_from_blockstore(dead_slot, dead_slot); + } } } - } - ("list-roots", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - - let max_height = value_t!(arg_matches, "max_height", usize).unwrap_or(usize::MAX); - let start_root = value_t!(arg_matches, "start_root", Slot).unwrap_or(0); - let num_roots = value_t_or_exit!(arg_matches, "num_roots", usize); - - let iter = blockstore - .rooted_slot_iterator(start_root) - .expect("Failed to get rooted slot"); - - let mut output: Box = - if let Some(path) = arg_matches.value_of("slot_list") { - match File::create(path) { - Ok(file) => Box::new(file), - _ => Box::new(stdout()), - } - } else { - Box::new(stdout()) - }; + ("list-roots", 
Some(arg_matches)) => {
+                    let blockstore =
+                        open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
+
+                    let max_height =
+                        value_t!(arg_matches, "max_height", usize).unwrap_or(usize::MAX);
+                    let start_root = value_t!(arg_matches, "start_root", Slot).unwrap_or(0);
+                    let num_roots = value_t_or_exit!(arg_matches, "num_roots", usize);
+
+                    let iter = blockstore
+                        .rooted_slot_iterator(start_root)
+                        .expect("Failed to get rooted slot");
+
+                    let mut output: Box<dyn Write> =
+                        if let Some(path) = arg_matches.value_of("slot_list") {
+                            match File::create(path) {
+                                Ok(file) => Box::new(file),
+                                _ => Box::new(stdout()),
+                            }
+                        } else {
+                            Box::new(stdout())
+                        };
-            iter.take(num_roots)
-                .take_while(|slot| *slot <= max_height as u64)
-                .collect::<Vec<_>>()
-                .into_iter()
-                .rev()
-                .for_each(|slot| {
-                    let blockhash = blockstore
-                        .get_slot_entries(slot, 0)
-                        .unwrap()
-                        .last()
-                        .unwrap()
-                        .hash;
+                    iter.take(num_roots)
+                        .take_while(|slot| *slot <= max_height as u64)
+                        .collect::<Vec<_>>()
+                        .into_iter()
+                        .rev()
+                        .for_each(|slot| {
+                            let blockhash = blockstore
+                                .get_slot_entries(slot, 0)
+                                .unwrap()
+                                .last()
+                                .unwrap()
+                                .hash;
-                    writeln!(output, "{slot}: {blockhash:?}").expect("failed to write");
-                });
-        }
-        ("latest-optimistic-slots", Some(arg_matches)) => {
-            let blockstore = open_blockstore(
-                &ledger_path,
-                AccessType::Secondary,
-                wal_recovery_mode,
-                force_update_to_open,
-                enforce_ulimit_nofile,
-            );
-            let num_slots = value_t_or_exit!(arg_matches, "num_slots", usize);
-            let exclude_vote_only_slots = arg_matches.is_present("exclude_vote_only_slots");
-            let slots =
-                get_latest_optimistic_slots(&blockstore, num_slots, exclude_vote_only_slots);
+                            writeln!(output, "{slot}: {blockhash:?}").expect("failed to write");
+                        });
+                }
+                ("latest-optimistic-slots", Some(arg_matches)) => {
+                    let blockstore =
+                        open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
+                    let num_slots = value_t_or_exit!(arg_matches, "num_slots", usize);
+                    let exclude_vote_only_slots = arg_matches.is_present("exclude_vote_only_slots");
+                    let slots = get_latest_optimistic_slots(
+                        &blockstore,
+                        num_slots,
+                        exclude_vote_only_slots,
+                    );
-            println!(
-                "{:>20} {:>44} {:>32} {:>13}",
-                "Slot", "Hash", "Timestamp", "Vote Only?"
-            );
-            for (slot, hash_and_timestamp_opt, contains_nonvote) in slots.iter() {
-                let (time_str, hash_str) =
-                    if let Some((hash, timestamp)) = hash_and_timestamp_opt {
-                        let secs: u64 = (timestamp / 1_000) as u64;
-                        let nanos: u32 = ((timestamp % 1_000) * 1_000_000) as u32;
-                        let t = UNIX_EPOCH + Duration::new(secs, nanos);
-                        let datetime: DateTime<Local> = t.into();
-
-                        (datetime.to_rfc3339(), format!("{hash}"))
-                    } else {
-                        let unknown = "Unknown";
-                        (String::from(unknown), String::from(unknown))
-                    };
                     println!(
                         "{:>20} {:>44} {:>32} {:>13}",
-                slot, &hash_str, &time_str, !contains_nonvote
+                        "Slot", "Hash", "Timestamp", "Vote Only?"
);
+                    for (slot, hash_and_timestamp_opt, contains_nonvote) in slots.iter() {
+                        let (time_str, hash_str) =
+                            if let Some((hash, timestamp)) = hash_and_timestamp_opt {
+                                let secs: u64 = (timestamp / 1_000) as u64;
+                                let nanos: u32 = ((timestamp % 1_000) * 1_000_000) as u32;
+                                let t = UNIX_EPOCH + Duration::new(secs, nanos);
+                                let datetime: DateTime<Local> = t.into();
+
+                                (datetime.to_rfc3339(), format!("{hash}"))
+                            } else {
+                                let unknown = "Unknown";
+                                (String::from(unknown), String::from(unknown))
+                            };
+                        println!(
+                            "{:>20} {:>44} {:>32} {:>13}",
+                            slot, &hash_str, &time_str, !contains_nonvote
+                        );
+                    }
                 }
-        ("repair-roots", Some(arg_matches)) => {
-            let blockstore = open_blockstore(
-                &ledger_path,
-                AccessType::Primary,
-                wal_recovery_mode,
-                force_update_to_open,
-                enforce_ulimit_nofile,
-            );
-
-            let start_root = value_t!(arg_matches, "start_root", Slot)
-                .unwrap_or_else(|_| blockstore.max_root());
-            let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
-            let end_root = value_t!(arg_matches, "end_root", Slot)
-                .unwrap_or_else(|_| start_root.saturating_sub(max_slots));
-            assert!(start_root > end_root);
-            let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
-            if arg_matches.is_present("end_root") && num_slots > max_slots {
-                eprintln!(
-                    "Requested range {num_slots} too large, max {max_slots}. \
-                    Either adjust `--until` value, or pass a larger `--repair-limit` \
-                    to override the limit",
+                ("repair-roots", Some(arg_matches)) => {
+                    let blockstore =
+                        open_blockstore(&ledger_path, arg_matches, AccessType::Primary);
+
+                    let start_root = value_t!(arg_matches, "start_root", Slot)
+                        .unwrap_or_else(|_| blockstore.max_root());
+                    let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
+                    let end_root = value_t!(arg_matches, "end_root", Slot)
+                        .unwrap_or_else(|_| start_root.saturating_sub(max_slots));
+                    assert!(start_root > end_root);
+                    let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
+                    if arg_matches.is_present("end_root") && num_slots > max_slots {
+                        eprintln!(
+                            "Requested range {num_slots} too large, max {max_slots}. 
Either adjust \ + `--until` value, or pass a larger `--repair-limit` to override the limit", ); - exit(1); - } - - let num_repaired_roots = blockstore - .scan_and_fix_roots(Some(start_root), Some(end_root), &AtomicBool::new(false)) - .unwrap_or_else(|err| { - eprintln!("Unable to repair roots: {err}"); exit(1); - }); - println!("Successfully repaired {num_repaired_roots} roots"); - } - ("bounds", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); + } + + let num_repaired_roots = blockstore + .scan_and_fix_roots( + Some(start_root), + Some(end_root), + &AtomicBool::new(false), + ) + .unwrap_or_else(|err| { + eprintln!("Unable to repair roots: {err}"); + exit(1); + }); + println!("Successfully repaired {num_repaired_roots} roots"); + } + ("bounds", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - match blockstore.slot_meta_iterator(0) { - Ok(metas) => { - let output_format = - OutputFormat::from_matches(arg_matches, "output_format", false); - let all = arg_matches.is_present("all"); + match blockstore.slot_meta_iterator(0) { + Ok(metas) => { + let output_format = + OutputFormat::from_matches(arg_matches, "output_format", false); + let all = arg_matches.is_present("all"); - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); - let slot_bounds = if slots.is_empty() { - SlotBounds::default() - } else { - // Collect info about slot bounds - let mut bounds = SlotBounds { - slots: SlotInfo { - total: slots.len(), - first: Some(*slots.first().unwrap()), - last: Some(*slots.last().unwrap()), - ..SlotInfo::default() - }, - ..SlotBounds::default() - }; - if all { - bounds.all_slots = Some(&slots); - } + let slot_bounds = if slots.is_empty() { + SlotBounds::default() + } else { + // Collect info about slot bounds + let mut bounds = SlotBounds { + slots: SlotInfo { + total: slots.len(), + first: Some(*slots.first().unwrap()), + last: Some(*slots.last().unwrap()), + ..SlotInfo::default() + }, + ..SlotBounds::default() + }; + if all { + bounds.all_slots = Some(&slots); + } - // Consider also rooted slots, if present - if let Ok(rooted) = blockstore.rooted_slot_iterator(0) { - let mut first_rooted = None; - let mut last_rooted = None; - let mut total_rooted = 0; - for (i, slot) in rooted.into_iter().enumerate() { - if i == 0 { - first_rooted = Some(slot); + // Consider also rooted slots, if present + if let Ok(rooted) = blockstore.rooted_slot_iterator(0) { + let mut first_rooted = None; + let mut last_rooted = None; + let mut total_rooted = 0; + for (i, slot) in rooted.into_iter().enumerate() { + if i == 0 { + first_rooted = Some(slot); + } + last_rooted = Some(slot); + total_rooted += 1; } - last_rooted = Some(slot); - total_rooted += 1; + let last_root_for_comparison = last_rooted.unwrap_or_default(); + let count_past_root = slots + .iter() + .rev() + .take_while(|slot| *slot > &last_root_for_comparison) + .count(); + + bounds.roots = SlotInfo { + total: total_rooted, + first: first_rooted, + last: last_rooted, + num_after_last_root: Some(count_past_root), + }; } - let last_root_for_comparison = last_rooted.unwrap_or_default(); - let count_past_root = slots - .iter() - .rev() - .take_while(|slot| *slot > &last_root_for_comparison) - .count(); - - bounds.roots = SlotInfo { - total: total_rooted, - first: first_rooted, - last: last_rooted, - 
num_after_last_root: Some(count_past_root), - }; - } - bounds - }; + bounds + }; - // Print collected data - println!("{}", output_format.formatted_string(&slot_bounds)); - } - Err(err) => { - eprintln!("Unable to read the Ledger: {err:?}"); - exit(1); + // Print collected data + println!("{}", output_format.formatted_string(&slot_bounds)); + } + Err(err) => { + eprintln!("Unable to read the Ledger: {err:?}"); + exit(1); + } + }; + } + ("analyze-storage", Some(arg_matches)) => { + analyze_storage( + &open_blockstore(&ledger_path, arg_matches, AccessType::Secondary).db(), + ); + } + ("compute-slot-cost", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + + let mut slots: Vec = vec![]; + if !arg_matches.is_present("slots") { + if let Ok(metas) = blockstore.slot_meta_iterator(0) { + slots = metas.map(|(slot, _)| slot).collect(); + } + } else { + slots = values_t_or_exit!(arg_matches, "slots", Slot); } - }; - } - ("analyze-storage", _) => { - analyze_storage( - &open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ) - .db(), - ); - } - ("compute-slot-cost", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - force_update_to_open, - enforce_ulimit_nofile, - ); - let mut slots: Vec = vec![]; - if !arg_matches.is_present("slots") { - if let Ok(metas) = blockstore.slot_meta_iterator(0) { - slots = metas.map(|(slot, _)| slot).collect(); + for slot in slots { + if let Err(err) = compute_slot_cost(&blockstore, slot) { + eprintln!("{err}"); + } } - } else { - slots = values_t_or_exit!(arg_matches, "slots", Slot); } - - for slot in slots { - if let Err(err) = compute_slot_cost(&blockstore, slot) { + ("print-file-metadata", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let sst_file_name = arg_matches.value_of("file_name"); + if let Err(err) = print_blockstore_file_metadata(&blockstore, &sst_file_name) { eprintln!("{err}"); } } - } - ("print-file-metadata", Some(arg_matches)) => { - let blockstore = open_blockstore( - &ledger_path, - AccessType::Secondary, - wal_recovery_mode, - false, - enforce_ulimit_nofile, - ); - let sst_file_name = arg_matches.value_of("file_name"); - if let Err(err) = print_blockstore_file_metadata(&blockstore, &sst_file_name) { - eprintln!("{err}"); + ("", _) => { + eprintln!("{}", matches.usage()); + exit(1); } - } - ("", _) => { - eprintln!("{}", matches.usage()); - exit(1); - } - _ => unreachable!(), - }; - measure_total_execution_time.stop(); - info!("{}", measure_total_execution_time); - } + _ => unreachable!(), + }; + } + }; + measure_total_execution_time.stop(); + info!("{}", measure_total_execution_time); } #[cfg(test)] diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 46c2a62f1bfb13..4c953b37baa0f2 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -1,7 +1,25 @@ use { - serde::Serialize, - solana_cli_output::{QuietDisplay, VerboseDisplay}, - std::fmt::{Display, Formatter, Result}, + crate::ledger_utils::get_program_ids, + chrono::{Local, TimeZone}, + serde::{Deserialize, Serialize}, + solana_cli_output::{display::writeln_transaction, OutputFormat, QuietDisplay, VerboseDisplay}, + solana_entry::entry::Entry, + solana_ledger::blockstore::Blockstore, + solana_sdk::{ + clock::{Slot, UnixTimestamp}, + hash::Hash, + native_token::lamports_to_sol, + 
pubkey::Pubkey,
+    },
+    solana_transaction_status::{
+        EncodedConfirmedBlock, EncodedTransactionWithStatusMeta, EntrySummary, Rewards,
+    },
+    std::{
+        collections::HashMap,
+        fmt::{self, Display, Formatter},
+        io::{stdout, Write},
+        result::Result,
+    },
 };
 
 #[derive(Serialize, Debug, Default)]
@@ -27,7 +45,7 @@ impl VerboseDisplay for SlotBounds<'_> {}
 impl QuietDisplay for SlotBounds<'_> {}
 
 impl Display for SlotBounds<'_> {
-    fn fmt(&self, f: &mut Formatter) -> Result {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
         if self.slots.total > 0 {
             let first = self.slots.first.unwrap();
             let last = self.slots.last.unwrap();
@@ -67,3 +85,466 @@ impl Display for SlotBounds<'_> {
         Ok(())
     }
 }
+
+fn writeln_entry(f: &mut dyn fmt::Write, i: usize, entry: &CliEntry, prefix: &str) -> fmt::Result {
+    writeln!(
+        f,
+        "{prefix}Entry {} - num_hashes: {}, hash: {}, transactions: {}, starting_transaction_index: {}",
+        i, entry.num_hashes, entry.hash, entry.num_transactions, entry.starting_transaction_index,
+    )
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CliEntries {
+    pub entries: Vec<CliEntry>,
+    #[serde(skip_serializing)]
+    pub slot: Slot,
+}
+
+impl QuietDisplay for CliEntries {}
+impl VerboseDisplay for CliEntries {}
+
+impl fmt::Display for CliEntries {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        writeln!(f, "Slot {}", self.slot)?;
+        for (i, entry) in self.entries.iter().enumerate() {
+            writeln_entry(f, i, entry, " ")?;
+        }
+        Ok(())
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CliEntry {
+    num_hashes: u64,
+    hash: String,
+    num_transactions: u64,
+    starting_transaction_index: usize,
+}
+
+impl From<EntrySummary> for CliEntry {
+    fn from(entry_summary: EntrySummary) -> Self {
+        Self {
+            num_hashes: entry_summary.num_hashes,
+            hash: entry_summary.hash.to_string(),
+            num_transactions: entry_summary.num_transactions,
+            starting_transaction_index: entry_summary.starting_transaction_index,
+        }
+    }
+}
+
+impl From<&CliPopulatedEntry> for CliEntry {
+    fn from(populated_entry: &CliPopulatedEntry) -> Self {
+        Self {
+            num_hashes: populated_entry.num_hashes,
+            hash: populated_entry.hash.clone(),
+            num_transactions: populated_entry.num_transactions,
+            starting_transaction_index: populated_entry.starting_transaction_index,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CliPopulatedEntry {
+    num_hashes: u64,
+    hash: String,
+    num_transactions: u64,
+    starting_transaction_index: usize,
+    transactions: Vec<EncodedTransactionWithStatusMeta>,
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CliBlockWithEntries {
+    #[serde(flatten)]
+    pub encoded_confirmed_block: EncodedConfirmedBlockWithEntries,
+    #[serde(skip_serializing)]
+    pub slot: Slot,
+}
+
+impl QuietDisplay for CliBlockWithEntries {}
+impl VerboseDisplay for CliBlockWithEntries {}
+
+impl fmt::Display for CliBlockWithEntries {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        writeln!(f, "Slot: {}", self.slot)?;
+        writeln!(
+            f,
+            "Parent Slot: {}",
+            self.encoded_confirmed_block.parent_slot
+        )?;
+        writeln!(f, "Blockhash: {}", self.encoded_confirmed_block.blockhash)?;
+        writeln!(
+            f,
+            "Previous Blockhash: {}",
+            self.encoded_confirmed_block.previous_blockhash
+        )?;
+        if let Some(block_time) = self.encoded_confirmed_block.block_time {
+            writeln!(
+                f,
+                "Block Time: {:?}",
+                Local.timestamp_opt(block_time, 0).unwrap()
+            )?;
+        }
+        if let Some(block_height) = self.encoded_confirmed_block.block_height {
+            writeln!(f, "Block Height: {block_height:?}")?;
+        }
+        if !self.encoded_confirmed_block.rewards.is_empty() {
+            let mut rewards = self.encoded_confirmed_block.rewards.clone();
+            rewards.sort_by(|a, b| a.pubkey.cmp(&b.pubkey));
+            let mut total_rewards = 0;
+            writeln!(f, "Rewards:")?;
+            writeln!(
+                f,
+                " {:<44} {:^15} {:<15} {:<20} {:>14} {:>10}",
+                "Address", "Type", "Amount", "New Balance", "Percent Change", "Commission"
+            )?;
+            for reward in rewards {
+                let sign = if reward.lamports < 0 { "-" } else { "" };
+
+                total_rewards += reward.lamports;
+                #[allow(clippy::format_in_format_args)]
+                writeln!(
+                    f,
+                    " {:<44} {:^15} {:>15} {} {}",
+                    reward.pubkey,
+                    if let Some(reward_type) = reward.reward_type {
+                        format!("{reward_type}")
+                    } else {
+                        "-".to_string()
+                    },
+                    format!(
+                        "{}◎{:<14.9}",
+                        sign,
+                        lamports_to_sol(reward.lamports.unsigned_abs())
+                    ),
+                    if reward.post_balance == 0 {
+                        " - -".to_string()
+                    } else {
+                        format!(
+                            "◎{:<19.9} {:>13.9}%",
+                            lamports_to_sol(reward.post_balance),
+                            (reward.lamports.abs() as f64
+                                / (reward.post_balance as f64 - reward.lamports as f64))
+                                * 100.0
+                        )
+                    },
+                    reward
+                        .commission
+                        .map(|commission| format!("{commission:>9}%"))
+                        .unwrap_or_else(|| " -".to_string())
+                )?;
+            }
+
+            let sign = if total_rewards < 0 { "-" } else { "" };
+            writeln!(
+                f,
+                "Total Rewards: {}◎{:<12.9}",
+                sign,
+                lamports_to_sol(total_rewards.unsigned_abs())
+            )?;
+        }
+        for (index, entry) in self.encoded_confirmed_block.entries.iter().enumerate() {
+            writeln_entry(f, index, &entry.into(), "")?;
+            for (index, transaction_with_meta) in entry.transactions.iter().enumerate() {
+                writeln!(f, " Transaction {index}:")?;
+                writeln_transaction(
+                    f,
+                    &transaction_with_meta.transaction.decode().unwrap(),
+                    transaction_with_meta.meta.as_ref(),
+                    " ",
+                    None,
+                    None,
+                )?;
+            }
+        }
+        Ok(())
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct EncodedConfirmedBlockWithEntries {
+    pub previous_blockhash: String,
+    pub blockhash: String,
+    pub parent_slot: Slot,
+    pub entries: Vec<CliPopulatedEntry>,
+    pub rewards: Rewards,
+    pub block_time: Option<UnixTimestamp>,
+    pub block_height: Option<u64>,
+}
+
+impl EncodedConfirmedBlockWithEntries {
+    pub fn try_from(
+        block: EncodedConfirmedBlock,
+        entries_iterator: impl Iterator<Item = EntrySummary>,
+    ) -> Result<Self, String> {
+        let mut entries = vec![];
+        for (i, entry) in entries_iterator.enumerate() {
+            let ending_transaction_index = entry
+                .starting_transaction_index
+                .saturating_add(entry.num_transactions as usize);
+            let transactions = block
+                .transactions
+                .get(entry.starting_transaction_index..ending_transaction_index)
+                .ok_or(format!(
+                    "Mismatched entry data and transactions: entry {:?}",
+                    i
+                ))?;
+            entries.push(CliPopulatedEntry {
+                num_hashes: entry.num_hashes,
+                hash: entry.hash.to_string(),
+                num_transactions: entry.num_transactions,
+                starting_transaction_index: entry.starting_transaction_index,
+                transactions: transactions.to_vec(),
+            });
+        }
+        Ok(Self {
+            previous_blockhash: block.previous_blockhash,
+            blockhash: block.blockhash,
+            parent_slot: block.parent_slot,
+            entries,
+            rewards: block.rewards,
+            block_time: block.block_time,
+            block_height: block.block_height,
+        })
+    }
+}
+
+pub fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &OutputFormat) {
+    // Note: rewards are not output in JSON yet
+    if *method == OutputFormat::Display {
+        if let Ok(Some(rewards)) = blockstore.read_rewards(slot) {
+            if !rewards.is_empty() {
+                println!(" Rewards:");
+                println!(
+                    " {:<44} {:^15} {:<15} {:<20} {:>10}",
+                    "Address", "Type", "Amount", "New Balance", 
"Commission", + ); + + for reward in rewards { + let sign = if reward.lamports < 0 { "-" } else { "" }; + println!( + " {:<44} {:^15} {}◎{:<14.9} ◎{:<18.9} {}", + reward.pubkey, + if let Some(reward_type) = reward.reward_type { + format!("{reward_type}") + } else { + "-".to_string() + }, + sign, + lamports_to_sol(reward.lamports.unsigned_abs()), + lamports_to_sol(reward.post_balance), + reward + .commission + .map(|commission| format!("{commission:>9}%")) + .unwrap_or_else(|| " -".to_string()) + ); + } + } + } + } +} + +pub fn output_entry( + blockstore: &Blockstore, + method: &OutputFormat, + slot: Slot, + entry_index: usize, + entry: Entry, +) { + match method { + OutputFormat::Display => { + println!( + " Entry {} - num_hashes: {}, hash: {}, transactions: {}", + entry_index, + entry.num_hashes, + entry.hash, + entry.transactions.len() + ); + for (transactions_index, transaction) in entry.transactions.into_iter().enumerate() { + println!(" Transaction {transactions_index}"); + let tx_signature = transaction.signatures[0]; + let tx_status_meta = blockstore + .read_transaction_status((tx_signature, slot)) + .unwrap_or_else(|err| { + eprintln!( + "Failed to read transaction status for {} at slot {}: {}", + transaction.signatures[0], slot, err + ); + None + }) + .map(|meta| meta.into()); + + solana_cli_output::display::println_transaction( + &transaction, + tx_status_meta.as_ref(), + " ", + None, + None, + ); + } + } + OutputFormat::Json => { + // Note: transaction status is not output in JSON yet + serde_json::to_writer(stdout(), &entry).expect("serialize entry"); + stdout().write_all(b",\n").expect("newline"); + } + _ => unreachable!(), + } +} + +pub fn output_slot( + blockstore: &Blockstore, + slot: Slot, + allow_dead_slots: bool, + method: &OutputFormat, + verbose_level: u64, + all_program_ids: &mut HashMap, +) -> Result<(), String> { + if blockstore.is_dead(slot) { + if allow_dead_slots { + if *method == OutputFormat::Display { + println!(" Slot is dead"); + } + } else { + return Err("Dead slot".to_string()); + } + } + + let (entries, num_shreds, is_full) = blockstore + .get_slot_entries_with_shred_info(slot, 0, allow_dead_slots) + .map_err(|err| format!("Failed to load entries for slot {slot}: {err:?}"))?; + + if *method == OutputFormat::Display { + if let Ok(Some(meta)) = blockstore.meta(slot) { + if verbose_level >= 1 { + println!(" {meta:?} is_full: {is_full}"); + } else { + println!( + " num_shreds: {}, parent_slot: {:?}, next_slots: {:?}, num_entries: {}, \ + is_full: {}", + num_shreds, + meta.parent_slot, + meta.next_slots, + entries.len(), + is_full, + ); + } + } + } + + if verbose_level >= 2 { + for (entry_index, entry) in entries.into_iter().enumerate() { + output_entry(blockstore, method, slot, entry_index, entry); + } + + output_slot_rewards(blockstore, slot, method); + } else if verbose_level >= 1 { + let mut transactions = 0; + let mut num_hashes = 0; + let mut program_ids = HashMap::new(); + let blockhash = if let Some(entry) = entries.last() { + entry.hash + } else { + Hash::default() + }; + + for entry in entries { + transactions += entry.transactions.len(); + num_hashes += entry.num_hashes; + for transaction in entry.transactions { + for program_id in get_program_ids(&transaction) { + *program_ids.entry(*program_id).or_insert(0) += 1; + } + } + } + + println!(" Transactions: {transactions}, hashes: {num_hashes}, block_hash: {blockhash}",); + for (pubkey, count) in program_ids.iter() { + *all_program_ids.entry(*pubkey).or_insert(0) += count; + } + println!(" 
Programs:"); + output_sorted_program_ids(program_ids); + } + Ok(()) +} + +pub fn output_ledger( + blockstore: Blockstore, + starting_slot: Slot, + ending_slot: Slot, + allow_dead_slots: bool, + method: OutputFormat, + num_slots: Option, + verbose_level: u64, + only_rooted: bool, +) { + let slot_iterator = blockstore + .slot_meta_iterator(starting_slot) + .unwrap_or_else(|err| { + eprintln!("Failed to load entries starting from slot {starting_slot}: {err:?}"); + std::process::exit(1); + }); + + if method == OutputFormat::Json { + stdout().write_all(b"{\"ledger\":[\n").expect("open array"); + } + + let num_slots = num_slots.unwrap_or(Slot::MAX); + let mut num_printed = 0; + let mut all_program_ids = HashMap::new(); + for (slot, slot_meta) in slot_iterator { + if only_rooted && !blockstore.is_root(slot) { + continue; + } + if slot > ending_slot { + break; + } + + match method { + OutputFormat::Display => { + println!("Slot {} root?: {}", slot, blockstore.is_root(slot)) + } + OutputFormat::Json => { + serde_json::to_writer(stdout(), &slot_meta).expect("serialize slot_meta"); + stdout().write_all(b",\n").expect("newline"); + } + _ => unreachable!(), + } + + if let Err(err) = output_slot( + &blockstore, + slot, + allow_dead_slots, + &method, + verbose_level, + &mut all_program_ids, + ) { + eprintln!("{err}"); + } + num_printed += 1; + if num_printed >= num_slots as usize { + break; + } + } + + if method == OutputFormat::Json { + stdout().write_all(b"\n]}\n").expect("close array"); + } else { + println!("Summary of Programs:"); + output_sorted_program_ids(all_program_ids); + } +} + +pub fn output_sorted_program_ids(program_ids: HashMap) { + let mut program_ids_array: Vec<_> = program_ids.into_iter().collect(); + // Sort descending by count of program id + program_ids_array.sort_by(|a, b| b.1.cmp(&a.1)); + for (program_id, count) in program_ids_array.iter() { + println!("{:<44}: {}", program_id.to_string(), count); + } +} diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index c1a65170a239fa..97d676aff7831f 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -9,10 +9,8 @@ use { syscalls::create_program_runtime_environment_v1, }, solana_clap_utils::input_parsers::pubkeys_of, - solana_ledger::{ - blockstore_options::{AccessType, BlockstoreRecoveryMode}, - blockstore_processor::ProcessOptions, - }, + solana_cli_output::{OutputFormat, QuietDisplay, VerboseDisplay}, + solana_ledger::{blockstore_options::AccessType, blockstore_processor::ProcessOptions}, solana_program_runtime::{ invoke_context::InvokeContext, loaded_programs::{LoadProgramMetrics, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, @@ -27,14 +25,13 @@ use { account::AccountSharedData, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - feature_set, pubkey::Pubkey, slot_history::Slot, transaction_context::{IndexOfAccount, InstructionAccount}, }, std::{ collections::HashSet, - fmt::{Debug, Formatter}, + fmt::{self, Debug, Formatter}, fs::File, io::{Read, Seek, Write}, path::{Path, PathBuf}, @@ -77,8 +74,6 @@ fn load_accounts(path: &Path) -> Result { fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc { let debug_keys = pubkeys_of(arg_matches, "debug_key") .map(|pubkeys| Arc::new(pubkeys.into_iter().collect::>())); - let force_update_to_open = arg_matches.is_present("force_update_to_open"); - let enforce_ulimit_nofile = !arg_matches.is_present("ignore_ulimit_nofile_error"); let process_options = ProcessOptions { new_hard_forks: 
hardforks_of(arg_matches, "hard_forks"), run_verification: false, @@ -108,30 +103,17 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc { fn program_subcommand(self) -> Self { let program_arg = Arg::with_name("PROGRAM") .help( - "Program file to use. This is either an ELF shared-object file to be executed, \ - or an assembly file to be assembled and executed.", + "Program file to use. This is either an ELF shared-object file to be executed, or \ + an assembly file to be assembled and executed.", ) .required(true) .index(1); @@ -268,8 +250,9 @@ struct Output { log: Vec, } -impl Debug for Output { +impl fmt::Display for Output { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Program output:")?; writeln!(f, "Result: {}", self.result)?; writeln!(f, "Instruction Count: {}", self.instruction_count)?; writeln!(f, "Execution time: {} us", self.execution_time.as_micros())?; @@ -280,6 +263,9 @@ impl Debug for Output { } } +impl QuietDisplay for Output {} +impl VerboseDisplay for Output {} + // Replace with std::lazy::Lazy when stabilized. // https://github.com/rust-lang/rust/issues/74465 struct LazyAnalysis<'a, 'b> { @@ -358,9 +344,6 @@ fn load_program<'a>( #[allow(unused_mut)] let mut verified_executable = if is_elf { let result = load_program_from_bytes( - invoke_context - .feature_set - .is_active(&feature_set::delay_visibility_of_program_deployment::id()), log_collector, &mut load_program_metrics, &contents, @@ -574,6 +557,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .get_current_instruction_context() .unwrap(), true, // copy_account_data + &invoke_context.feature_set, ) .unwrap(); @@ -620,16 +604,6 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .get_recorded_content() .to_vec(), }; - match matches.value_of("output_format") { - Some("json") => { - println!("{}", serde_json::to_string_pretty(&output).unwrap()); - } - Some("json-compact") => { - println!("{}", serde_json::to_string(&output).unwrap()); - } - _ => { - println!("Program output:"); - println!("{output:?}"); - } - } + let output_format = OutputFormat::from_matches(matches, "output_format", false); + println!("{}", output_format.formatted_string(&output)); } diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index c75380581fc16d..993f6d2c2f7645 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -25,18 +25,50 @@ use { solana_sdk::genesis_config::GenesisConfig, std::{ path::PathBuf, - process, result, + result, sync::{atomic::AtomicBool, Arc, RwLock}, }, + thiserror::Error, }; +#[derive(Error, Debug)] +pub enum BankForksUtilsError { + #[error("accounts path(s) not present when booting from snapshot")] + AccountPathsNotPresent, + + #[error( + "failed to load bank: {source}, full snapshot archive: {full_snapshot_archive}, \ + incremental snapshot archive: {incremental_snapshot_archive}" + )] + BankFromSnapshotsArchive { + source: snapshot_utils::SnapshotError, + full_snapshot_archive: String, + incremental_snapshot_archive: String, + }, + + #[error( + "there is no local state to startup from. 
\ + Ensure --{flag} is NOT set to \"{value}\" and restart" + )] + NoBankSnapshotDirectory { flag: String, value: String }, + + #[error("failed to load bank: {source}, snapshot: {path}")] + BankFromSnapshotsDirectory { + source: snapshot_utils::SnapshotError, + path: PathBuf, + }, + + #[error("failed to process blockstore from root: {0}")] + ProcessBlockstoreFromRoot(#[source] BlockstoreProcessorError), +} + pub type LoadResult = result::Result< ( Arc>, LeaderScheduleCache, Option, ), - BlockstoreProcessorError, + BankForksUtilsError, >; /// Load the banks via genesis or a snapshot then processes all full blocks in blockstore @@ -68,8 +100,7 @@ pub fn load( entry_notification_sender, accounts_update_notifier, exit, - ); - + )?; blockstore_processor::process_blockstore_from_root( blockstore, &bank_forks, @@ -80,7 +111,9 @@ pub fn load( entry_notification_sender, &AbsRequestSender::default(), ) - .map(|_| (bank_forks, leader_schedule_cache, starting_snapshot_hashes)) + .map_err(BankForksUtilsError::ProcessBlockstoreFromRoot)?; + + Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) } #[allow(clippy::too_many_arguments)] @@ -95,11 +128,7 @@ pub fn load_bank_forks( entry_notification_sender: Option<&EntryNotifierSender>, accounts_update_notifier: Option, exit: Arc, -) -> ( - Arc>, - LeaderScheduleCache, - Option, -) { +) -> LoadResult { fn get_snapshots_to_load( snapshot_config: Option<&SnapshotConfig>, ) -> Option<( @@ -157,18 +186,9 @@ pub fn load_bank_forks( process_options, accounts_update_notifier, exit, - ); + )?; (bank_forks, Some(starting_snapshot_hashes)) } else { - let maybe_filler_accounts = process_options - .accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config.count > 0); - - if let Some(true) = maybe_filler_accounts { - panic!("filler accounts specified, but not loading from snapshot"); - } - info!("Processing ledger from genesis"); let bank_forks = blockstore_processor::process_blockstore_for_bank_0( genesis_config, @@ -202,7 +222,7 @@ pub fn load_bank_forks( .for_each(|hard_fork_slot| root_bank.register_hard_fork(*hard_fork_slot)); } - (bank_forks, leader_schedule_cache, starting_snapshot_hashes) + Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) } #[allow(clippy::too_many_arguments)] @@ -216,11 +236,10 @@ fn bank_forks_from_snapshot( process_options: &ProcessOptions, accounts_update_notifier: Option, exit: Arc, -) -> (Arc>, StartingSnapshotHashes) { +) -> Result<(Arc>, StartingSnapshotHashes), BankForksUtilsError> { // Fail hard here if snapshot fails to load, don't silently continue if account_paths.is_empty() { - error!("Account paths not present when booting from snapshot"); - process::exit(1); + return Err(BankForksUtilsError::AccountPathsNotPresent); } let latest_snapshot_archive_slot = std::cmp::max( @@ -270,29 +289,21 @@ fn bank_forks_from_snapshot( accounts_update_notifier, exit, ) - .unwrap_or_else(|err| { - error!( - "Failed to load bank: {err} \ - \nfull snapshot archive: {} \ - \nincremental snapshot archive: {}", - full_snapshot_archive_info.path().display(), - incremental_snapshot_archive_info - .as_ref() - .map(|archive| archive.path().display().to_string()) - .unwrap_or("none".to_string()), - ); - process::exit(1); - }); + .map_err(|err| BankForksUtilsError::BankFromSnapshotsArchive { + source: err, + full_snapshot_archive: full_snapshot_archive_info.path().display().to_string(), + incremental_snapshot_archive: incremental_snapshot_archive_info + .as_ref() + .map(|archive| 
archive.path().display().to_string()) + .unwrap_or("none".to_string()), + })?; bank } else { - let Some(bank_snapshot) = latest_bank_snapshot else { - error!( - "There is no local state to startup from. Ensure --{} is *not* set to \"{}\" and restart.", - use_snapshot_archives_at_startup::cli::LONG_ARG, - UseSnapshotArchivesAtStartup::Never.to_string(), - ); - process::exit(1); - }; + let bank_snapshot = + latest_bank_snapshot.ok_or_else(|| BankForksUtilsError::NoBankSnapshotDirectory { + flag: use_snapshot_archives_at_startup::cli::LONG_ARG.to_string(), + value: UseSnapshotArchivesAtStartup::Never.to_string(), + })?; // If a newer snapshot archive was downloaded, it is possible that its slot is // higher than the local bank we will load. Did the user intend for this? @@ -327,14 +338,10 @@ fn bank_forks_from_snapshot( accounts_update_notifier, exit, ) - .unwrap_or_else(|err| { - error!( - "Failed to load bank: {err} \ - \nsnapshot: {}", - bank_snapshot.snapshot_path().display(), - ); - process::exit(1); - }); + .map_err(|err| BankForksUtilsError::BankFromSnapshotsDirectory { + source: err, + path: bank_snapshot.snapshot_path(), + })?; bank }; @@ -358,5 +365,5 @@ fn bank_forks_from_snapshot( incremental: incremental_snapshot_hash, }; - (BankForks::new_rw_arc(bank), starting_snapshot_hashes) + Ok((BankForks::new_rw_arc(bank), starting_snapshot_hashes)) } diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index be28ee8a0703d8..2a076d46be7aca 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -178,10 +178,10 @@ pub async fn upload_confirmed_blocks( break; } - let _ = match blockstore.get_rooted_block(slot, true) { - Ok(confirmed_block) => { + let _ = match blockstore.get_rooted_block_with_entries(slot, true) { + Ok(confirmed_block_with_entries) => { num_blocks_read += 1; - sender.send((slot, Some(confirmed_block))) + sender.send((slot, Some(confirmed_block_with_entries))) } Err(err) => { warn!( @@ -227,7 +227,8 @@ pub async fn upload_confirmed_blocks( Some(confirmed_block) => { let bt = bigtable.clone(); Some(tokio::spawn(async move { - bt.upload_confirmed_block(slot, confirmed_block).await + bt.upload_confirmed_block_with_entries(slot, confirmed_block) + .await })) } }); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 3010a65be7f90c..323c7ac8699a05 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -10,6 +10,7 @@ use { IteratorMode, LedgerColumn, Result, WriteBatch, }, blockstore_meta::*, + blockstore_metrics::BlockstoreRpcApiMetrics, blockstore_options::{ AccessType, BlockstoreOptions, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_FIFO, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL, @@ -143,6 +144,7 @@ pub enum PossibleDuplicateShred { Exists(Shred), // Blockstore has another shred in its spot LastIndexConflict(/* original */ Shred, /* conflict */ Vec), // The index of this shred conflicts with `slot_meta.last_index` ErasureConflict(/* original */ Shred, /* conflict */ Vec), // The coding shred has a conflict in the erasure_meta + MerkleRootConflict(/* original */ Shred, /* conflict */ Vec), // Merkle root conflict in the same fec set } impl PossibleDuplicateShred { @@ -151,6 +153,27 @@ impl PossibleDuplicateShred { Self::Exists(shred) => shred.slot(), Self::LastIndexConflict(shred, _) => shred.slot(), Self::ErasureConflict(shred, _) => shred.slot(), + Self::MerkleRootConflict(shred, _) => shred.slot(), + } + } +} + +enum WorkingEntry { + Dirty(T), // Value has been modified with respect to the blockstore 
column + Clean(T), // Value matches what is currently in the blockstore column +} + +impl WorkingEntry { + fn should_write(&self) -> bool { + matches!(self, Self::Dirty(_)) + } +} + +impl AsRef for WorkingEntry { + fn as_ref(&self) -> &T { + match self { + Self::Dirty(value) => value, + Self::Clean(value) => value, } } } @@ -222,6 +245,7 @@ pub struct Blockstore { pub shred_timing_point_sender: Option, pub lowest_cleanup_slot: RwLock, pub slots_stats: SlotsStats, + rpc_api_metrics: BlockstoreRpcApiMetrics, } pub struct IndexMetaWorkingSetEntry { @@ -362,6 +386,7 @@ impl Blockstore { max_root, lowest_cleanup_slot: RwLock::::default(), slots_stats: SlotsStats::default(), + rpc_api_metrics: BlockstoreRpcApiMetrics::default(), }; blockstore.cleanup_old_entries()?; blockstore.update_highest_primary_index_slot()?; @@ -461,7 +486,12 @@ impl Blockstore { } fn erasure_meta(&self, erasure_set: ErasureSetId) -> Result> { - self.erasure_meta_cf.get(erasure_set.store_key()) + let (slot, fec_set_index) = erasure_set.store_key(); + self.erasure_meta_cf.get((slot, u64::from(fec_set_index))) + } + + fn merkle_root_meta(&self, erasure_set: ErasureSetId) -> Result> { + self.merkle_root_meta_cf.get(erasure_set.store_key()) } /// Check whether the specified slot is an orphan slot which does not @@ -717,9 +747,14 @@ impl Blockstore { self.merkle_root_meta_cf.submit_rocksdb_cf_metrics(); } + /// Report the accumulated RPC API metrics + pub(crate) fn report_rpc_api_metrics(&self) { + self.rpc_api_metrics.report(); + } + fn try_shred_recovery( &self, - erasure_metas: &HashMap, + erasure_metas: &HashMap>, index_working_set: &mut HashMap, prev_inserted_shreds: &HashMap, reed_solomon_cache: &ReedSolomonCache, @@ -730,7 +765,8 @@ impl Blockstore { // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery // 3. Before trying recovery, check if enough number of shreds have been received // 3a. Enough number of shreds = (#data + #coding shreds) > erasure.num_data - for (erasure_set, erasure_meta) in erasure_metas.iter() { + for (erasure_set, working_erasure_meta) in erasure_metas.iter() { + let erasure_meta = working_erasure_meta.as_ref(); let slot = erasure_set.slot(); let index_meta_entry = index_working_set.get_mut(&slot).expect("Index"); let index = &mut index_meta_entry.index; @@ -793,6 +829,9 @@ impl Blockstore { /// - [`cf::ErasureMeta`]: the associated ErasureMeta of the coding and data /// shreds inside `shreds` will be updated and committed to /// `cf::ErasureMeta`. + /// - [`cf::MerkleRootMeta`]: the associated MerkleRootMeta of the coding and data + /// shreds inside `shreds` will be updated and committed to + /// `cf::MerkleRootMeta`. /// - [`cf::Index`]: stores (slot id, index to the index_working_set_entry) /// pair to the `cf::Index` column family for each index_working_set_entry /// which insert did occur in this function call. 
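// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the diff.] The WorkingEntry
// wrapper introduced above implements lightweight dirty tracking: metadata read
// back from the blockstore is tagged Clean, metadata created or modified during
// the current insert batch is tagged Dirty, and only Dirty entries are written
// out when the batch is committed. The standalone sketch below mirrors that
// flow; the `Meta` payload, the `flush` helper, and the HashMap standing in for
// a RocksDB write batch are hypothetical stand-ins, not names from the real code.
use std::collections::HashMap;

enum WorkingEntry<T> {
    Dirty(T), // modified in this batch; must be persisted
    Clean(T), // read from the store; already persisted
}

impl<T> WorkingEntry<T> {
    fn should_write(&self) -> bool {
        matches!(self, Self::Dirty(_))
    }
}

impl<T> AsRef<T> for WorkingEntry<T> {
    fn as_ref(&self) -> &T {
        match self {
            Self::Dirty(value) | Self::Clean(value) => value,
        }
    }
}

#[derive(Clone, Debug, PartialEq)]
struct Meta(u64);

// Commit the working set, skipping entries that were never modified.
fn flush(working: &HashMap<u64, WorkingEntry<Meta>>, store: &mut HashMap<u64, Meta>) {
    for (key, entry) in working {
        if !entry.should_write() {
            continue; // Clean: no need to rewrite the column
        }
        store.insert(*key, entry.as_ref().clone());
    }
}

fn main() {
    let mut store = HashMap::from([(1u64, Meta(10))]);
    let mut working = HashMap::new();
    working.insert(1, WorkingEntry::Clean(store[&1].clone())); // read back
    working.insert(2, WorkingEntry::Dirty(Meta(20))); // new in this batch
    flush(&working, &mut store);
    assert_eq!(store.len(), 2); // only the Dirty entry triggered a write
}
// ---------------------------------------------------------------------------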
@@ -835,6 +874,7 @@ impl Blockstore { let mut just_inserted_shreds = HashMap::with_capacity(shreds.len()); let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); let mut slot_meta_working_set = HashMap::new(); let mut index_working_set = HashMap::new(); let mut duplicate_shreds = vec![]; @@ -854,6 +894,7 @@ impl Blockstore { match self.check_insert_data_shred( shred, &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, @@ -891,6 +932,7 @@ impl Blockstore { self.check_insert_coding_shred( shred, &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut write_batch, &mut just_inserted_shreds, @@ -937,6 +979,7 @@ impl Blockstore { match self.check_insert_data_shred( shred.clone(), &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, @@ -998,8 +1041,27 @@ impl Blockstore { &mut write_batch, )?; - for (erasure_set, erasure_meta) in erasure_metas { - write_batch.put::(erasure_set.store_key(), &erasure_meta)?; + for (erasure_set, working_erasure_meta) in erasure_metas { + if !working_erasure_meta.should_write() { + // No need to rewrite the column + continue; + } + let (slot, fec_set_index) = erasure_set.store_key(); + write_batch.put::( + (slot, u64::from(fec_set_index)), + working_erasure_meta.as_ref(), + )?; + } + + for (erasure_set, working_merkle_root_meta) in merkle_root_metas { + if !working_merkle_root_meta.should_write() { + // No need to rewrite the column + continue; + } + write_batch.put::( + erasure_set.store_key(), + working_merkle_root_meta.as_ref(), + )?; } for (&slot, index_working_set_entry) in index_working_set.iter() { @@ -1158,7 +1220,8 @@ impl Blockstore { fn check_insert_coding_shred( &self, shred: Shred, - erasure_metas: &mut HashMap, + erasure_metas: &mut HashMap>, + merkle_root_metas: &mut HashMap>, index_working_set: &mut HashMap, write_batch: &mut WriteBatch, just_received_shreds: &mut HashMap, @@ -1175,10 +1238,16 @@ impl Blockstore { self.get_index_meta_entry(slot, index_working_set, index_meta_time_us); let index_meta = &mut index_meta_working_set_entry.index; + let erasure_set = shred.erasure_set(); + + if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) { + if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() { + entry.insert(WorkingEntry::Clean(meta)); + } + } // This gives the index of first coding shred in this FEC block // So, all coding shreds in a given FEC block will have the same set index - if !is_trusted { if index_meta.coding().contains(shred_index) { metrics.num_coding_shreds_exists += 1; @@ -1190,14 +1259,32 @@ impl Blockstore { metrics.num_coding_shreds_invalid += 1; return false; } + + if let Some(merkle_root_meta) = merkle_root_metas.get(&erasure_set) { + // A previous shred has been inserted in this batch or in blockstore + // Compare our current shred against the previous shred for potential + // conflicts + if !self.check_merkle_root_consistency( + just_received_shreds, + slot, + merkle_root_meta.as_ref(), + &shred, + duplicate_shreds, + ) { + return false; + } + } } - let erasure_set = shred.erasure_set(); - let erasure_meta = erasure_metas.entry(erasure_set).or_insert_with(|| { + let erasure_meta_entry = erasure_metas.entry(erasure_set).or_insert_with(|| { self.erasure_meta(erasure_set) .expect("Expect database get to succeed") - .unwrap_or_else(|| ErasureMeta::from_coding_shred(&shred).unwrap()) + .map(WorkingEntry::Clean) + 
.unwrap_or_else(|| { + WorkingEntry::Dirty(ErasureMeta::from_coding_shred(&shred).unwrap()) + }) }); + let erasure_meta = erasure_meta_entry.as_ref(); if !erasure_meta.check_coding_shred(&shred) { metrics.num_coding_shreds_invalid_erasure_config += 1; @@ -1255,6 +1342,10 @@ impl Blockstore { if result { index_meta_working_set_entry.did_insert_occur = true; metrics.num_inserted += 1; + + merkle_root_metas + .entry(erasure_set) + .or_insert(WorkingEntry::Dirty(MerkleRootMeta::from_shred(&shred))); } if let HashMapEntry::Vacant(entry) = just_received_shreds.entry(shred.id()) { @@ -1303,8 +1394,8 @@ impl Blockstore { /// /// The resulting `write_batch` may include updates to [`cf::DeadSlots`] /// and [`cf::ShredData`]. Note that it will also update the in-memory copy - /// of `erasure_metas` and `index_working_set`, which will later be - /// used to update other column families such as [`cf::ErasureMeta`] and + /// of `erasure_metas`, `merkle_root_metas`, and `index_working_set`, which will + /// later be used to update other column families such as [`cf::ErasureMeta`] and /// [`cf::Index`]. /// /// Arguments: @@ -1312,6 +1403,9 @@ impl Blockstore { /// - `erasure_metas`: the in-memory hash-map that maintains the dirty /// copy of the erasure meta. It will later be written to /// `cf::ErasureMeta` in insert_shreds_handle_duplicate(). + /// - `merkle_root_metas`: the in-memory hash-map that maintains the dirty + /// copy of the merkle root meta. It will later be written to + /// `cf::MerkleRootMeta` in `insert_shreds_handle_duplicate()`. /// - `index_working_set`: the in-memory hash-map that maintains the /// dirty copy of the index meta. It will later be written to /// `cf::Index` in insert_shreds_handle_duplicate(). @@ -1334,7 +1428,8 @@ impl Blockstore { fn check_insert_data_shred( &self, shred: Shred, - erasure_metas: &mut HashMap, + erasure_metas: &mut HashMap>, + merkle_root_metas: &mut HashMap>, index_working_set: &mut HashMap, slot_meta_working_set: &mut HashMap, write_batch: &mut WriteBatch, @@ -1360,6 +1455,12 @@ impl Blockstore { ); let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); + let erasure_set = shred.erasure_set(); + if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) { + if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() { + entry.insert(WorkingEntry::Clean(meta)); + } + } if !is_trusted { if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) { @@ -1392,9 +1493,23 @@ impl Blockstore { ) { return Err(InsertDataShredError::InvalidShred); } + + if let Some(merkle_root_meta) = merkle_root_metas.get(&erasure_set) { + // A previous shred has been inserted in this batch or in blockstore + // Compare our current shred against the previous shred for potential + // conflicts + if !self.check_merkle_root_consistency( + just_inserted_shreds, + slot, + merkle_root_meta.as_ref(), + &shred, + duplicate_shreds, + ) { + return Err(InsertDataShredError::InvalidShred); + } + } } - let erasure_set = shred.erasure_set(); let newly_completed_data_sets = self.insert_data_shred( slot_meta, index_meta.data_mut(), @@ -1402,12 +1517,15 @@ impl Blockstore { write_batch, shred_source, )?; + merkle_root_metas + .entry(erasure_set) + .or_insert(WorkingEntry::Dirty(MerkleRootMeta::from_shred(&shred))); just_inserted_shreds.insert(shred.id(), shred); index_meta_working_set_entry.did_insert_occur = true; slot_meta_entry.did_insert_occur = true; if let HashMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) { if let Some(meta) = 
self.erasure_meta(erasure_set).unwrap() { - entry.insert(meta); + entry.insert(WorkingEntry::Clean(meta)); } } Ok(newly_completed_data_sets) @@ -1446,20 +1564,79 @@ impl Blockstore { shred_index < slot_meta.consumed || data_index.contains(shred_index) } - fn get_data_shred_from_just_inserted_or_db<'a>( + /// Finds the corresponding shred at `shred_id` in the just inserted + /// shreds or the backing store. Returns None if there is no shred. + fn get_shred_from_just_inserted_or_db<'a>( &'a self, just_inserted_shreds: &'a HashMap, - slot: Slot, - index: u64, - ) -> Cow<'a, Vec> { - let key = ShredId::new(slot, u32::try_from(index).unwrap(), ShredType::Data); - if let Some(shred) = just_inserted_shreds.get(&key) { - Cow::Borrowed(shred.payload()) - } else { + shred_id: ShredId, + ) -> Option>> { + let (slot, index, shred_type) = shred_id.unpack(); + match (just_inserted_shreds.get(&shred_id), shred_type) { + (Some(shred), _) => Some(Cow::Borrowed(shred.payload())), // If it doesn't exist in the just inserted set, it must exist in // the backing store - Cow::Owned(self.get_data_shred(slot, index).unwrap().unwrap()) + (_, ShredType::Data) => self + .get_data_shred(slot, u64::from(index)) + .unwrap() + .map(Cow::Owned), + (_, ShredType::Code) => self + .get_coding_shred(slot, u64::from(index)) + .unwrap() + .map(Cow::Owned), + } + } + + /// Returns true if there is no merkle root conflict between + /// the existing `merkle_root_meta` and `shred` + /// + /// Otherwise return false and if not already present, add duplicate proof to + /// `duplicate_shreds`. + fn check_merkle_root_consistency( + &self, + just_inserted_shreds: &HashMap, + slot: Slot, + merkle_root_meta: &MerkleRootMeta, + shred: &Shred, + duplicate_shreds: &mut Vec, + ) -> bool { + let new_merkle_root = shred.merkle_root().ok(); + if merkle_root_meta.merkle_root() == new_merkle_root { + // No conflict, either both merkle shreds with same merkle root + // or both legacy shreds with merkle_root `None` + return true; } + + warn!( + "Received conflicting merkle roots for slot: {}, erasure_set: {:?} + original merkle root meta {:?} vs + conflicting merkle root {:?} shred index {} type {:?}. Reporting as duplicate", + slot, + shred.erasure_set(), + merkle_root_meta, + new_merkle_root, + shred.index(), + shred.shred_type(), + ); + + if !self.has_duplicate_shreds_in_slot(slot) { + let shred_id = ShredId::new( + slot, + merkle_root_meta.first_received_shred_index(), + merkle_root_meta.first_received_shred_type(), + ); + let conflicting_shred = self + .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id) + .unwrap_or_else(|| { + panic!("First received shred indicated by merkle root meta {:?} is missing from blockstore. 
This inconsistency may cause duplicate block detection to fail", merkle_root_meta); + }) + .into_owned(); + duplicate_shreds.push(PossibleDuplicateShred::MerkleRootConflict( + shred.clone(), + conflicting_shred, + )); + } + false } fn should_insert_data_shred( @@ -1489,12 +1666,16 @@ impl Blockstore { .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None)); if !self.has_duplicate_shreds_in_slot(slot) { + let shred_id = ShredId::new( + slot, + u32::try_from(last_index.unwrap()).unwrap(), + ShredType::Data, + ); let ending_shred: Vec = self - .get_data_shred_from_just_inserted_or_db( - just_inserted_shreds, - slot, - last_index.unwrap(), - ) + .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id) + .unwrap_or_else(|| { + panic!("Last index data shred indicated by slot meta {:?} is missing from blockstore. This inconsistency may cause duplicate block detection to fail", slot_meta) + }) .into_owned(); if self @@ -1528,12 +1709,16 @@ impl Blockstore { .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None)); if !self.has_duplicate_shreds_in_slot(slot) { + let shred_id = ShredId::new( + slot, + u32::try_from(slot_meta.received - 1).unwrap(), + ShredType::Data, + ); let ending_shred: Vec = self - .get_data_shred_from_just_inserted_or_db( - just_inserted_shreds, - slot, - slot_meta.received - 1, - ) + .get_shred_from_just_inserted_or_db(just_inserted_shreds, shred_id) + .unwrap_or_else(|| { + panic!("Last received data shred indicated by slot meta {:?} is missing from blockstore. This inconsistency may cause duplicate block detection to fail", slot_meta) + }) .into_owned(); if self @@ -1961,10 +2146,9 @@ impl Blockstore { } pub fn get_rooted_block_time(&self, slot: Slot) -> Result { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_rooted_block_time", String) - ); + self.rpc_api_metrics + .num_get_rooted_block_time + .fetch_add(1, Ordering::Relaxed); let _lock = self.check_lowest_cleanup_slot(slot)?; if self.is_root(slot) { @@ -1981,8 +2165,11 @@ impl Blockstore { } pub fn get_block_height(&self, slot: Slot) -> Result> { - datapoint_info!("blockstore-rpc-api", ("method", "get_block_height", String)); + self.rpc_api_metrics + .num_get_block_height + .fetch_add(1, Ordering::Relaxed); let _lock = self.check_lowest_cleanup_slot(slot)?; + self.block_height_cf.get(slot) } @@ -2010,7 +2197,9 @@ impl Blockstore { slot: Slot, require_previous_blockhash: bool, ) -> Result { - datapoint_info!("blockstore-rpc-api", ("method", "get_rooted_block", String)); + self.rpc_api_metrics + .num_get_rooted_block + .fetch_add(1, Ordering::Relaxed); let _lock = self.check_lowest_cleanup_slot(slot)?; if self.is_root(slot) { @@ -2033,10 +2222,9 @@ impl Blockstore { slot: Slot, require_previous_blockhash: bool, ) -> Result { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_rooted_block_with_entries", String) - ); + self.rpc_api_metrics + .num_get_rooted_block_with_entries + .fetch_add(1, Ordering::Relaxed); let _lock = self.check_lowest_cleanup_slot(slot)?; if self.is_root(slot) { @@ -2228,13 +2416,9 @@ impl Blockstore { if let Some(highest_primary_index_slot) = *w_highest_primary_index_slot { if oldest_slot > highest_primary_index_slot { *w_highest_primary_index_slot = None; - self.transaction_status_index_cf.delete(0)?; - self.transaction_status_index_cf.delete(1)?; + self.db.set_clean_slot_0(true); } } - if w_highest_primary_index_slot.is_none() { - self.db.set_clean_slot_0(true); - } Ok(()) } @@ -2441,10 +2625,10 @@ impl Blockstore { &self, signature: 
Signature, ) -> Result> { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_rooted_transaction_status", String) - ); + self.rpc_api_metrics + .num_get_rooted_transaction_status + .fetch_add(1, Ordering::Relaxed); + self.get_transaction_status(signature, &HashSet::default()) } @@ -2454,10 +2638,10 @@ impl Blockstore { signature: Signature, confirmed_unrooted_slots: &HashSet, ) -> Result> { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_transaction_status", String) - ); + self.rpc_api_metrics + .num_get_transaction_status + .fetch_add(1, Ordering::Relaxed); + self.get_transaction_status_with_counter(signature, confirmed_unrooted_slots) .map(|(status, _)| status) } @@ -2467,10 +2651,10 @@ impl Blockstore { &self, signature: Signature, ) -> Result> { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_rooted_transaction", String) - ); + self.rpc_api_metrics + .num_get_rooted_transaction + .fetch_add(1, Ordering::Relaxed); + self.get_transaction_with_status(signature, &HashSet::default()) } @@ -2480,10 +2664,10 @@ impl Blockstore { signature: Signature, highest_confirmed_slot: Slot, ) -> Result> { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_complete_transaction", String) - ); + self.rpc_api_metrics + .num_get_complete_transaction + .fetch_add(1, Ordering::Relaxed); + let max_root = self.max_root(); let confirmed_unrooted_slots: HashSet<_> = AncestorIterator::new_inclusive(highest_confirmed_slot, self) @@ -2593,10 +2777,10 @@ impl Blockstore { start_slot: Slot, end_slot: Slot, ) -> Result> { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_confirmed_signatures_for_address", String) - ); + self.rpc_api_metrics + .num_get_confirmed_signatures_for_address + .fetch_add(1, Ordering::Relaxed); + self.find_address_signatures(pubkey, start_slot, end_slot) .map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect()) } @@ -2631,10 +2815,10 @@ impl Blockstore { until: Option, limit: usize, ) -> Result { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_confirmed_signatures_for_address2", String) - ); + self.rpc_api_metrics + .num_get_confirmed_signatures_for_address2 + .fetch_add(1, Ordering::Relaxed); + let max_root = self.max_root(); let confirmed_unrooted_slots: HashSet<_> = AncestorIterator::new_inclusive(highest_slot, self) @@ -2951,18 +3135,18 @@ impl Blockstore { if let Ok(entries) = self.get_slot_entries(slot, 0) { entries.into_par_iter().for_each(|entry| { entry.transactions.into_iter().for_each(|tx| { + if let Some(lookups) = tx.message.address_table_lookups() { + add_to_set( + &lookup_tables, + lookups.iter().map(|lookup| &lookup.account_key), + ); + } // Attempt to verify transaction and load addresses from the current bank, // or manually scan the transaction for addresses if the transaction. 
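// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the diff.] The hunk above
// hoists the address-table-lookup collection so that lookup-table account keys
// are recorded for every transaction, whether or not it can be fully verified
// against the bank; previously they were only collected on the fallback path.
// The sketch below models that control flow with simplified stand-in types:
// `Tx`, `fully_verify`, and the u64 keys are hypothetical, not the real
// VersionedTransaction/Bank API.
use std::collections::HashSet;

struct Tx {
    static_keys: Vec<u64>,       // keys listed directly in the message
    lookup_table_keys: Vec<u64>, // address lookup table accounts it references
}

// Stand-in for bank.fully_verify_transaction(): the resolved key set on
// success, None when the transaction cannot be verified.
fn fully_verify(tx: &Tx) -> Option<Vec<u64>> {
    Some(tx.static_keys.iter().chain(&tx.lookup_table_keys).copied().collect())
}

fn collect_keys(txs: &[Tx]) -> (HashSet<u64>, HashSet<u64>) {
    let mut result = HashSet::new();
    let mut lookup_tables = HashSet::new();
    for tx in txs {
        // Record lookup table keys unconditionally (the behavior this diff adds).
        lookup_tables.extend(tx.lookup_table_keys.iter().copied());
        match fully_verify(tx) {
            Some(keys) => result.extend(keys),
            None => result.extend(tx.static_keys.iter().copied()), // fallback
        }
    }
    (result, lookup_tables)
}

fn main() {
    let txs = [Tx { static_keys: vec![1, 2], lookup_table_keys: vec![9] }];
    let (result, lookup_tables) = collect_keys(&txs);
    assert!(result.contains(&1) && lookup_tables.contains(&9));
}
// ---------------------------------------------------------------------------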
if let Ok(tx) = bank.fully_verify_transaction(tx.clone()) { add_to_set(&result, tx.message().account_keys().iter()); } else { add_to_set(&result, tx.message.static_account_keys()); - if let Some(lookups) = tx.message.address_table_lookups() { - add_to_set( - &lookup_tables, - lookups.iter().map(|lookup| &lookup.account_key), - ); - } let tx = SanitizedVersionedTransaction::try_from(tx) .expect("transaction failed to sanitize"); @@ -2982,6 +3166,7 @@ impl Blockstore { lookup_tables.into_par_iter().for_each(|lookup_table_key| { bank.get_account(&lookup_table_key) .map(|lookup_table_account| { + add_to_set(&result, &[lookup_table_key]); AddressLookupTable::deserialize(lookup_table_account.data()).map(|t| { add_to_set(&result, &t.addresses[..]); }) @@ -4543,7 +4728,7 @@ fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> { // usually not enough // AppendVecs and disk Account Index are also heavy users of mmapped files. // This should be kept in sync with published validator instructions. - // https://docs.solana.com/running-validator/validator-start#increased-memory-mapped-files-limit + // https://docs.solanalabs.com/operations/guides/validator-start#increased-memory-mapped-files-limit let desired_nofile = 1_000_000; fn get_nofile() -> libc::rlimit { @@ -6294,7 +6479,7 @@ pub mod tests { assert_eq!( blockstore.find_missing_data_indexes( slot, - 0, // first_timestmap + 0, // first_timestamp 0, // defer_threshold_ticks 0, // start_index gap - 1, // end_index @@ -6305,7 +6490,7 @@ pub mod tests { assert_eq!( blockstore.find_missing_data_indexes( slot, - 0, // first_timestmap + 0, // first_timestamp 0, // defer_threshold_ticks gap - 2, // start_index gap, // end_index @@ -6723,6 +6908,412 @@ pub mod tests { ),); } + #[test] + fn test_merkle_root_metas_coding() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let parent_slot = 0; + let slot = 1; + let index = 0; + let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10); + let coding_shred = coding_shreds[index as usize].clone(); + + let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); + let mut index_working_set = HashMap::new(); + let mut just_received_shreds = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut index_meta_time_us = 0; + assert!(blockstore.check_insert_coding_shred( + coding_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + &mut vec![], + false, + ShredSource::Turbine, + &mut BlockstoreInsertionMetrics::default(), + )); + + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + coding_shred.merkle_root().ok(), + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_type(), + ShredType::Code, + ); + + for (erasure_set, working_merkle_root_meta) in merkle_root_metas { + write_batch + .put::( + erasure_set.store_key(), + working_merkle_root_meta.as_ref(), + ) + .unwrap(); + } + blockstore.db.write(write_batch).unwrap(); + + // Add a shred with different merkle root and index + let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10); 
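// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the diff.] The steps below
// exercise the merkle-root consistency rule added in this change: within one
// FEC set, every shred must carry the same merkle root as the first shred
// received for that set, with legacy shreds represented as None. A mismatch is
// rejected and reported as a MerkleRootConflict duplicate proof instead of
// being inserted. Minimal model, with a hypothetical Hash alias:
type Hash = [u8; 32];

struct MerkleRootMeta {
    merkle_root: Option<Hash>,       // None for legacy shreds
    first_received_shred_index: u32, // used to fetch the conflicting shred
}

// A `false` result corresponds to pushing
// PossibleDuplicateShred::MerkleRootConflict in the real code.
fn is_consistent(meta: &MerkleRootMeta, new_root: Option<Hash>) -> bool {
    meta.merkle_root == new_root
}

fn main() {
    let meta = MerkleRootMeta {
        merkle_root: Some([1; 32]),
        first_received_shred_index: 0,
    };
    assert_eq!(meta.first_received_shred_index, 0);
    assert!(is_consistent(&meta, Some([1; 32])));  // same root: insert proceeds
    assert!(!is_consistent(&meta, Some([2; 32]))); // conflicting root: duplicate
    assert!(!is_consistent(&meta, None));          // merkle vs legacy: duplicate
}
// ---------------------------------------------------------------------------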
+ let new_coding_shred = coding_shreds[(index + 1) as usize].clone(); + + erasure_metas.clear(); + index_working_set.clear(); + just_received_shreds.clear(); + let mut merkle_root_metas = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut duplicates = vec![]; + + assert!(!blockstore.check_insert_coding_shred( + new_coding_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + &mut duplicates, + false, + ShredSource::Turbine, + &mut BlockstoreInsertionMetrics::default(), + )); + + // No insert, notify duplicate + assert_eq!(duplicates.len(), 1); + match &duplicates[0] { + PossibleDuplicateShred::MerkleRootConflict(shred, _) if shred.slot() == slot => (), + _ => panic!("No merkle root conflict"), + } + + // Verify that we still have the merkle root meta from the original shred + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + coding_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + index + ); + + // Blockstore should also have the merkle root meta of the original shred + assert_eq!( + blockstore + .merkle_root_meta(coding_shred.erasure_set()) + .unwrap() + .unwrap() + .merkle_root(), + coding_shred.merkle_root().ok() + ); + assert_eq!( + blockstore + .merkle_root_meta(coding_shred.erasure_set()) + .unwrap() + .unwrap() + .first_received_shred_index(), + index + ); + + // Add a shred from different fec set + let new_index = index + 31; + let (_, coding_shreds, _) = + setup_erasure_shreds_with_index(slot, parent_slot, 10, new_index); + let new_coding_shred = coding_shreds[0].clone(); + + assert!(blockstore.check_insert_coding_shred( + new_coding_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + &mut vec![], + false, + ShredSource::Turbine, + &mut BlockstoreInsertionMetrics::default(), + )); + + // Verify that we still have the merkle root meta for the original shred + // and the new shred + assert_eq!(merkle_root_metas.len(), 2); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + coding_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&new_coding_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + new_coding_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&new_coding_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + new_index + ); + } + + #[test] + fn test_merkle_root_metas_data() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let parent_slot = 0; + let slot = 1; + let index = 11; + let fec_set_index = 11; + let (data_shreds, _, _) = + setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index); + let data_shred = data_shreds[0].clone(); + + let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); + let mut index_working_set = HashMap::new(); + let mut just_received_shreds = HashMap::new(); + let mut slot_meta_working_set = 
HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut index_meta_time_us = 0; + blockstore + .check_insert_data_shred( + data_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + false, + &mut vec![], + None, + ShredSource::Turbine, + ) + .unwrap(); + + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_type(), + ShredType::Data, + ); + + for (erasure_set, working_merkle_root_meta) in merkle_root_metas { + write_batch + .put::( + erasure_set.store_key(), + working_merkle_root_meta.as_ref(), + ) + .unwrap(); + } + blockstore.db.write(write_batch).unwrap(); + + // Add a shred with different merkle root and index + let (data_shreds, _, _) = + setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index); + let new_data_shred = data_shreds[1].clone(); + + erasure_metas.clear(); + index_working_set.clear(); + just_received_shreds.clear(); + let mut merkle_root_metas = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut duplicates = vec![]; + + assert!(blockstore + .check_insert_data_shred( + new_data_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + false, + &mut duplicates, + None, + ShredSource::Turbine, + ) + .is_err()); + + // No insert, notify duplicate + assert_eq!(duplicates.len(), 1); + assert_matches!( + duplicates[0], + PossibleDuplicateShred::MerkleRootConflict(_, _) + ); + + // Verify that we still have the merkle root meta from the original shred + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + index + ); + + // Blockstore should also have the merkle root meta of the original shred + assert_eq!( + blockstore + .merkle_root_meta(data_shred.erasure_set()) + .unwrap() + .unwrap() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + blockstore + .merkle_root_meta(data_shred.erasure_set()) + .unwrap() + .unwrap() + .first_received_shred_index(), + index + ); + + // Add a shred from different fec set + let new_index = fec_set_index + 31; + let new_data_shred = Shred::new_from_data( + slot, + new_index, + 1, // parent_offset + &[3, 3, 3], // data + ShredFlags::empty(), + 0, // reference_tick, + 0, // version + fec_set_index + 30, + ); + + blockstore + .check_insert_data_shred( + new_data_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + false, + &mut vec![], + None, + ShredSource::Turbine, + ) + .unwrap(); + + // Verify that we still have the merkle root meta for the original shred + // and the new shred + 
assert_eq!(merkle_root_metas.len(), 2); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&new_data_shred.erasure_set()) + .unwrap() + .as_ref() + .merkle_root(), + new_data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&new_data_shred.erasure_set()) + .unwrap() + .as_ref() + .first_received_shred_index(), + new_index + ); + } + #[test] fn test_check_insert_coding_shred() { let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -6741,6 +7332,7 @@ pub mod tests { ); let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); let mut index_working_set = HashMap::new(); let mut just_received_shreds = HashMap::new(); let mut write_batch = blockstore.db.batch().unwrap(); @@ -6748,6 +7340,7 @@ pub mod tests { assert!(blockstore.check_insert_coding_shred( coding_shred.clone(), &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut write_batch, &mut just_received_shreds, @@ -6763,6 +7356,7 @@ pub mod tests { assert!(!blockstore.check_insert_coding_shred( coding_shred.clone(), &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut write_batch, &mut just_received_shreds, @@ -9250,6 +9844,15 @@ pub mod tests { slot: u64, parent_slot: u64, num_entries: u64, + ) -> (Vec, Vec, Arc) { + setup_erasure_shreds_with_index(slot, parent_slot, num_entries, 0) + } + + fn setup_erasure_shreds_with_index( + slot: u64, + parent_slot: u64, + num_entries: u64, + fec_set_index: u32, ) -> (Vec, Vec, Arc) { let entries = make_slot_entries_with_transactions(num_entries); let leader_keypair = Arc::new(Keypair::new()); @@ -9257,10 +9860,10 @@ pub mod tests { let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &leader_keypair, &entries, - true, // is_last_in_slot - 0, // next_shred_index - 0, // next_code_index - true, // merkle_variant + true, // is_last_in_slot + fec_set_index, // next_shred_index + fec_set_index, // next_code_index + true, // merkle_variant &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -9524,7 +10127,7 @@ pub mod tests { } #[test] - fn test_rewards_protobuf_backward_compatability() { + fn test_rewards_protobuf_backward_compatibility() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -9567,7 +10170,7 @@ pub mod tests { // ledger archives, but typically those require contemporaraneous software for other reasons. // However, we are persisting the test since the apis still exist in `blockstore_db`. 
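// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the diff.] Both merkle-root
// tests above rely on the metadata being keyed per FEC set, i.e. by
// (slot, fec_set_index), so a shred from a different FEC set creates a second
// independent entry instead of colliding with the first. Minimal model of that
// keying; the &str payload is a hypothetical stand-in for MerkleRootMeta.
use std::collections::HashMap;

type Slot = u64;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ErasureSetId {
    slot: Slot,
    fec_set_index: u32,
}

fn main() {
    let mut merkle_root_metas: HashMap<ErasureSetId, &str> = HashMap::new();
    let set_a = ErasureSetId { slot: 1, fec_set_index: 11 };
    let set_b = ErasureSetId { slot: 1, fec_set_index: 42 }; // different FEC set
    merkle_root_metas.entry(set_a).or_insert("meta of first received shred");
    merkle_root_metas.entry(set_b).or_insert("meta for the new FEC set");
    // Same slot, two FEC sets: two entries, matching the tests' assertions.
    assert_eq!(merkle_root_metas.len(), 2);
}
// ---------------------------------------------------------------------------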
#[test] - fn test_transaction_status_protobuf_backward_compatability() { + fn test_transaction_status_protobuf_backward_compatibility() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index f6b3662ed19e28..4b599a353d569c 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -401,7 +401,7 @@ impl Blockstore { .into_iter() .flat_map(|entry| entry.transactions); for (i, transaction) in transactions.enumerate() { - if let Some(&signature) = transaction.signatures.get(0) { + if let Some(&signature) = transaction.signatures.first() { batch.delete::((signature, slot))?; batch.delete::((signature, slot))?; if !primary_indexes.is_empty() { diff --git a/ledger/src/blockstore_cleanup_service.rs b/ledger/src/blockstore_cleanup_service.rs index dbd8e64e612186..d9212bf6ddfb58 100644 --- a/ledger/src/blockstore_cleanup_service.rs +++ b/ledger/src/blockstore_cleanup_service.rs @@ -303,7 +303,7 @@ mod tests { fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore { // The find_slots_to_clean() routine uses a method that queries data // from RocksDB SST files. On a running validator, these are created - // fairly reguarly as new data is coming in and contents of memory are + // fairly regularly as new data is coming in and contents of memory are // pushed to disk. In a unit test environment, we aren't pushing nearly // enough data for this to happen organically. So, instead open and // close the Blockstore which will perform the flush to SSTs. diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 0b2b14445539d6..18ba491ea34bd1 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -35,7 +35,7 @@ use { }, solana_storage_proto::convert::generated, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, ffi::{CStr, CString}, fs, marker::PhantomData, @@ -419,49 +419,52 @@ impl Rocks { } let oldest_slot = OldestSlot::default(); let column_options = options.column_options.clone(); + let cf_descriptors = Self::cf_descriptors(path, &options, &oldest_slot); // Open the database let db = match access_type { - AccessType::Primary | AccessType::PrimaryForMaintenance => Rocks { - db: DB::open_cf_descriptors( - &db_options, - path, - Self::cf_descriptors(&options, &oldest_slot), - )?, - access_type, - oldest_slot, - column_options, - write_batch_perf_status: PerfSamplingStatus::default(), - }, + AccessType::Primary | AccessType::PrimaryForMaintenance => { + DB::open_cf_descriptors(&db_options, path, cf_descriptors)? + } AccessType::Secondary => { let secondary_path = path.join("solana-secondary"); - info!( - "Opening Rocks with secondary (read only) access at: {:?}", - secondary_path + "Opening Rocks with secondary (read only) access at: {secondary_path:?}. \ + This secondary access could temporarily degrade other accesses, such as \ + by solana-validator" ); - info!("This secondary access could temporarily degrade other accesses, such as by solana-validator"); - - Rocks { - db: DB::open_cf_descriptors_as_secondary( - &db_options, - path, - &secondary_path, - Self::cf_descriptors(&options, &oldest_slot), - )?, - access_type, - oldest_slot, - column_options, - write_batch_perf_status: PerfSamplingStatus::default(), - } + DB::open_cf_descriptors_as_secondary( + &db_options, + path, + &secondary_path, + cf_descriptors, + )? 
} }; - db.configure_compaction(); + let rocks = Rocks { + db, + access_type, + oldest_slot, + column_options, + write_batch_perf_status: PerfSamplingStatus::default(), + }; + + rocks.configure_compaction(); - Ok(db) + Ok(rocks) } + /// Create the column family (CF) descriptors necessary to open the database. + /// + /// In order to open a RocksDB database with Primary access, all columns must be opened. So, + /// in addition to creating descriptors for all of the expected columns, also create + /// descriptors for columns that were discovered but are otherwise unknown to the software. + /// + /// One case where columns could be unknown is if a RocksDB database is modified with a newer + /// software version that adds a new column, and then also opened with an older version that + /// did not have knowledge of that new column. fn cf_descriptors( + path: &Path, options: &BlockstoreOptions, oldest_slot: &OldestSlot, ) -> Vec { @@ -469,7 +472,7 @@ impl Rocks { let (cf_descriptor_shred_data, cf_descriptor_shred_code) = new_cf_descriptor_pair_shreds::(options, oldest_slot); - vec![ + let mut cf_descriptors = vec![ new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), @@ -491,7 +494,52 @@ impl Rocks { new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), - ] + ]; + + // If the access type is Secondary, we don't need to open all of the + // columns so we can just return immediately. + match options.access_type { + AccessType::Secondary => { + return cf_descriptors; + } + AccessType::Primary | AccessType::PrimaryForMaintenance => {} + } + + // Attempt to detect the column families that are present. It is not a + // fatal error if we cannot, for example, if the Blockstore is brand + // new and will be created by the call to Rocks::open(). + let detected_cfs = match DB::list_cf(&Options::default(), path) { + Ok(detected_cfs) => detected_cfs, + Err(err) => { + warn!("Unable to detect Rocks columns: {err:?}"); + vec![] + } + }; + // The default column is handled automatically, we don't need to create + // a descriptor for it + const DEFAULT_COLUMN_NAME: &str = "default"; + let known_cfs: HashSet<_> = cf_descriptors + .iter() + .map(|cf_descriptor| cf_descriptor.name().to_string()) + .chain(std::iter::once(DEFAULT_COLUMN_NAME.to_string())) + .collect(); + detected_cfs.iter().for_each(|cf_name| { + if known_cfs.get(cf_name.as_str()).is_none() { + info!("Detected unknown column {cf_name}, opening column with basic options"); + // This version of the software was unaware of the column, so + // it is fair to assume that we will not attempt to read or + // write the column. So, set some bare bones settings to avoid + // using extra resources on this unknown column. 
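// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the diff.] The descriptor
// selection above reduces to a set difference: every column family detected on
// disk (via DB::list_cf) that is not in the software's known list, and is not
// RocksDB's implicit "default" column, gets opened with deliberately cheap
// settings. Sketch of that selection with plain string sets; `unknown_columns`
// is a hypothetical helper name.
use std::collections::HashSet;

fn unknown_columns(detected: &[&str], known: &[&str]) -> Vec<String> {
    let known: HashSet<&str> = known
        .iter()
        .copied()
        .chain(std::iter::once("default")) // handled automatically by RocksDB
        .collect();
    detected
        .iter()
        .filter(|name| !known.contains(*name))
        .map(|name| name.to_string())
        .collect()
}

fn main() {
    let detected = ["meta", "default", "new_column"]; // e.g. from DB::list_cf()
    let known = ["meta"];
    // Only "new_column" gets the bare-bones descriptor built just below
    // (small write buffer, auto-compaction disabled).
    assert_eq!(unknown_columns(&detected, &known), vec!["new_column"]);
}
// ---------------------------------------------------------------------------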
+ let mut options = Options::default(); + // Lower the default to avoid unnecessary allocations + options.set_write_buffer_size(1024 * 1024); + // Disable compactions to avoid any modifications to the column + options.set_disable_auto_compactions(true); + cf_descriptors.push(ColumnFamilyDescriptor::new(cf_name, options)); + } + }); + + cf_descriptors } fn columns() -> Vec<&'static str> { @@ -719,10 +767,6 @@ impl Rocks { pub trait Column { type Index; - fn key_size() -> usize { - std::mem::size_of::() - } - fn key(index: Self::Index) -> Vec; fn index(key: &[u8]) -> Self::Index; // This trait method is primarily used by `Database::delete_range_cf()`, and is therefore only @@ -2175,7 +2219,9 @@ fn should_enable_compression() -> bool { #[cfg(test)] pub mod tests { - use {super::*, crate::blockstore_db::columns::ShredData}; + use { + super::*, crate::blockstore_db::columns::ShredData, std::path::PathBuf, tempfile::tempdir, + }; #[test] fn test_compaction_filter() { @@ -2228,6 +2274,7 @@ pub mod tests { #[test] fn test_cf_names_and_descriptors_equal_length() { + let path = PathBuf::default(); let options = BlockstoreOptions::default(); let oldest_slot = OldestSlot::default(); // The names and descriptors don't need to be in the same order for our use cases; @@ -2235,7 +2282,7 @@ pub mod tests { // should update both lists. assert_eq!( Rocks::columns().len(), - Rocks::cf_descriptors(&options, &oldest_slot).len() + Rocks::cf_descriptors(&path, &options, &oldest_slot).len() ); } @@ -2260,6 +2307,49 @@ pub mod tests { assert!(!should_enable_cf_compaction("something else")); } + #[test] + fn test_open_unknown_columns() { + solana_logger::setup(); + + let temp_dir = tempdir().unwrap(); + let db_path = temp_dir.path(); + + // Open with Primary to create the new database + { + let options = BlockstoreOptions { + access_type: AccessType::Primary, + enforce_ulimit_nofile: false, + ..BlockstoreOptions::default() + }; + let mut rocks = Rocks::open(db_path, options).unwrap(); + + // Introduce a new column that will not be known + rocks + .db + .create_cf("new_column", &Options::default()) + .unwrap(); + } + + // Opening with either Secondary or Primary access should succeed, + // even though the Rocks code is unaware of "new_column" + { + let options = BlockstoreOptions { + access_type: AccessType::Secondary, + enforce_ulimit_nofile: false, + ..BlockstoreOptions::default() + }; + let _ = Rocks::open(db_path, options).unwrap(); + } + { + let options = BlockstoreOptions { + access_type: AccessType::Primary, + enforce_ulimit_nofile: false, + ..BlockstoreOptions::default() + }; + let _ = Rocks::open(db_path, options).unwrap(); + } + } + impl LedgerColumn where C: ColumnIndexDeprecation + ProtobufColumn + ColumnName, diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 41a16c9ae3fee3..60f8a223c8a3b6 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -140,8 +140,8 @@ pub(crate) struct ErasureConfig { #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct MerkleRootMeta { - /// The merkle root - merkle_root: Hash, + /// The merkle root, `None` for legacy shreds + merkle_root: Option, /// The first received shred index first_received_shred_index: u32, /// The shred type of the first received shred @@ -406,6 +406,34 @@ impl ErasureMeta { } } +impl MerkleRootMeta { + pub(crate) fn from_shred(shred: &Shred) -> Self { + Self { + // An error here after the shred has already sigverified + // can only indicate that the leader is sending + 
// legacy or malformed shreds. We should still store + // `None` for those cases in blockstore, as a later + // shred that contains a proper merkle root would constitute + // a valid duplicate shred proof. + merkle_root: shred.merkle_root().ok(), + first_received_shred_index: shred.index(), + first_received_shred_type: shred.shred_type(), + } + } + + pub(crate) fn merkle_root(&self) -> Option<Hash> { + self.merkle_root + } + + pub(crate) fn first_received_shred_index(&self) -> u32 { + self.first_received_shred_index + } + + pub(crate) fn first_received_shred_type(&self) -> ShredType { + self.first_received_shred_type + } +} + impl DuplicateSlotProof { pub(crate) fn new(shred1: Vec<u8>, shred2: Vec<u8>) -> Self { DuplicateSlotProof { shred1, shred2 } diff --git a/ledger/src/blockstore_metric_report_service.rs b/ledger/src/blockstore_metric_report_service.rs index 393442a3e25aca..b08dab1ad38dc5 100644 --- a/ledger/src/blockstore_metric_report_service.rs +++ b/ledger/src/blockstore_metric_report_service.rs @@ -34,6 +34,7 @@ impl BlockstoreMetricReportService { BLOCKSTORE_METRICS_REPORT_PERIOD_MILLIS, )); blockstore.submit_rocksdb_cf_metrics_for_all_cfs(); + blockstore.report_rpc_api_metrics(); }) .unwrap(); Self { t_cf_metric } diff --git a/ledger/src/blockstore_metrics.rs b/ledger/src/blockstore_metrics.rs index 46cceb55b36a7c..eea1ba0ce01d0b 100644 --- a/ledger/src/blockstore_metrics.rs +++ b/ledger/src/blockstore_metrics.rs @@ -143,6 +143,104 @@ impl BlockstoreInsertionMetrics { } } +/// A metrics struct to track the number of times Blockstore RPC functions are called. +#[derive(Default)] +pub(crate) struct BlockstoreRpcApiMetrics { + pub num_get_block_height: AtomicU64, + pub num_get_complete_transaction: AtomicU64, + pub num_get_confirmed_signatures_for_address: AtomicU64, + pub num_get_confirmed_signatures_for_address2: AtomicU64, + pub num_get_rooted_block: AtomicU64, + pub num_get_rooted_block_time: AtomicU64, + pub num_get_rooted_transaction: AtomicU64, + pub num_get_rooted_transaction_status: AtomicU64, + pub num_get_rooted_block_with_entries: AtomicU64, + pub num_get_transaction_status: AtomicU64, +} + +impl BlockstoreRpcApiMetrics { + pub fn report(&self) { + let num_get_block_height = self.num_get_block_height.swap(0, Ordering::Relaxed); + let num_get_complete_transaction = + self.num_get_complete_transaction.swap(0, Ordering::Relaxed); + let num_get_confirmed_signatures_for_address = self + .num_get_confirmed_signatures_for_address + .swap(0, Ordering::Relaxed); + let num_get_confirmed_signatures_for_address2 = self + .num_get_confirmed_signatures_for_address2 + .swap(0, Ordering::Relaxed); + let num_get_rooted_block = self.num_get_rooted_block.swap(0, Ordering::Relaxed); + let num_get_rooted_block_time = self.num_get_rooted_block_time.swap(0, Ordering::Relaxed); + let num_get_rooted_transaction = self.num_get_rooted_transaction.swap(0, Ordering::Relaxed); + let num_get_rooted_transaction_status = self + .num_get_rooted_transaction_status + .swap(0, Ordering::Relaxed); + let num_get_rooted_block_with_entries = self + .num_get_rooted_block_with_entries + .swap(0, Ordering::Relaxed); + let num_get_transaction_status = self.num_get_transaction_status.swap(0, Ordering::Relaxed); + + let total_num_queries = num_get_block_height + .saturating_add(num_get_complete_transaction) + .saturating_add(num_get_confirmed_signatures_for_address) + .saturating_add(num_get_confirmed_signatures_for_address2) + .saturating_add(num_get_rooted_block) + .saturating_add(num_get_rooted_block_time)
.saturating_add(num_get_rooted_transaction) + .saturating_add(num_get_rooted_transaction_status) + .saturating_add(num_get_rooted_block_with_entries) + .saturating_add(num_get_transaction_status); + + if total_num_queries > 0 { + datapoint_info!( + "blockstore-rpc-api", + ("num_get_block_height", num_get_block_height as i64, i64), + ( + "num_get_complete_transaction", + num_get_complete_transaction as i64, + i64 + ), + ( + "num_get_confirmed_signatures_for_address", + num_get_confirmed_signatures_for_address as i64, + i64 + ), + ( + "num_get_confirmed_signatures_for_address2", + num_get_confirmed_signatures_for_address2 as i64, + i64 + ), + ("num_get_rooted_block", num_get_rooted_block as i64, i64), + ( + "num_get_rooted_block_time", + num_get_rooted_block_time as i64, + i64 + ), + ( + "num_get_rooted_transaction", + num_get_rooted_transaction as i64, + i64 + ), + ( + "num_get_rooted_transaction_status", + num_get_rooted_transaction_status as i64, + i64 + ), + ( + "num_get_rooted_block_with_entries", + num_get_rooted_block_with_entries as i64, + i64 + ), + ( + "num_get_transaction_status", + num_get_transaction_status as i64, + i64 + ), + ); + } + } +} + /// A metrics struct that exposes RocksDB's column family properties. /// /// Here we only expose a subset of all the internal properties which are diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 5218b55c4b9050..cc8a4e5cb607ac 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -74,7 +74,7 @@ use { thiserror::Error, }; -struct TransactionBatchWithIndexes<'a, 'b> { +pub struct TransactionBatchWithIndexes<'a, 'b> { pub batch: TransactionBatch<'a, 'b>, pub transaction_indexes: Vec, } @@ -134,7 +134,7 @@ fn get_first_error( first_err } -fn execute_batch( +pub fn execute_batch( batch: &TransactionBatchWithIndexes, bank: &Arc, transaction_status_sender: Option<&TransactionStatusSender>, @@ -751,7 +751,8 @@ pub fn test_process_blockstore( None, None, exit, - ); + ) + .unwrap(); process_blockstore_from_root( blockstore, @@ -1221,6 +1222,7 @@ fn confirm_slot_entries( slot, index: entry_index, entry: entry.into(), + starting_transaction_index: entry_tx_starting_index, }) { warn!( "Slot {}, entry {} entry_notification_sender send failed: {:?}", @@ -1830,7 +1832,7 @@ pub struct TransactionStatusBatch { pub transaction_indexes: Vec, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TransactionStatusSender { pub sender: Sender, } @@ -1945,7 +1947,9 @@ pub mod tests { genesis_utils::{ self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, - installed_scheduler_pool::{MockInstalledScheduler, SchedulingContext, WaitReason}, + installed_scheduler_pool::{ + MockInstalledScheduler, MockUninstalledScheduler, SchedulingContext, + }, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -2691,7 +2695,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair = Keypair::new(); let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash()); let tx = system_transaction::transfer( @@ -2856,7 +2860,7 @@ pub mod tests { mint_keypair, .. 
} = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); @@ -2893,7 +2897,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -2953,7 +2957,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3103,8 +3107,12 @@ pub mod tests { let mock_program_id = solana_sdk::pubkey::new_rand(); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockBuiltinOk::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockBuiltinOk::vm, + ) + .0; let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3118,7 +3126,6 @@ pub mod tests { ); let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]); - let bank = Arc::new(bank); let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]); bank.freeze(); let blockhash_ok = bank.last_blockhash(); @@ -3144,8 +3151,12 @@ pub mod tests { let mut bankhash_err = None; (0..get_instruction_errors().len()).for_each(|err| { - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockBuiltinErr::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockBuiltinErr::vm, + ) + .0; let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3181,7 +3192,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3275,7 +3286,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3321,7 +3332,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1_000_000_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; @@ -3388,7 +3399,7 @@ pub mod tests { .. } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let mut keypairs: Vec = vec![]; @@ -3455,7 +3466,7 @@ pub mod tests { mint_keypair, .. 
} = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3517,7 +3528,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(11_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); bank.transfer(1_000, &mint_keypair, &pubkey).unwrap(); assert_eq!(bank.transaction_count(), 1); @@ -3558,7 +3569,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(11_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let success_tx = system_transaction::transfer( @@ -3844,7 +3855,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(100); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let genesis_hash = genesis_config.hash(); let keypair = Keypair::new(); @@ -3875,7 +3886,7 @@ pub mod tests { AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), ); - *bank.epoch_schedule() + bank.epoch_schedule().clone() } fn frozen_bank_slots(bank_forks: &BankForks) -> Vec { @@ -3908,7 +3919,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1_000_000_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); @@ -3966,14 +3977,18 @@ pub mod tests { &validator_keypairs, vec![100; validator_keypairs.len()], ); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0.freeze(); - let bank1 = Arc::new(Bank::new_from_parent( - bank0.clone(), - &solana_sdk::pubkey::new_rand(), - 1, - )); + let bank1 = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent( + bank0.clone(), + &solana_sdk::pubkey::new_rand(), + 1, + )) + .clone_without_scheduler(); // The new blockhash is going to be the hash of the last tick in the block let bank_1_blockhash = bank1.last_blockhash(); @@ -4373,9 +4388,9 @@ pub mod tests { .. 
} = create_genesis_config(100 * LAMPORTS_PER_SOL); let genesis_hash = genesis_config.hash(); - let bank = BankWithScheduler::new_without_scheduler(Arc::new(Bank::new_for_tests( - &genesis_config, - ))); + let bank = BankWithScheduler::new_without_scheduler( + Bank::new_with_bank_forks_for_tests(&genesis_config).0, + ); let mut timing = ConfirmationTiming::default(); let mut progress = ConfirmationProgress::new(genesis_hash); let amount = genesis_config.rent.minimum_balance(0); @@ -4532,11 +4547,12 @@ pub mod tests { let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); let mut mocked_scheduler = MockInstalledScheduler::new(); - let mut seq = mockall::Sequence::new(); + let seq = Arc::new(Mutex::new(mockall::Sequence::new())); + let seq_cloned = seq.clone(); mocked_scheduler .expect_context() .times(1) - .in_sequence(&mut seq) + .in_sequence(&mut seq.lock().unwrap()) .return_const(context); mocked_scheduler .expect_schedule_execution() @@ -4544,15 +4560,21 @@ pub mod tests { .returning(|_| ()); mocked_scheduler .expect_wait_for_termination() - .with(mockall::predicate::eq(WaitReason::DroppedFromBankForks)) - .times(1) - .in_sequence(&mut seq) - .returning(|_| None); - mocked_scheduler - .expect_return_to_pool() + .with(mockall::predicate::eq(true)) .times(1) - .in_sequence(&mut seq) - .returning(|| ()); + .in_sequence(&mut seq.lock().unwrap()) + .returning(move |_| { + let mut mocked_uninstalled_scheduler = MockUninstalledScheduler::new(); + mocked_uninstalled_scheduler + .expect_return_to_pool() + .times(1) + .in_sequence(&mut seq_cloned.lock().unwrap()) + .returning(|| ()); + ( + (Ok(()), ExecuteTimings::default()), + Box::new(mocked_uninstalled_scheduler), + ) + }); let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); let batch = bank.prepare_sanitized_batch(&txs); @@ -4591,7 +4613,7 @@ pub mod tests { genesis_config.ticks_per_slot = TICKS_PER_SLOT; let genesis_hash = genesis_config.hash(); - let slot_0_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (slot_0_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); assert_eq!(slot_0_bank.slot(), 0); assert_eq!(slot_0_bank.tick_height(), 0); assert_eq!(slot_0_bank.max_tick_height(), 2); @@ -4606,7 +4628,12 @@ pub mod tests { assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1)); assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0)); - let slot_2_bank = Arc::new(Bank::new_from_parent(slot_0_bank, &collector_id, 2)); + let new_bank = Bank::new_from_parent(slot_0_bank, &collector_id, 2); + let slot_2_bank = bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); assert_eq!(slot_2_bank.slot(), 2); assert_eq!(slot_2_bank.tick_height(), 2); assert_eq!(slot_2_bank.max_tick_height(), 6); diff --git a/ledger/src/entry_notifier_interface.rs b/ledger/src/entry_notifier_interface.rs index 174be9e1b7f1f4..dc605e0ebaf8b5 100644 --- a/ledger/src/entry_notifier_interface.rs +++ b/ledger/src/entry_notifier_interface.rs @@ -1,7 +1,13 @@ use {solana_entry::entry::EntrySummary, solana_sdk::clock::Slot, std::sync::Arc}; pub trait EntryNotifier { - fn notify_entry(&self, slot: Slot, index: usize, entry: &EntrySummary); + fn notify_entry( + &self, + slot: Slot, + index: usize, + entry: &EntrySummary, + starting_transaction_index: usize, + ); } pub type EntryNotifierArc = Arc; diff --git a/ledger/src/entry_notifier_service.rs b/ledger/src/entry_notifier_service.rs index ec7eae0bc75723..6a249abf2510b6 100644 --- a/ledger/src/entry_notifier_service.rs 
+++ b/ledger/src/entry_notifier_service.rs @@ -17,6 +17,7 @@ pub struct EntryNotification { pub slot: Slot, pub index: usize, pub entry: EntrySummary, + pub starting_transaction_index: usize, } pub type EntryNotifierSender = Sender; @@ -54,9 +55,13 @@ impl EntryNotifierService { entry_notification_receiver: &EntryNotifierReceiver, entry_notifier: EntryNotifierArc, ) -> Result<(), RecvTimeoutError> { - let EntryNotification { slot, index, entry } = - entry_notification_receiver.recv_timeout(Duration::from_secs(1))?; - entry_notifier.notify_entry(slot, index, &entry); + let EntryNotification { + slot, + index, + entry, + starting_transaction_index, + } = entry_notification_receiver.recv_timeout(Duration::from_secs(1))?; + entry_notifier.notify_entry(slot, index, &entry, starting_transaction_index); Ok(()) } diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs index 733ea3c359befd..f847f6ce2871fe 100644 --- a/ledger/src/leader_schedule_cache.rs +++ b/ledger/src/leader_schedule_cache.rs @@ -40,7 +40,7 @@ pub struct LeaderScheduleCache { impl LeaderScheduleCache { pub fn new_from_bank(bank: &Bank) -> Self { - Self::new(*bank.epoch_schedule(), bank) + Self::new(bank.epoch_schedule().clone(), bank) } pub fn new(epoch_schedule: EpochSchedule, root_bank: &Bank) -> Self { @@ -56,9 +56,11 @@ impl LeaderScheduleCache { cache.set_root(root_bank); // Calculate the schedule for all epochs between 0 and leader_schedule_epoch(root) - let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(root_bank.slot()); + let leader_schedule_epoch = cache + .epoch_schedule + .get_leader_schedule_epoch(root_bank.slot()); for epoch in 0..leader_schedule_epoch { - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); + let first_slot_in_epoch = cache.epoch_schedule.get_first_slot_in_epoch(epoch); cache.slot_leader_at(first_slot_in_epoch, Some(root_bank)); } cache @@ -507,7 +509,7 @@ mod tests { } = create_genesis_config(10_000 * bootstrap_validator_stake_lamports()); genesis_config.epoch_schedule.warmup = false; - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); // Create new vote account @@ -531,7 +533,11 @@ mod tests { target_slot += 1; } - let bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), target_slot); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank, &Pubkey::default(), target_slot)) + .clone_without_scheduler(); let mut expected_slot = 0; let epoch = bank.get_leader_schedule_epoch(target_slot); for i in 0..epoch { diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 5fda160e29b976..1ce6c7ccc164cb 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -283,9 +283,10 @@ impl ErasureSetId { self.0 } - // Storage key for ErasureMeta in blockstore db. - pub(crate) fn store_key(&self) -> (Slot, /*fec_set_index:*/ u64) { - (self.0, u64::from(self.1)) + // Storage key for ErasureMeta and MerkleRootMeta in blockstore db. 
+ // Note: ErasureMeta column uses u64 so this will need to be typecast + pub(crate) fn store_key(&self) -> (Slot, /*fec_set_index:*/ u32) { + (self.0, self.1) } } @@ -334,6 +335,7 @@ impl Shred { dispatch!(pub(crate) fn erasure_shard_index(&self) -> Result); dispatch!(pub fn into_payload(self) -> Vec); + dispatch!(pub fn merkle_root(&self) -> Result); dispatch!(pub fn payload(&self) -> &Vec); dispatch!(pub fn sanitize(&self) -> Result<(), Error>); @@ -893,6 +895,7 @@ pub fn should_discard_shred( root: Slot, max_slot: Slot, shred_version: u16, + should_drop_legacy_shreds: impl Fn(Slot) -> bool, stats: &mut ShredFetchStats, ) -> bool { debug_assert!(root < max_slot); @@ -967,7 +970,11 @@ pub fn should_discard_shred( } } match shred_variant { - ShredVariant::LegacyCode | ShredVariant::LegacyData => (), + ShredVariant::LegacyCode | ShredVariant::LegacyData => { + if should_drop_legacy_shreds(slot) { + return true; + } + } ShredVariant::MerkleCode(_) => { stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1); } @@ -1171,6 +1178,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats, ShredFetchStats::default()); @@ -1181,6 +1189,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 1); @@ -1191,6 +1200,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 2); @@ -1201,6 +1211,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 3); @@ -1211,6 +1222,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 4); @@ -1221,6 +1233,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.bad_parent_offset, 1); @@ -1241,6 +1254,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); @@ -1260,6 +1274,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(1, stats.index_out_of_bounds); @@ -1280,6 +1295,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); packet.buffer_mut()[OFFSET_OF_SHRED_VARIANT] = u8::MAX; @@ -1289,6 +1305,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(1, stats.bad_shred_type); @@ -1300,6 +1317,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(1, stats.bad_shred_type); diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 4f1cd22111e07f..8ed51a6653b411 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -154,7 +154,7 @@ impl ShredData { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?) } - fn merkle_root(&self) -> Result { + pub(super) fn merkle_root(&self) -> Result { let proof_size = self.proof_size()?; let index = self.erasure_shard_index()?; let proof_offset = Self::proof_offset(proof_size)?; @@ -266,7 +266,7 @@ impl ShredCode { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?) 
} - fn merkle_root(&self) -> Result { + pub(super) fn merkle_root(&self) -> Result { let proof_size = self.proof_size()?; let index = self.erasure_shard_index()?; let proof_offset = Self::proof_offset(proof_size)?; @@ -821,7 +821,8 @@ pub(super) fn make_shreds_from_data( } } let now = Instant::now(); - let erasure_batch_size = shredder::get_erasure_batch_size(DATA_SHREDS_PER_FEC_BLOCK); + let erasure_batch_size = + shredder::get_erasure_batch_size(DATA_SHREDS_PER_FEC_BLOCK, is_last_in_slot); let proof_size = get_proof_size(erasure_batch_size); let data_buffer_size = ShredData::capacity(proof_size)?; let chunk_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_size; @@ -872,7 +873,8 @@ pub(super) fn make_shreds_from_data( let data_buffer_size = ShredData::capacity(proof_size).ok()?; let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size; let num_data_shreds = num_data_shreds.max(1); - let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds); + let erasure_batch_size = + shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot); (proof_size == get_proof_size(erasure_batch_size)) .then_some((proof_size, data_buffer_size)) }) @@ -932,7 +934,8 @@ pub(super) fn make_shreds_from_data( .scan(next_code_index, |next_code_index, chunk| { let out = Some(*next_code_index); let num_data_shreds = chunk.len(); - let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds); + let erasure_batch_size = + shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot); let num_coding_shreds = erasure_batch_size - num_data_shreds; *next_code_index += num_coding_shreds as u32; out @@ -945,7 +948,13 @@ pub(super) fn make_shreds_from_data( .into_iter() .zip(next_code_index) .map(|(shreds, next_code_index)| { - make_erasure_batch(keypair, shreds, next_code_index, reed_solomon_cache) + make_erasure_batch( + keypair, + shreds, + next_code_index, + is_last_in_slot, + reed_solomon_cache, + ) }) .collect() } else { @@ -954,7 +963,13 @@ pub(super) fn make_shreds_from_data( .into_par_iter() .zip(next_code_index) .map(|(shreds, next_code_index)| { - make_erasure_batch(keypair, shreds, next_code_index, reed_solomon_cache) + make_erasure_batch( + keypair, + shreds, + next_code_index, + is_last_in_slot, + reed_solomon_cache, + ) }) .collect() }) @@ -969,10 +984,11 @@ fn make_erasure_batch( keypair: &Keypair, shreds: Vec, next_code_index: u32, + is_last_in_slot: bool, reed_solomon_cache: &ReedSolomonCache, ) -> Result, Error> { let num_data_shreds = shreds.len(); - let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds); + let erasure_batch_size = shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot); let num_coding_shreds = erasure_batch_size - num_data_shreds; let proof_size = get_proof_size(erasure_batch_size); debug_assert!(shreds @@ -1056,7 +1072,10 @@ mod test { itertools::Itertools, rand::{seq::SliceRandom, CryptoRng, Rng}, rayon::ThreadPoolBuilder, - solana_sdk::signature::{Keypair, Signer}, + solana_sdk::{ + packet::PACKET_DATA_SIZE, + signature::{Keypair, Signer}, + }, std::{cmp::Ordering, iter::repeat_with}, test_case::test_case, }; @@ -1124,8 +1143,7 @@ mod test { assert_eq!(entry, &bytes[..SIZE_OF_MERKLE_PROOF_ENTRY]); } - fn run_merkle_tree_round_trip(size: usize) { - let mut rng = rand::thread_rng(); + fn run_merkle_tree_round_trip(rng: &mut R, size: usize) { let nodes = repeat_with(|| rng.gen::<[u8; 32]>()).map(Hash::from); let nodes: Vec<_> = nodes.take(size).collect(); let tree = make_merkle_tree(nodes.clone()); @@ 
-1145,8 +1163,9 @@ mod test { #[test] fn test_merkle_tree_round_trip() { - for size in [1, 2, 3, 4, 5, 6, 7, 8, 9, 19, 37, 64, 79] { - run_merkle_tree_round_trip(size); + let mut rng = rand::thread_rng(); + for size in 1..=143 { + run_merkle_tree_round_trip(&mut rng, size); } } @@ -1327,32 +1346,49 @@ mod test { } } - #[test_case(0)] - #[test_case(15600)] - #[test_case(31200)] - #[test_case(46800)] - fn test_make_shreds_from_data(data_size: usize) { + #[test_case(0, false)] + #[test_case(0, true)] + #[test_case(15600, false)] + #[test_case(15600, true)] + #[test_case(31200, false)] + #[test_case(31200, true)] + #[test_case(46800, false)] + #[test_case(46800, true)] + fn test_make_shreds_from_data(data_size: usize, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let data_size = data_size.saturating_sub(16); let reed_solomon_cache = ReedSolomonCache::default(); for data_size in data_size..data_size + 32 { - run_make_shreds_from_data(&mut rng, data_size, &reed_solomon_cache); + run_make_shreds_from_data(&mut rng, data_size, is_last_in_slot, &reed_solomon_cache); } } - #[test] - fn test_make_shreds_from_data_rand() { + #[test_case(false)] + #[test_case(true)] + fn test_make_shreds_from_data_rand(is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let reed_solomon_cache = ReedSolomonCache::default(); for _ in 0..32 { let data_size = rng.gen_range(0..31200 * 7); - run_make_shreds_from_data(&mut rng, data_size, &reed_solomon_cache); + run_make_shreds_from_data(&mut rng, data_size, is_last_in_slot, &reed_solomon_cache); + } + } + + #[ignore] + #[test_case(false)] + #[test_case(true)] + fn test_make_shreds_from_data_paranoid(is_last_in_slot: bool) { + let mut rng = rand::thread_rng(); + let reed_solomon_cache = ReedSolomonCache::default(); + for data_size in 0..=PACKET_DATA_SIZE * 4 * 64 { + run_make_shreds_from_data(&mut rng, data_size, is_last_in_slot, &reed_solomon_cache); } } fn run_make_shreds_from_data( rng: &mut R, data_size: usize, + is_last_in_slot: bool, reed_solomon_cache: &ReedSolomonCache, ) { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); @@ -1373,7 +1409,7 @@ mod test { parent_slot, shred_version, reference_tick, - true, // is_last_in_slot + is_last_in_slot, next_shred_index, next_code_index, reed_solomon_cache, @@ -1480,14 +1516,17 @@ mod test { .flags .contains(ShredFlags::LAST_SHRED_IN_SLOT)) .count(), - 1 + if is_last_in_slot { 1 } else { 0 } + ); + assert_eq!( + data_shreds + .last() + .unwrap() + .data_header + .flags + .contains(ShredFlags::LAST_SHRED_IN_SLOT), + is_last_in_slot ); - assert!(data_shreds - .last() - .unwrap() - .data_header - .flags - .contains(ShredFlags::LAST_SHRED_IN_SLOT)); // Assert that data shreds can be recovered from coding shreds. 
let recovered_data_shreds: Vec<_> = shreds .iter() diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index ba85d92af25187..0ad97a0f729a77 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -6,7 +6,7 @@ use { CodingShredHeader, Error, ShredCommonHeader, ShredType, SignedData, DATA_SHREDS_PER_FEC_BLOCK, MAX_DATA_SHREDS_PER_SLOT, SIZE_OF_NONCE, }, - solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, signature::Signature}, + solana_sdk::{clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, signature::Signature}, static_assertions::const_assert_eq, }; @@ -47,6 +47,13 @@ impl ShredCode { } } + pub(super) fn merkle_root(&self) -> Result<Hash, Error> { + match self { + Self::Legacy(_) => Err(Error::InvalidShredType), + Self::Merkle(shred) => shred.merkle_root(), + } + } + pub(super) fn new_from_parity_shard( slot: Slot, index: u32, diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index 9bf2c0bf05f79e..ecb40367b4ef08 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -7,7 +7,7 @@ use { DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredType, ShredVariant, SignedData, MAX_DATA_SHREDS_PER_SLOT, }, - solana_sdk::{clock::Slot, signature::Signature}, + solana_sdk::{clock::Slot, hash::Hash, signature::Signature}, }; #[derive(Clone, Debug, Eq, PartialEq)] @@ -41,6 +41,13 @@ impl ShredData { } } + pub(super) fn merkle_root(&self) -> Result<Hash, Error> { + match self { + Self::Legacy(_) => Err(Error::InvalidShredType), + Self::Merkle(shred) => shred.merkle_root(), + } + } + pub(super) fn new_from_data( slot: Slot, index: u32, diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 1a597c41f984d4..f3203876de7066 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -207,7 +207,13 @@ impl Shredder { .iter() .scan(next_code_index, |next_code_index, chunk| { let num_data_shreds = chunk.len(); - let erasure_batch_size = get_erasure_batch_size(num_data_shreds); + let is_last_in_slot = chunk + .last() + .copied() + .map(Shred::last_in_slot) + .unwrap_or(true); + let erasure_batch_size = + get_erasure_batch_size(num_data_shreds, is_last_in_slot); *next_code_index += (erasure_batch_size - num_data_shreds) as u32; Some(*next_code_index) }), @@ -276,7 +282,12 @@ impl Shredder { && shred.version() == version && shred.fec_set_index() == fec_set_index)); let num_data = data.len(); - let num_coding = get_erasure_batch_size(num_data) + let is_last_in_slot = data + .last() + .map(Borrow::borrow) + .map(Shred::last_in_slot) + .unwrap_or(true); + let num_coding = get_erasure_batch_size(num_data, is_last_in_slot) .checked_sub(num_data) .unwrap(); assert!(num_coding > 0); @@ -434,11 +445,16 @@ impl Default for ReedSolomonCache { } } /// Maps number of data shreds in each batch to the erasure batch size. -pub(crate) fn get_erasure_batch_size(num_data_shreds: usize) -> usize { - ERASURE_BATCH_SIZE +pub(crate) fn get_erasure_batch_size(num_data_shreds: usize, is_last_in_slot: bool) -> usize { + let erasure_batch_size = ERASURE_BATCH_SIZE .get(num_data_shreds) .copied() - .unwrap_or(2 * num_data_shreds) + .unwrap_or(2 * num_data_shreds); + if is_last_in_slot { + erasure_batch_size.max(2 * DATA_SHREDS_PER_FEC_BLOCK) + } else { + erasure_batch_size + } } // Returns offsets to fec_set_index when splitting shreds into erasure batches.
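For illustration, here is a minimal, self-contained sketch of the last-in-slot sizing rule that `get_erasure_batch_size` now implements. The `ERASURE_BATCH_SIZE` values and `DATA_SHREDS_PER_FEC_BLOCK` below are placeholder assumptions, not the crate's actual constants; only the shape of the lookup-then-floor logic mirrors the diff.

// Sketch only: constants are illustrative; see ledger/src/shredder.rs for the real table.
const DATA_SHREDS_PER_FEC_BLOCK: usize = 32; // assumed value
const ERASURE_BATCH_SIZE: [usize; 4] = [0, 18, 20, 22]; // truncated, illustrative

fn get_erasure_batch_size(num_data_shreds: usize, is_last_in_slot: bool) -> usize {
    // Look up the total (data + coding) batch size; fall back to 2x for sizes
    // past the end of the table.
    let erasure_batch_size = ERASURE_BATCH_SIZE
        .get(num_data_shreds)
        .copied()
        .unwrap_or(2 * num_data_shreds);
    if is_last_in_slot {
        // Pad the final FEC set of a slot with extra coding shreds so that it
        // is at least as recoverable as a full batch.
        erasure_batch_size.max(2 * DATA_SHREDS_PER_FEC_BLOCK)
    } else {
        erasure_batch_size
    }
}

fn main() {
    // Mid-slot, a single-data-shred batch keeps its table size (18 total here);
    // at the end of a slot the same batch is padded up to 64 shreds total.
    assert_eq!(get_erasure_batch_size(1, false), 18);
    assert_eq!(get_erasure_batch_size(1, true), 64);
}

Judging from the call sites above, the motivation appears to be that a small trailing batch would otherwise receive only a handful of coding shreds, leaving the end of a slot disproportionately fragile to packet loss.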
@@ -518,17 +534,19 @@ mod tests { }) .collect(); + let is_last_in_slot = true; let size = serialized_size(&entries).unwrap() as usize; // Integer division to ensure we have enough shreds to fit all the data let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap(); let num_expected_data_shreds = (size + data_buffer_size - 1) / data_buffer_size; let num_expected_coding_shreds = - get_erasure_batch_size(num_expected_data_shreds) - num_expected_data_shreds; + get_erasure_batch_size(num_expected_data_shreds, is_last_in_slot) + - num_expected_data_shreds; let start_index = 0; let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &keypair, &entries, - true, // is_last_in_slot + is_last_in_slot, start_index, // next_shred_index start_index, // next_code_index true, // merkle_variant @@ -792,7 +810,7 @@ mod tests { assert_eq!(data_shreds.len(), num_data_shreds); assert_eq!( num_coding_shreds, - get_erasure_batch_size(num_data_shreds) - num_data_shreds + get_erasure_batch_size(num_data_shreds, is_last_in_slot) - num_data_shreds ); let all_shreds = data_shreds @@ -1189,7 +1207,10 @@ mod tests { .iter() .group_by(|shred| shred.fec_set_index()) .into_iter() - .map(|(_, chunk)| get_erasure_batch_size(chunk.count())) + .map(|(_, chunk)| { + let chunk: Vec<_> = chunk.collect(); + get_erasure_batch_size(chunk.len(), chunk.last().unwrap().last_in_slot()) + }) .sum(); assert_eq!(coding_shreds.len(), num_shreds - data_shreds.len()); } @@ -1232,9 +1253,10 @@ mod tests { #[test] fn test_max_shreds_per_slot() { for num_data_shreds in 32..128 { - let num_coding_shreds = get_erasure_batch_size(num_data_shreds) - .checked_sub(num_data_shreds) - .unwrap(); + let num_coding_shreds = + get_erasure_batch_size(num_data_shreds, /*is_last_in_slot:*/ false) + .checked_sub(num_data_shreds) + .unwrap(); assert!( MAX_DATA_SHREDS_PER_SLOT * num_coding_shreds <= MAX_CODE_SHREDS_PER_SLOT * num_data_shreds diff --git a/ledger/src/use_snapshot_archives_at_startup.rs b/ledger/src/use_snapshot_archives_at_startup.rs index b173ed1564e5fa..6e19d0c424dcf8 100644 --- a/ledger/src/use_snapshot_archives_at_startup.rs +++ b/ledger/src/use_snapshot_archives_at_startup.rs @@ -48,4 +48,8 @@ pub mod cli { pub fn default_value() -> &'static str { UseSnapshotArchivesAtStartup::default().into() } + + pub fn default_value_for_ledger_tool() -> &'static str { + UseSnapshotArchivesAtStartup::Always.into() + } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index bd6a15eee1881e..4ed4ee46f74f9a 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -4,6 +4,7 @@ use { crossbeam_channel::{unbounded, Receiver}, gag::BufferRedirect, log::*, + rand::seq::IteratorRandom, serial_test::serial, solana_accounts_db::{ accounts_db::create_accounts_run_and_snapshot_dirs, hardened_unpack::open_genesis_config, @@ -15,7 +16,7 @@ use { }, optimistic_confirmation_verifier::OptimisticConfirmationVerifier, replay_stage::DUPLICATE_THRESHOLD, - validator::ValidatorConfig, + validator::{BlockVerificationMethod, ValidatorConfig}, }, solana_download_utils::download_snapshot_archive, solana_entry::entry::create_ticks, @@ -239,10 +240,7 @@ fn test_local_cluster_signature_subscribe() { ); let (mut sig_subscribe_client, receiver) = PubsubClient::signature_subscribe( - &format!( - "ws://{}", - &non_bootstrap_info.rpc_pubsub().unwrap().to_string() - ), + &format!("ws://{}", non_bootstrap_info.rpc_pubsub().unwrap()), &transaction.signatures[0], 
Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::processed()), @@ -2385,7 +2383,7 @@ fn test_hard_fork_with_gap_in_roots() { .reversed_rooted_slot_iterator(common_root) .unwrap() .collect::>(); - // artifically restore the forcibly purged genesis only for the validator A just for the sake of + // artificially restore the forcibly purged genesis only for the validator A just for the sake of // the final assertions. slots_a.push(genesis_slot); roots_a.push(genesis_slot); @@ -2563,33 +2561,40 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { #[test] #[serial] fn test_rpc_block_subscribe() { - let total_stake = 100 * DEFAULT_NODE_STAKE; - let leader_stake = total_stake; - let node_stakes = vec![leader_stake]; + let leader_stake = 100 * DEFAULT_NODE_STAKE; + let rpc_stake = DEFAULT_NODE_STAKE; + let total_stake = leader_stake + rpc_stake; + let node_stakes = vec![leader_stake, rpc_stake]; let mut validator_config = ValidatorConfig::default_for_test(); validator_config.enable_default_rpc_block_subscribe(); let validator_keys = [ "28bN3xyvrP4E8LwEgtLjhnkb7cY4amQb6DrYAbAYjgRV4GAGgkVM2K7wnxnAS7WDneuavza7x21MiafLu1HkwQt4", + "2saHBBoTkLMmttmPQP8KfBkcCw45S5cwtV3wTdGCscRC8uxdgvHxpHiWXKx4LvJjNJtnNcbSv5NdheokFFqnNDt8", ] .iter() .map(|s| (Arc::new(Keypair::from_base58_string(s)), true)) .take(node_stakes.len()) .collect::>(); + let rpc_node_pubkey = &validator_keys[1].0.pubkey(); let mut config = ClusterConfig { cluster_lamports: total_stake, node_stakes, - validator_configs: vec![validator_config], + validator_configs: make_identical_validator_configs(&validator_config, 2), validator_keys: Some(validator_keys), skip_warmup_slots: true, ..ClusterConfig::default() }; let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); + let rpc_node_contact_info = cluster.get_contact_info(rpc_node_pubkey).unwrap(); let (mut block_subscribe_client, receiver) = PubsubClient::block_subscribe( &format!( "ws://{}", - &cluster.entry_point_info.rpc_pubsub().unwrap().to_string() + // It is important that we subscribe to a non leader node as there + // is a race condition which can cause leader nodes to not send + // BlockUpdate notifications properly. See https://github.com/solana-labs/solana/pull/34421 + &rpc_node_contact_info.rpc_pubsub().unwrap().to_string() ), RpcBlockSubscribeFilter::All, Some(RpcBlockSubscribeConfig { @@ -2921,24 +2926,26 @@ fn setup_transfer_scan_threads( .get_latest_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); for i in 0..starting_keypairs_.len() { - client - .async_transfer( - 1, - &starting_keypairs_[i], - &target_keypairs_[i].pubkey(), - blockhash, - ) - .unwrap(); + let result = client.async_transfer( + 1, + &starting_keypairs_[i], + &target_keypairs_[i].pubkey(), + blockhash, + ); + if result.is_err() { + debug!("Failed in transfer for starting keypair: {:?}", result); + } } for i in 0..starting_keypairs_.len() { - client - .async_transfer( - 1, - &target_keypairs_[i], - &starting_keypairs_[i].pubkey(), - blockhash, - ) - .unwrap(); + let result = client.async_transfer( + 1, + &target_keypairs_[i], + &starting_keypairs_[i].pubkey(), + blockhash, + ); + if result.is_err() { + debug!("Failed in transfer for starting keypair: {:?}", result); + } } } }) @@ -4281,7 +4288,7 @@ fn test_leader_failure_4() { // // Validator A (60%) // Validator B (40%) -// / --- 10 --- [..] --- 16 (B is voting, due to network issues is initally not able to see the other fork at all) +// / --- 10 --- [..] 
--- 16 (B is voting, due to network issues is initially not able to see the other fork at all) // / // 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 (A votes 1 - 9 votes are landing normally. B does the same however votes are not landing) // \ @@ -4477,7 +4484,7 @@ fn test_slot_hash_expiry() { ); } -// This test simulates a case where a leader sends a duplicate block with different ancestory. One +// This test simulates a case where a leader sends a duplicate block with different ancestry. One // version builds off of the rooted path, however the other version builds off a pruned branch. The // validators that receive the pruned version will need to repair in order to continue, which // requires an ancestor hashes repair. @@ -4506,7 +4513,7 @@ fn test_slot_hash_expiry() { // reached as minority cannot pass threshold otherwise). // 4) Let minority produce forks on pruned forks until out of leader slots then kill. // 5) Truncate majority ledger past fork slot so it starts building off of fork slot. -// 6) Restart majority and wait untill it starts producing blocks on main fork and roots something +// 6) Restart majority and wait until it starts producing blocks on main fork and roots something // past the fork slot. // 7) Construct our ledger by copying majority ledger and copying blocks from minority for the pruned path. // 8) In our node's ledger, change the parent of the latest slot in majority fork to be the latest @@ -5027,6 +5034,7 @@ fn test_boot_from_local_state() { #[test] #[serial] #[allow(unused_attributes)] +#[ignore] fn test_duplicate_shreds_switch_failure() { fn wait_for_duplicate_fork_frozen(ledger_path: &Path, dup_slot: Slot) -> Hash { // Ensure all the slots <= dup_slot are also full so we know we can replay up to dup_slot @@ -5448,6 +5456,44 @@ fn test_duplicate_shreds_switch_failure() { ); } +#[test] +#[serial] +fn test_randomly_mixed_block_verification_methods_between_bootstrap_and_not() { + // tailored logging just to see two block verification methods are working correctly + solana_logger::setup_with_default( + "solana_metrics::metrics=warn,\ + solana_core=warn,\ + solana_runtime::installed_scheduler_pool=trace,\ + solana_ledger::blockstore_processor=debug,\ + info", + ); + + let num_nodes = 2; + let mut config = ClusterConfig::new_with_equal_stakes( + num_nodes, + DEFAULT_CLUSTER_LAMPORTS, + DEFAULT_NODE_STAKE, + ); + + // Randomly switch to use unified scheduler + config + .validator_configs + .iter_mut() + .choose(&mut rand::thread_rng()) + .unwrap() + .block_verification_method = BlockVerificationMethod::UnifiedScheduler; + + let local = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); + cluster_tests::spend_and_verify_all_nodes( + &local.entry_point_info, + &local.funding_keypair, + num_nodes, + HashSet::new(), + SocketAddrSpace::Unspecified, + &local.connection_cache, + ); +} + /// Forks previous marked invalid should be marked as such in fork choice on restart #[test] #[serial] diff --git a/logger/src/lib.rs b/logger/src/lib.rs index 7b9ae30d881053..6cc57e81c531f3 100644 --- a/logger/src/lib.rs +++ b/logger/src/lib.rs @@ -58,7 +58,6 @@ pub fn setup() { pub fn setup_file_with_default(logfile: &str, filter: &str) { use std::fs::OpenOptions; let file = OpenOptions::new() - .write(true) .create(true) .append(true) .open(logfile) diff --git a/metrics/README.md b/metrics/README.md index db9eb8f1036b49..f1890aacc308fe 100644 --- a/metrics/README.md +++ b/metrics/README.md @@ -4,10 +4,10 @@ ## InfluxDB -In oder to explore validator specific metrics from mainnet-beta, 
testnet or devnet you can use Chronograf: +In order to explore validator specific metrics from mainnet-beta, testnet or devnet you can use Chronograf: -* https://metrics.solana.com:8888/ (production enviroment) -* https://metrics.solana.com:8889/ (testing enviroment) +* https://metrics.solana.com:8888/ (production environment) +* https://metrics.solana.com:8889/ (testing environment) For local cluster deployments you should use: @@ -47,4 +47,4 @@ The fee market dashboard shows: ### Ping Results -The ping reults dashboard displays relevant information about the Ping API +The ping results dashboard displays relevant information about the Ping API diff --git a/net/README.md b/net/README.md index 70953a44f0d68d..a3dd929fcf40b7 100644 --- a/net/README.md +++ b/net/README.md @@ -43,7 +43,7 @@ $ ./gce.sh delete #<-- Dispose of the network (billing stops here) ### Running the network over public IP addresses By default private IP addresses are used with all instances in the same -availability zone to avoid GCE network engress charges. However to run the +availability zone to avoid GCE network egress charges. However to run the network over public IP addresses: ```bash $ ./gce.sh create -P ... diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 87c44399e7fbc8..f940dc36c8ad3e 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -241,7 +241,7 @@ mod tests { let count = rng.gen_range(1..128); let _packets: Vec<_> = repeat_with(|| recycler.allocate("")).take(count).collect(); } - // Assert that the gc size has shrinked. + // Assert that the gc size has shrunk. assert_eq!( recycler.recycler.gc.lock().unwrap().len(), RECYCLER_SHRINK_SIZE diff --git a/poh/Cargo.toml b/poh/Cargo.toml index 683d668ddfbd7a..0b93acffe5a2c4 100644 --- a/poh/Cargo.toml +++ b/poh/Cargo.toml @@ -28,6 +28,7 @@ rand = { workspace = true } solana-logger = { workspace = true } solana-perf = { workspace = true } solana-poh = { path = ".", features = ["dev-context-only-utils"] } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [features] dev-context-only-utils = [] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index a598e001fc8684..49c2d4dc3d9a88 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -10,6 +10,8 @@ //! For Entries: //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height //! +#[cfg(feature = "dev-context-only-utils")] +use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use { crate::{leader_bank_notifier::LeaderBankNotifier, poh_service::PohService}, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, SendError, Sender, TrySendError}, @@ -18,11 +20,7 @@ use { entry::{hash_transactions, Entry}, poh::Poh, }, - solana_ledger::{ - blockstore::Blockstore, - genesis_utils::{create_genesis_config, GenesisConfigInfo}, - leader_schedule_cache::LeaderScheduleCache, - }, + solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, solana_measure::{measure, measure_us}, solana_metrics::poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo}, solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, @@ -1053,6 +1051,7 @@ impl PohRecorder { } // Used in tests + #[cfg(feature = "dev-context-only-utils")] pub fn schedule_dummy_max_height_reached_failure(&mut self) { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); @@ -1609,7 +1608,7 @@ mod tests { assert!(poh_recorder.working_bank.is_some()); // Drop entry receiver, and try to tick again. Because - // the reciever is closed, the ticks will not be drained from the cache, + // the receiver is closed, the ticks will not be drained from the cache, // and the working bank will be cleared drop(entry_receiver); poh_recorder.tick(); diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index a01c688a527aec..e69db7f119862b 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -402,7 +402,7 @@ mod tests { fn test_poh_service() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let prev_hash = bank.last_blockhash(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) diff --git a/program-runtime/src/accounts_data_meter.rs b/program-runtime/src/accounts_data_meter.rs index 3e0553d1e67b80..21db92f7e63cc2 100644 --- a/program-runtime/src/accounts_data_meter.rs +++ b/program-runtime/src/accounts_data_meter.rs @@ -4,7 +4,7 @@ /// The maximum allowed size, in bytes, of the accounts data /// 128 GB was chosen because it is the RAM amount listed under Hardware Recommendations on -/// [Validator Requirements](https://docs.solana.com/running-validator/validator-reqs), and +/// [Validator Requirements](https://docs.solanalabs.com/operations/requirements), and /// validators often put the ledger on a RAM disk (i.e. tmpfs). pub const MAX_ACCOUNTS_DATA_LEN: u64 = 128_000_000_000; diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index a568162c139c37..0657df5c8a364b 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -1,9 +1,6 @@ use { crate::compute_budget_processor::{self, process_compute_budget_instructions}, - solana_sdk::{ - feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, - transaction::Result, - }, + solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey, transaction::Result}, }; #[cfg(RUSTC_WITH_SPECIALIZATION)] @@ -183,9 +180,8 @@ impl ComputeBudget { pub fn try_from_instructions<'a>( instructions: impl Iterator, - feature_set: &FeatureSet, ) -> Result { - let compute_budget_limits = process_compute_budget_instructions(instructions, feature_set)?; + let compute_budget_limits = process_compute_budget_instructions(instructions)?; Ok(ComputeBudget { compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), heap_size: compute_budget_limits.updated_heap_bytes, diff --git a/program-runtime/src/compute_budget_processor.rs b/program-runtime/src/compute_budget_processor.rs index b2c3a892493d41..f87bbcd6c17fd7 100644 --- a/program-runtime/src/compute_budget_processor.rs +++ b/program-runtime/src/compute_budget_processor.rs @@ -1,17 +1,12 @@ -//! Process compute_budget instructions to extract and sanitize limits. 
use { crate::{ compute_budget::DEFAULT_HEAP_COST, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, + borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, - FeatureSet, - }, fee::FeeBudgetLimits, instruction::{CompiledInstruction, InstructionError}, pubkey::Pubkey, @@ -33,7 +28,6 @@ pub struct ComputeBudgetLimits { pub compute_unit_limit: u32, pub compute_unit_price: u64, pub loaded_accounts_bytes: u32, - pub deprecated_additional_fee: Option, } impl Default for ComputeBudgetLimits { @@ -43,23 +37,17 @@ impl Default for ComputeBudgetLimits { compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, compute_unit_price: 0, loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - deprecated_additional_fee: None, } } } impl From for FeeBudgetLimits { fn from(val: ComputeBudgetLimits) -> Self { - let prioritization_fee = - if let Some(deprecated_additional_fee) = val.deprecated_additional_fee { - deprecated_additional_fee - } else { - let prioritization_fee_details = PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price), - u64::from(val.compute_unit_limit), - ); - prioritization_fee_details.get_fee() - }; + let prioritization_fee_details = PrioritizationFeeDetails::new( + PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price), + u64::from(val.compute_unit_limit), + ); + let prioritization_fee = prioritization_fee_details.get_fee(); FeeBudgetLimits { // NOTE - usize::from(u32).unwrap() may fail if target is 16-bit and @@ -79,19 +67,12 @@ impl From for FeeBudgetLimits { /// are retrieved and returned, pub fn process_compute_budget_instructions<'a>( instructions: impl Iterator, - feature_set: &FeatureSet, ) -> Result { - let support_request_units_deprecated = - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()); - let support_set_loaded_accounts_data_size_limit_ix = - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()); - let mut num_non_compute_budget_instructions: u32 = 0; let mut updated_compute_unit_limit = None; let mut updated_compute_unit_price = None; let mut requested_heap_size = None; let mut updated_loaded_accounts_data_size_limit = None; - let mut deprecated_additional_fee = None; for (i, (program_id, instruction)) in instructions.enumerate() { if compute_budget::check_id(program_id) { @@ -102,21 +83,6 @@ pub fn process_compute_budget_instructions<'a>( let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) if support_request_units_deprecated => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - if updated_compute_unit_price.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - updated_compute_unit_price = - support_deprecated_requested_units(additional_fee, compute_unit_limit); - deprecated_additional_fee = Some(u64::from(additional_fee)); - } Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { if requested_heap_size.is_some() { return Err(duplicate_instruction_error); @@ -139,9 +105,7 @@ pub fn process_compute_budget_instructions<'a>( } 
updated_compute_unit_price = Some(micro_lamports); } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) - if support_set_loaded_accounts_data_size_limit_ix => - { + Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) => { if updated_loaded_accounts_data_size_limit.is_some() { return Err(duplicate_instruction_error); } @@ -179,7 +143,6 @@ pub fn process_compute_budget_instructions<'a>( compute_unit_limit, compute_unit_price, loaded_accounts_bytes, - deprecated_additional_fee, }) } @@ -188,17 +151,6 @@ fn sanitize_requested_heap_size(bytes: u32) -> bool { && bytes % 1024 == 0 } -// Supports request_units_deprecated ix, returns compute_unit_price from deprecated requested -// units. -fn support_deprecated_requested_units(additional_fee: u32, compute_unit_limit: u32) -> Option { - // TODO: remove support of 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated - let prioritization_fee_details = PrioritizationFeeDetails::new( - PrioritizationFeeType::Deprecated(u64::from(additional_fee)), - u64::from(compute_unit_limit), - ); - Some(prioritization_fee_details.get_priority()) -} - #[cfg(test)] mod tests { use { @@ -216,27 +168,17 @@ mod tests { }; macro_rules! test { - ( $instructions: expr, $expected_result: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { + ( $instructions: expr, $expected_result: expr) => { let payer_keypair = Keypair::new(); let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( &[&payer_keypair], Message::new($instructions, Some(&payer_keypair.pubkey())), Hash::default(), )); - let mut feature_set = FeatureSet::default(); - feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); - if $support_set_loaded_accounts_data_size_limit_ix { - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); - } - let result = process_compute_budget_instructions( - tx.message().program_instructions_iter(), - &feature_set, - ); + let result = + process_compute_budget_instructions(tx.message().program_instructions_iter()); assert_eq!($expected_result, result); }; - ( $instructions: expr, $expected_result: expr ) => { - test!($instructions, $expected_result, false); - }; } #[test] @@ -448,148 +390,82 @@ mod tests { ], Err(TransactionError::DuplicateInstruction(2)) ); - - // deprecated - test!( - &[Instruction::new_with_borsh( - compute_budget::id(), - &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { - units: 1_000, - additional_fee: 10 - }, - vec![] - )], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - ); } #[test] fn test_process_loaded_accounts_data_size_limit_instruction() { - // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix - // will not change results, which should all be default - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - test!( - &[], - Ok(ComputeBudgetLimits { - compute_unit_limit: 0, - ..ComputeBudgetLimits::default() - }), - support_set_loaded_accounts_data_size_limit_ix - ); - } + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); // Assert when set_loaded_accounts_data_size_limit presents, - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set with data_size - // else - // return InstructionError + // budget is set with data_size let data_size = 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - 
let expected_result = if support_set_loaded_accounts_data_size_limit_ix { - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: data_size, - ..ComputeBudgetLimits::default() - }) - } else { - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } + let expected_result = Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: data_size, + ..ComputeBudgetLimits::default() + }); + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result + ); // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to max data size - // else - // return InstructionError + // budget is set to max data size let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = if support_set_loaded_accounts_data_size_limit_ix { - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudgetLimits::default() - }) - } else { - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } + let expected_result = Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudgetLimits::default() + }); + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result + ); // Assert when set_loaded_accounts_data_size_limit is not presented - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to default data size - // else - // return - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudgetLimits::default() - }); - - test!( - &[Instruction::new_with_bincode( - Pubkey::new_unique(), - &0_u8, - vec![] - ),], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } + // budget is set to default data size + let expected_result = Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudgetLimits::default() + }); + + test!( + &[Instruction::new_with_bincode( + Pubkey::new_unique(), + &0_u8, + vec![] + ),], + expected_result + ); // Assert when set_loaded_accounts_data_size_limit presents more than once, - // if support_set_loaded_accounts_data_size_limit_ix then - // 
return DuplicateInstruction - // else - // return InstructionError + // return DuplicateInstruction let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let expected_result = if support_set_loaded_accounts_data_size_limit_ix { - Err(TransactionError::DuplicateInstruction(2)) - } else { - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )) - }; - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), - ], - expected_result, - support_set_loaded_accounts_data_size_limit_ix - ); - } + let expected_result = Err(TransactionError::DuplicateInstruction(2)); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + ], + expected_result + ); } #[test] @@ -607,14 +483,8 @@ mod tests { Hash::default(), )); - let mut feature_set = FeatureSet::default(); - feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); - - let result = process_compute_budget_instructions( - transaction.message().program_instructions_iter(), - &feature_set, - ); + let result = + process_compute_budget_instructions(transaction.message().program_instructions_iter()); // assert process_instructions will be successful with default, // and the default compute_unit_limit is 2 times default: one for bpf ix, one for @@ -627,78 +497,4 @@ mod tests { }) ); } - - fn try_prioritization_fee_from_deprecated_requested_units( - additional_fee: u32, - compute_unit_limit: u32, - ) { - let payer_keypair = Keypair::new(); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( - &[&payer_keypair], - Message::new( - &[Instruction::new_with_borsh( - compute_budget::id(), - &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }, - vec![], - )], - Some(&payer_keypair.pubkey()), - ), - Hash::default(), - )); - - // sucessfully process deprecated instruction - let compute_budget_limits = process_compute_budget_instructions( - tx.message().program_instructions_iter(), - &FeatureSet::default(), - ) - .unwrap(); - - // assert compute_budget_limit - let expected_compute_unit_price = (additional_fee as u128) - .saturating_mul(1_000_000) - .checked_div(compute_unit_limit as u128) - .map(|cu_price| u64::try_from(cu_price).unwrap_or(u64::MAX)) - .unwrap(); - let expected_compute_unit_limit = compute_unit_limit.min(MAX_COMPUTE_UNIT_LIMIT); - assert_eq!( - compute_budget_limits.compute_unit_price, - expected_compute_unit_price - ); - assert_eq!( - compute_budget_limits.compute_unit_limit, - expected_compute_unit_limit - ); - - // assert fee_budget_limits - let fee_budget_limits = FeeBudgetLimits::from(compute_budget_limits); - assert_eq!( - fee_budget_limits.prioritization_fee, - u64::from(additional_fee) - ); - assert_eq!( - fee_budget_limits.compute_unit_limit, - u64::from(expected_compute_unit_limit) - ); - } - - #[test] - fn test_support_deprecated_requested_units() { - // a normal case - try_prioritization_fee_from_deprecated_requested_units(647, 6002); - - // requesting cu limit more than MAX, div result will 
be round down - try_prioritization_fee_from_deprecated_requested_units( - 640, - MAX_COMPUTE_UNIT_LIMIT + 606_002, - ); - - // requesting cu limit more than MAX, div result will round up - try_prioritization_fee_from_deprecated_requested_units( - 764, - MAX_COMPUTE_UNIT_LIMIT + 606_004, - ); - } } diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 6ee87fefa7ccdc..fc40cdbbac5543 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -20,7 +20,7 @@ use { solana_sdk::{ account::AccountSharedData, bpf_loader_deprecated, - feature_set::{check_slice_translation_size, native_programs_consume_cu, FeatureSet}, + feature_set::{native_programs_consume_cu, FeatureSet}, hash::Hash, instruction::{AccountMeta, InstructionError}, native_loader, @@ -167,7 +167,6 @@ pub struct InvokeContext<'a> { accounts_data_meter: AccountsDataMeter, pub programs_loaded_for_tx_batch: &'a LoadedProgramsForTxBatch, pub programs_modified_by_tx: &'a mut LoadedProgramsForTxBatch, - pub programs_updated_only_for_global_cache: &'a mut LoadedProgramsForTxBatch, pub feature_set: Arc, pub timings: ExecuteDetailsTimings, pub blockhash: Hash, @@ -185,7 +184,6 @@ impl<'a> InvokeContext<'a> { compute_budget: ComputeBudget, programs_loaded_for_tx_batch: &'a LoadedProgramsForTxBatch, programs_modified_by_tx: &'a mut LoadedProgramsForTxBatch, - programs_updated_only_for_global_cache: &'a mut LoadedProgramsForTxBatch, feature_set: Arc, blockhash: Hash, lamports_per_signature: u64, @@ -201,7 +199,6 @@ impl<'a> InvokeContext<'a> { accounts_data_meter: AccountsDataMeter::new(prev_accounts_data_len), programs_loaded_for_tx_batch, programs_modified_by_tx, - programs_updated_only_for_global_cache, feature_set, timings: ExecuteDetailsTimings::default(), blockhash, @@ -413,7 +410,7 @@ impl<'a> InvokeContext<'a> { })?; let borrowed_program_account = instruction_context .try_borrow_instruction_account(self.transaction_context, program_account_index)?; - if !borrowed_program_account.is_executable() { + if !borrowed_program_account.is_executable(&self.feature_set) { ic_msg!(self, "Account {} is not executable", callee_program_id); return Err(InstructionError::AccountNotExecutable); } @@ -602,12 +599,6 @@ impl<'a> InvokeContext<'a> { .unwrap_or(true) } - // Set should type size be checked during user pointer translation - pub fn get_check_size(&self) -> bool { - self.feature_set - .is_active(&check_slice_translation_size::id()) - } - // Set this instruction syscall context pub fn set_syscall_context( &mut self, @@ -624,7 +615,7 @@ impl<'a> InvokeContext<'a> { pub fn get_syscall_context(&self) -> Result<&SyscallContext, InstructionError> { self.syscall_context .last() - .and_then(|syscall_context| syscall_context.as_ref()) + .and_then(std::option::Option::as_ref) .ok_or(InstructionError::CallDepth) } @@ -688,7 +679,6 @@ macro_rules! with_mock_invoke_context { }); let programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default(); - let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default(); let mut $invoke_context = InvokeContext::new( &mut $transaction_context, &sysvar_cache, @@ -696,7 +686,6 @@ macro_rules! 
with_mock_invoke_context { compute_budget, &programs_loaded_for_tx_batch, &mut programs_modified_by_tx, - &mut programs_updated_only_for_global_cache, Arc::new(FeatureSet::all_enabled()), Hash::default(), 0, @@ -832,17 +821,17 @@ mod tests { MockInstruction::NoopFail => return Err(InstructionError::GenericError), MockInstruction::ModifyOwned => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .set_data_from_slice(&[1])?, + .set_data_from_slice(&[1], &invoke_context.feature_set)?, MockInstruction::ModifyNotOwned => instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data_from_slice(&[1])?, + .set_data_from_slice(&[1], &invoke_context.feature_set)?, MockInstruction::ModifyReadonly => instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .set_data_from_slice(&[1])?, + .set_data_from_slice(&[1], &invoke_context.feature_set)?, MockInstruction::UnbalancedPush => { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(1)?; + .checked_add_lamports(1, &invoke_context.feature_set)?; let program_id = *transaction_context.get_key_of_account_at_index(3)?; let metas = vec![ AccountMeta::new_readonly( @@ -873,7 +862,7 @@ mod tests { } MockInstruction::UnbalancedPop => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(1)?, + .checked_add_lamports(1, &invoke_context.feature_set)?, MockInstruction::ConsumeComputeUnits { compute_units_to_consume, desired_result, @@ -885,7 +874,7 @@ mod tests { } MockInstruction::Resize { new_len } => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .set_data(vec![0; new_len as usize])?, + .set_data(vec![0; new_len as usize], &invoke_context.feature_set)?, } } else { return Err(InstructionError::InvalidInstructionData); @@ -973,8 +962,8 @@ mod tests { let owned_account = AccountSharedData::new(42, 1, &callee_program_id); let not_owned_account = AccountSharedData::new(84, 1, &solana_sdk::pubkey::new_rand()); let readonly_account = AccountSharedData::new(168, 1, &solana_sdk::pubkey::new_rand()); - let loader_account = AccountSharedData::new(0, 0, &native_loader::id()); - let mut program_account = AccountSharedData::new(1, 0, &native_loader::id()); + let loader_account = AccountSharedData::new(0, 1, &native_loader::id()); + let mut program_account = AccountSharedData::new(1, 1, &native_loader::id()); program_account.set_executable(true); let transaction_accounts = vec![ (solana_sdk::pubkey::new_rand(), owned_account), @@ -984,7 +973,7 @@ mod tests { (solana_sdk::pubkey::new_rand(), loader_account), ]; let metas = vec![ - AccountMeta::new(transaction_accounts.get(0).unwrap().0, false), + AccountMeta::new(transaction_accounts.first().unwrap().0, false), AccountMeta::new(transaction_accounts.get(1).unwrap().0, false), AccountMeta::new_readonly(transaction_accounts.get(2).unwrap().0, false), ]; @@ -1001,7 +990,7 @@ mod tests { let mut programs_loaded_for_tx_batch = LoadedProgramsForTxBatch::default(); programs_loaded_for_tx_batch.replenish( callee_program_id, - Arc::new(LoadedProgram::new_builtin(0, 0, MockBuiltin::vm)), + Arc::new(LoadedProgram::new_builtin(0, 1, MockBuiltin::vm)), ); invoke_context.programs_loaded_for_tx_batch = &programs_loaded_for_tx_batch; diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index ac16578acf6183..b7b92a0409c800 100644 --- a/program-runtime/src/loaded_programs.rs +++ 
b/program-runtime/src/loaded_programs.rs @@ -3,9 +3,9 @@ use { invoke_context::{BuiltinFunctionWithContext, InvokeContext}, timings::ExecuteDetailsTimings, }, - itertools::Itertools, log::{debug, error, log_enabled, trace}, percentage::PercentageInteger, + rand::{thread_rng, Rng}, solana_measure::measure::Measure, solana_rbpf::{ elf::Executable, @@ -25,7 +25,7 @@ use { fmt::{Debug, Formatter}, sync::{ atomic::{AtomicU64, Ordering}, - Arc, RwLock, + Arc, Condvar, Mutex, RwLock, }, }, }; @@ -60,18 +60,6 @@ pub trait ForkGraph { } } -/// Provides information about current working slot, and its ancestors -pub trait WorkingSlot { - /// Returns the current slot - fn current_slot(&self) -> Slot; - - /// Returns the epoch of the current slot - fn current_epoch(&self) -> Epoch; - - /// Returns true if the `other` slot is an ancestor of self, false otherwise - fn is_ancestor(&self, other: Slot) -> bool; -} - #[derive(Default)] pub enum LoadedProgramType { /// Tombstone for undeployed, closed or unloadable programs @@ -115,6 +103,9 @@ impl LoadedProgramType { LoadedProgramType::LegacyV0(program) | LoadedProgramType::LegacyV1(program) | LoadedProgramType::Typed(program) => Some(program.get_loader()), + LoadedProgramType::FailedVerification(env) | LoadedProgramType::Unloaded(env) => { + Some(env) + } #[cfg(test)] LoadedProgramType::TestLoaded(environment) => Some(environment), _ => None, @@ -136,8 +127,10 @@ pub struct LoadedProgram { pub maybe_expiration_slot: Option, /// How often this entry was used by a transaction pub tx_usage_counter: AtomicU64, - /// How often this entry was used by a transaction + /// How often this entry was used by an instruction pub ix_usage_counter: AtomicU64, + /// Latest slot in which the entry was used + pub latest_access_slot: AtomicU64, } #[derive(Debug, Default)] @@ -357,6 +350,7 @@ impl LoadedProgram { tx_usage_counter: AtomicU64::new(0), program, ix_usage_counter: AtomicU64::new(0), + latest_access_slot: AtomicU64::new(0), }) } @@ -368,7 +362,8 @@ impl LoadedProgram { effective_slot: self.effective_slot, maybe_expiration_slot: self.maybe_expiration_slot, tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), - ix_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), + ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)), + latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)), }) } @@ -390,6 +385,7 @@ impl LoadedProgram { tx_usage_counter: AtomicU64::new(0), program: LoadedProgramType::Builtin(BuiltinProgram::new_builtin(function_registry)), ix_usage_counter: AtomicU64::new(0), + latest_access_slot: AtomicU64::new(0), } } @@ -404,6 +400,7 @@ impl LoadedProgram { maybe_expiration_slot, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::new(0), }; debug_assert!(tombstone.is_tombstone()); tombstone @@ -425,6 +422,16 @@ impl LoadedProgram { && slot >= self.deployment_slot && slot < self.effective_slot } + + pub fn update_access_slot(&self, slot: Slot) { + let _ = self.latest_access_slot.fetch_max(slot, Ordering::Relaxed); + } + + pub fn decayed_usage_counter(&self, now: Slot) -> u64 { + let last_access = self.latest_access_slot.load(Ordering::Relaxed); + let decaying_for = now.saturating_sub(last_access); + self.tx_usage_counter.load(Ordering::Relaxed) >> decaying_for + } } #[derive(Clone, Debug)] @@ -448,11 +455,66 @@ impl Default for ProgramRuntimeEnvironments { } } +#[derive(Copy, Clone, Debug, 
Default, Eq, PartialEq)]
+pub struct LoadingTaskCookie(u64);
+
+impl LoadingTaskCookie {
+    fn new() -> Self {
+        Self(0)
+    }
+
+    fn update(&mut self) {
+        let LoadingTaskCookie(cookie) = self;
+        *cookie = cookie.wrapping_add(1);
+    }
+}
+
+/// Prevents excessive polling during cooperative loading
+#[derive(Debug, Default)]
+pub struct LoadingTaskWaiter {
+    cookie: Mutex<LoadingTaskCookie>,
+    cond: Condvar,
+}
+
+impl LoadingTaskWaiter {
+    pub fn new() -> Self {
+        Self {
+            cookie: Mutex::new(LoadingTaskCookie::new()),
+            cond: Condvar::new(),
+        }
+    }
+
+    pub fn cookie(&self) -> LoadingTaskCookie {
+        *self.cookie.lock().unwrap()
+    }
+
+    pub fn notify(&self) {
+        let mut cookie = self.cookie.lock().unwrap();
+        cookie.update();
+        self.cond.notify_all();
+    }
+
+    pub fn wait(&self, cookie: LoadingTaskCookie) -> LoadingTaskCookie {
+        let cookie_guard = self.cookie.lock().unwrap();
+        *self
+            .cond
+            .wait_while(cookie_guard, |current_cookie| *current_cookie == cookie)
+            .unwrap()
+    }
+}
+
+#[derive(Debug, Default)]
+struct SecondLevel {
+    slot_versions: Vec<Arc<LoadedProgram>>,
+    /// Contains the bank and TX batch a program at this address is currently being loaded
+    cooperative_loading_lock: Option<(Slot, std::thread::ThreadId)>,
+}
+
 pub struct LoadedPrograms {
     /// A two level index:
     ///
-    /// Pubkey is the address of a program, multiple versions can coexists simultaneously under the same address (in different slots).
-    entries: HashMap<Pubkey, Vec<Arc<LoadedProgram>>>,
+    /// The first level is for the address at which programs are deployed and the second level for the slot (and thus also fork).
+    entries: HashMap<Pubkey, SecondLevel>,
     /// The slot of the last rerooting
     pub latest_root_slot: Slot,
     /// The epoch of the last rerooting
@@ -469,6 +531,7 @@ pub struct LoadedPrograms {
     pub programs_to_recompile: Vec<(Pubkey, Arc<LoadedProgram>)>,
     pub stats: Stats,
     pub fork_graph: Option<Arc<RwLock<dyn ForkGraph>>>,
+    pub loading_task_waiter: Arc<LoadingTaskWaiter>,
 }
 
 impl Debug for LoadedPrograms {
@@ -482,21 +545,6 @@ impl Debug for LoadedPrograms {
     }
 }
 
-impl Default for LoadedPrograms {
-    fn default() -> Self {
-        Self {
-            entries: HashMap::new(),
-            latest_root_slot: 0,
-            latest_root_epoch: 0,
-            environments: ProgramRuntimeEnvironments::default(),
-            upcoming_environments: None,
-            programs_to_recompile: Vec::default(),
-            stats: Stats::default(),
-            fork_graph: None,
-        }
-    }
-}
-
 #[derive(Clone, Debug, Default)]
 pub struct LoadedProgramsForTxBatch {
     /// Pubkey is the address of a program.
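The LoadingTaskWaiter above replaces busy-polling during cooperative loading: a waiter snapshots the cookie, a loader bumps it via notify() (in this patch, from finish_cooperative_loading_task() further down), and wait() blocks only while the snapshot is still current. Below is a minimal, self-contained sketch of the same cookie-and-condvar handshake; the bare u64 cookie, the Waiter name, and the main() driver are illustrative and not part of the patch:

    use std::{
        sync::{Arc, Condvar, Mutex},
        thread,
    };

    struct Waiter {
        cookie: Mutex<u64>,
        cond: Condvar,
    }

    impl Waiter {
        fn cookie(&self) -> u64 {
            *self.cookie.lock().unwrap()
        }

        // A loader finished some work: bump the cookie, then wake all waiters.
        fn notify(&self) {
            *self.cookie.lock().unwrap() += 1;
            self.cond.notify_all();
        }

        // Block until the cookie moves past the snapshot the caller took.
        fn wait(&self, seen: u64) -> u64 {
            let guard = self.cookie.lock().unwrap();
            *self.cond.wait_while(guard, |current| *current == seen).unwrap()
        }
    }

    fn main() {
        let waiter = Arc::new(Waiter {
            cookie: Mutex::new(0),
            cond: Condvar::new(),
        });
        let seen = waiter.cookie(); // snapshot before the loader can notify
        let loader = {
            let waiter = Arc::clone(&waiter);
            thread::spawn(move || waiter.notify()) // "a program finished loading"
        };
        assert!(waiter.wait(seen) > seen); // returns as soon as the cookie moves
        loader.join().unwrap();
    }

Because the cookie only ever advances and is updated under the lock, a notification that lands between the snapshot and the wait() call is not lost; wait_while() sees the changed value and returns immediately.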
@@ -506,12 +554,6 @@ pub struct LoadedProgramsForTxBatch { pub environments: ProgramRuntimeEnvironments, } -pub struct ExtractedPrograms { - pub loaded: LoadedProgramsForTxBatch, - pub missing: Vec<(Pubkey, u64)>, - pub unloaded: Vec<(Pubkey, u64)>, -} - impl LoadedProgramsForTxBatch { pub fn new(slot: Slot, environments: ProgramRuntimeEnvironments) -> Self { Self { @@ -572,6 +614,20 @@ pub enum LoadedProgramMatchCriteria { } impl LoadedPrograms { + pub fn new(root_slot: Slot, root_epoch: Epoch) -> Self { + Self { + entries: HashMap::new(), + latest_root_slot: root_slot, + latest_root_epoch: root_epoch, + environments: ProgramRuntimeEnvironments::default(), + upcoming_environments: None, + programs_to_recompile: Vec::default(), + stats: Stats::default(), + fork_graph: None, + loading_task_waiter: Arc::new(LoadingTaskWaiter::default()), + } + } + pub fn set_fork_graph(&mut self, fork_graph: Arc>) { self.fork_graph = Some(fork_graph); } @@ -594,12 +650,12 @@ impl LoadedPrograms { key: Pubkey, entry: Arc, ) -> (bool, Arc) { - let second_level = self.entries.entry(key).or_default(); - let index = second_level + let slot_versions = &mut self.entries.entry(key).or_default().slot_versions; + let index = slot_versions .iter() .position(|at| at.effective_slot >= entry.effective_slot); if let Some((existing, entry_index)) = - index.and_then(|index| second_level.get(index).map(|value| (value, index))) + index.and_then(|index| slot_versions.get(index).map(|value| (value, index))) { if existing.deployment_slot == entry.deployment_slot && existing.effective_slot == entry.effective_slot @@ -607,23 +663,21 @@ impl LoadedPrograms { if matches!(existing.program, LoadedProgramType::Unloaded(_)) { // The unloaded program is getting reloaded // Copy over the usage counter to the new entry - let mut usage_count = existing.tx_usage_counter.load(Ordering::Relaxed); - saturating_add_assign!( - usage_count, - entry.tx_usage_counter.load(Ordering::Relaxed) + entry.tx_usage_counter.fetch_add( + existing.tx_usage_counter.load(Ordering::Relaxed), + Ordering::Relaxed, ); - entry.tx_usage_counter.store(usage_count, Ordering::Relaxed); - entry.ix_usage_counter.store( + entry.ix_usage_counter.fetch_add( existing.ix_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); - second_level.remove(entry_index); + slot_versions.remove(entry_index); } else if existing.is_tombstone() != entry.is_tombstone() { // Either the old entry is tombstone and the new one is not. // (Let's give the new entry a chance). // Or, the old entry is not a tombstone and the new one is a tombstone. // (Remove the old entry, as the tombstone makes it obsolete). 
- second_level.remove(entry_index); + slot_versions.remove(entry_index); } else { self.stats.replacements.fetch_add(1, Ordering::Relaxed); return (true, existing.clone()); @@ -631,7 +685,7 @@ impl LoadedPrograms { } } self.stats.insertions.fetch_add(1, Ordering::Relaxed); - second_level.insert(index.unwrap_or(second_level.len()), entry.clone()); + slot_versions.insert(index.unwrap_or(slot_versions.len()), entry.clone()); (false, entry) } @@ -646,14 +700,11 @@ impl LoadedPrograms { } pub fn prune_by_deployment_slot(&mut self, slot: Slot) { - self.entries.retain(|_key, second_level| { - *second_level = second_level - .iter() - .filter(|entry| entry.deployment_slot != slot) - .cloned() - .collect(); - !second_level.is_empty() - }); + for second_level in self.entries.values_mut() { + second_level + .slot_versions + .retain(|entry| entry.deployment_slot != slot); + } self.remove_programs_with_no_entries(); } @@ -679,19 +730,38 @@ impl LoadedPrograms { for second_level in self.entries.values_mut() { // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; - *second_level = second_level + let mut first_ancestor_env = None; + second_level.slot_versions = second_level + .slot_versions .iter() .rev() .filter(|entry| { let relation = fork_graph.relationship(entry.deployment_slot, new_root_slot); if entry.deployment_slot >= new_root_slot { matches!(relation, BlockRelation::Equal | BlockRelation::Descendant) - } else if !first_ancestor_found - && (matches!(relation, BlockRelation::Ancestor) - || entry.deployment_slot <= self.latest_root_slot) + } else if matches!(relation, BlockRelation::Ancestor) + || entry.deployment_slot <= self.latest_root_slot { - first_ancestor_found = true; - first_ancestor_found + if !first_ancestor_found { + first_ancestor_found = true; + first_ancestor_env = entry.program.get_environment(); + return true; + } + // Do not prune the entry if the runtime environment of the entry is different + // than the entry that was previously found (stored in first_ancestor_env). + // Different environment indicates that this entry might belong to an older + // epoch that had a different environment (e.g. different feature set). + // Once the root moves to the new/current epoch, the entry will get pruned. + // But, until then the entry might still be getting used by an older slot. + if let Some(entry_env) = entry.program.get_environment() { + if let Some(env) = first_ancestor_env { + if !Arc::ptr_eq(entry_env, env) { + return true; + } + } + } + self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed); + false } else { self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed); false @@ -718,7 +788,7 @@ impl LoadedPrograms { }) .cloned() .collect(); - second_level.reverse(); + second_level.slot_versions.reverse(); } self.remove_programs_with_no_entries(); debug_assert!(self.latest_root_slot <= new_root_slot); @@ -770,94 +840,109 @@ impl LoadedPrograms { /// Extracts a subset of the programs relevant to a transaction batch /// and returns which program accounts the accounts DB needs to load. 
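+    /// If programs are missing from the cache, at most one of them, the first one
+    /// that is not already being loaded by another TX batch, is handed back as a
+    /// cooperative loading task; the caller loads it and reports the result via
+    /// finish_cooperative_loading_task() below.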
- pub fn extract( - &self, - working_slot: &S, - keys: impl Iterator, - ) -> ExtractedPrograms { - let environments = self.get_environments_for_epoch(working_slot.current_epoch()); - let mut missing = Vec::new(); - let mut unloaded = Vec::new(); - let current_slot = working_slot.current_slot(); - let found = keys - .filter_map(|(key, (match_criteria, count))| { - if let Some(second_level) = self.entries.get(&key) { - for entry in second_level.iter().rev() { - let is_ancestor = if let Some(fork_graph) = &self.fork_graph { - fork_graph - .read() - .map(|fork_graph_r| { - matches!( - fork_graph_r - .relationship(entry.deployment_slot, current_slot), - BlockRelation::Ancestor - ) - }) - .unwrap_or(false) - } else { - working_slot.is_ancestor(entry.deployment_slot) - }; - - if entry.deployment_slot <= self.latest_root_slot - || entry.deployment_slot == current_slot - || is_ancestor - { - if current_slot >= entry.effective_slot { - if !Self::is_entry_usable(entry, current_slot, &match_criteria) { - missing.push((key, count)); - return None; - } - - if !Self::matches_environment(entry, environments) { - missing.push((key, count)); - return None; - } + pub fn extract( + &mut self, + search_for: &mut Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))>, + loaded_programs_for_tx_batch: &mut LoadedProgramsForTxBatch, + ) -> Option<(Pubkey, u64)> { + debug_assert!(self.fork_graph.is_some()); + let locked_fork_graph = self.fork_graph.as_ref().unwrap().read().unwrap(); + let mut cooperative_loading_task = None; + search_for.retain(|(key, (match_criteria, usage_count))| { + if let Some(second_level) = self.entries.get_mut(key) { + for entry in second_level.slot_versions.iter().rev() { + let is_ancestor = matches!( + locked_fork_graph + .relationship(entry.deployment_slot, loaded_programs_for_tx_batch.slot), + BlockRelation::Ancestor + ); - if let LoadedProgramType::Unloaded(_environment) = &entry.program { - unloaded.push((key, count)); - return None; - } + if entry.deployment_slot <= self.latest_root_slot + || entry.deployment_slot == loaded_programs_for_tx_batch.slot + || is_ancestor + { + let entry_to_return = if loaded_programs_for_tx_batch.slot + >= entry.effective_slot + && Self::matches_environment( + entry, + &loaded_programs_for_tx_batch.environments, + ) { + if !Self::is_entry_usable( + entry, + loaded_programs_for_tx_batch.slot, + match_criteria, + ) { + break; + } - let mut usage_count = - entry.tx_usage_counter.load(Ordering::Relaxed); - saturating_add_assign!(usage_count, count); - entry.tx_usage_counter.store(usage_count, Ordering::Relaxed); - return Some((key, entry.clone())); - } else if entry.is_implicit_delay_visibility_tombstone(current_slot) { - // Found a program entry on the current fork, but it's not effective - // yet. It indicates that the program has delayed visibility. Return - // the tombstone to reflect that. - return Some(( - key, - Arc::new(LoadedProgram::new_tombstone( - entry.deployment_slot, - LoadedProgramType::DelayVisibility, - )), - )); + if let LoadedProgramType::Unloaded(_environment) = &entry.program { + break; } - } + entry.clone() + } else if entry.is_implicit_delay_visibility_tombstone( + loaded_programs_for_tx_batch.slot, + ) { + // Found a program entry on the current fork, but it's not effective + // yet. It indicates that the program has delayed visibility. Return + // the tombstone to reflect that. 
+ Arc::new(LoadedProgram::new_tombstone( + entry.deployment_slot, + LoadedProgramType::DelayVisibility, + )) + } else { + continue; + }; + entry_to_return.update_access_slot(loaded_programs_for_tx_batch.slot); + entry_to_return + .tx_usage_counter + .fetch_add(*usage_count, Ordering::Relaxed); + loaded_programs_for_tx_batch + .entries + .insert(*key, entry_to_return); + return false; } } - missing.push((key, count)); - None - }) - .collect::>>(); - + } + if cooperative_loading_task.is_none() { + // We have not selected a task so far + let second_level = self.entries.entry(*key).or_default(); + if second_level.cooperative_loading_lock.is_none() { + // Select this missing entry which is not selected by any other TX batch yet + cooperative_loading_task = Some((*key, *usage_count)); + second_level.cooperative_loading_lock = Some(( + loaded_programs_for_tx_batch.slot, + std::thread::current().id(), + )); + } + } + true + }); + drop(locked_fork_graph); self.stats .misses - .fetch_add(missing.len() as u64, Ordering::Relaxed); - self.stats - .hits - .fetch_add(found.len() as u64, Ordering::Relaxed); - ExtractedPrograms { - loaded: LoadedProgramsForTxBatch { - entries: found, - slot: current_slot, - environments: environments.clone(), - }, - missing, - unloaded, - } + .fetch_add(search_for.len() as u64, Ordering::Relaxed); + self.stats.hits.fetch_add( + loaded_programs_for_tx_batch.entries.len() as u64, + Ordering::Relaxed, + ); + cooperative_loading_task + } + + /// Called by Bank::replenish_program_cache() for each program that is done loading. + pub fn finish_cooperative_loading_task( + &mut self, + slot: Slot, + key: Pubkey, + loaded_program: Arc, + ) { + let second_level = self.entries.entry(key).or_default(); + debug_assert_eq!( + second_level.cooperative_loading_lock, + Some((slot, std::thread::current().id())) + ); + second_level.cooperative_loading_lock = None; + self.assign_program(key, loaded_program); + self.loading_task_waiter.notify(); } pub fn merge(&mut self, tx_batch_cache: &LoadedProgramsForTxBatch) { @@ -866,18 +951,18 @@ impl LoadedPrograms { }) } - /// Returns the list of loaded programs which are verified and compiled sorted by `tx_usage_counter`. - /// - /// Entries from program runtime v1 and v2 can be individually filtered. - pub fn get_entries_sorted_by_tx_usage( + /// Returns the list of loaded programs which are verified and compiled. 
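+    /// (The former sort by tx_usage_counter now happens at the call sites:
+    /// sort_and_unload() sorts explicitly, while evict_using_2s_random_selection()
+    /// samples candidates randomly instead.)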
+    pub fn get_flattened_entries(
         &self,
         include_program_runtime_v1: bool,
         include_program_runtime_v2: bool,
     ) -> Vec<(Pubkey, Arc<LoadedProgram>)> {
         self.entries
             .iter()
-            .flat_map(|(id, list)| {
-                list.iter()
+            .flat_map(|(id, second_level)| {
+                second_level
+                    .slot_versions
+                    .iter()
                     .filter_map(move |program| match program.program {
                         LoadedProgramType::LegacyV0(_) | LoadedProgramType::LegacyV1(_)
                             if include_program_runtime_v1 =>
@@ -892,18 +977,52 @@ impl LoadedPrograms {
                         _ => None,
                     })
             })
-            .sorted_by_cached_key(|(_id, program)| program.tx_usage_counter.load(Ordering::Relaxed))
             .collect()
     }
 
     /// Unloads programs which were used infrequently
     pub fn sort_and_unload(&mut self, shrink_to: PercentageInteger) {
-        let sorted_candidates = self.get_entries_sorted_by_tx_usage(true, true);
+        let mut sorted_candidates = self.get_flattened_entries(true, true);
+        sorted_candidates
+            .sort_by_cached_key(|(_id, program)| program.tx_usage_counter.load(Ordering::Relaxed));
         let num_to_unload = sorted_candidates
             .len()
             .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT));
         self.unload_program_entries(sorted_candidates.iter().take(num_to_unload));
-        self.remove_programs_with_no_entries();
+    }
+
+    /// Evicts programs using 2's random selection, choosing the least used program out of the two entries.
+    /// The eviction is performed enough times to reduce the cache usage to the given percentage.
+    pub fn evict_using_2s_random_selection(&mut self, shrink_to: PercentageInteger, now: Slot) {
+        let mut candidates = self.get_flattened_entries(true, true);
+        let num_to_unload = candidates
+            .len()
+            .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT));
+        fn random_index_and_usage_counter(
+            candidates: &[(Pubkey, Arc<LoadedProgram>)],
+            now: Slot,
+        ) -> (usize, u64) {
+            let mut rng = thread_rng();
+            let index = rng.gen_range(0..candidates.len());
+            let usage_counter = candidates
+                .get(index)
+                .expect("Failed to get cached entry")
+                .1
+                .decayed_usage_counter(now);
+            (index, usage_counter)
+        }
+
+        for _ in 0..num_to_unload {
+            let (index1, usage_counter1) = random_index_and_usage_counter(&candidates, now);
+            let (index2, usage_counter2) = random_index_and_usage_counter(&candidates, now);
+
+            let (program, entry) = if usage_counter1 < usage_counter2 {
+                candidates.swap_remove(index1)
+            } else {
+                candidates.swap_remove(index2)
+            };
+            self.unload_program_entry(&program, &entry);
+        }
     }
 
     /// Removes all the entries at the given keys, if they exist
@@ -914,8 +1033,8 @@ impl LoadedPrograms {
     }
 
     fn unload_program(&mut self, id: &Pubkey) {
-        if let Some(entries) = self.entries.get_mut(id) {
-            entries.iter_mut().for_each(|entry| {
+        if let Some(second_level) = self.entries.get_mut(id) {
+            for entry in second_level.slot_versions.iter_mut() {
                 if let Some(unloaded) = entry.to_unloaded() {
                     *entry = Arc::new(unloaded);
                     self.stats
                         .evictions
                         .entry(*id)
                         .and_modify(|c| saturating_add_assign!(*c, 1))
                         .or_insert(1);
+                } else {
+                    error!(
+                        "Failed to create an unloaded cache entry for a program type {:?}",
+                        entry.program
+                    );
                 }
-            });
+            }
         }
     }
 
@@ -933,32 +1057,47 @@ impl LoadedPrograms {
         keys.iter().for_each(|key| self.unload_program(key));
     }
 
+    /// This function removes the given entry for the given program from the cache.
+    /// The function expects that the program and entry exist in the cache. Otherwise it'll panic.
+ fn unload_program_entry(&mut self, program: &Pubkey, remove_entry: &Arc) { + let second_level = self.entries.get_mut(program).expect("Cache lookup failed"); + let candidate = second_level + .slot_versions + .iter_mut() + .find(|entry| entry == &remove_entry) + .expect("Program entry not found"); + + // Certain entry types cannot be unloaded, such as tombstones, or already unloaded entries. + // For such entries, `to_unloaded()` will return None. + // These entry types do not occupy much memory. + if let Some(unloaded) = candidate.to_unloaded() { + if candidate.tx_usage_counter.load(Ordering::Relaxed) == 1 { + self.stats.one_hit_wonders.fetch_add(1, Ordering::Relaxed); + } + self.stats + .evictions + .entry(*program) + .and_modify(|c| saturating_add_assign!(*c, 1)) + .or_insert(1); + *candidate = Arc::new(unloaded); + } + } + fn unload_program_entries<'a>( &mut self, remove: impl Iterator)>, ) { - for (id, program) in remove { - if let Some(entries) = self.entries.get_mut(id) { - if let Some(candidate) = entries.iter_mut().find(|entry| entry == &program) { - if let Some(unloaded) = candidate.to_unloaded() { - if candidate.tx_usage_counter.load(Ordering::Relaxed) == 1 { - self.stats.one_hit_wonders.fetch_add(1, Ordering::Relaxed); - } - self.stats - .evictions - .entry(*id) - .and_modify(|c| saturating_add_assign!(*c, 1)) - .or_insert(1); - *candidate = Arc::new(unloaded); - } - } - } + for (program, entry) in remove { + self.unload_program_entry(program, entry); } } fn remove_programs_with_no_entries(&mut self) { let num_programs_before_removal = self.entries.len(); - self.entries.retain(|_, programs| !programs.is_empty()); + self.entries.retain(|_, second_level| { + !second_level.slot_versions.is_empty() + || second_level.cooperative_loading_lock.is_some() + }); if self.entries.len() < num_programs_before_removal { self.stats.empty_entries.fetch_add( num_programs_before_removal.saturating_sub(self.entries.len()) as u64, @@ -980,7 +1119,7 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedProgram { impl solana_frozen_abi::abi_example::AbiExample for LoadedPrograms { fn example() -> Self { // LoadedPrograms isn't serializable by definition. 
- Self::default() + Self::new(Slot::default(), Epoch::default()) } } @@ -988,17 +1127,14 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedProgram mod tests { use { crate::loaded_programs::{ - BlockRelation, ExtractedPrograms, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, - WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, + BlockRelation, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, + LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, percentage::Percentage, solana_rbpf::program::BuiltinProgram, - solana_sdk::{ - clock::{Epoch, Slot}, - pubkey::Pubkey, - }, + solana_sdk::{clock::Slot, pubkey::Pubkey}, std::{ ops::ControlFlow, sync::{ @@ -1012,7 +1148,8 @@ mod tests { std::sync::OnceLock::::new(); fn new_mock_cache() -> LoadedPrograms { - let mut cache = LoadedPrograms::default(); + let mut cache = LoadedPrograms::new(0, 0); + cache.environments.program_runtime_v1 = MOCK_ENVIRONMENT .get_or_init(|| Arc::new(BuiltinProgram::new_mock())) .clone(); @@ -1050,6 +1187,7 @@ mod tests { maybe_expiration_slot: expiry, tx_usage_counter: usage_counter, ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::new(deployment_slot), }) } @@ -1062,6 +1200,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), }) } @@ -1090,6 +1229,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), } .to_unloaded() .expect("Failed to unload the program"), @@ -1105,8 +1245,9 @@ mod tests { cache .entries .values() - .map(|programs| { - programs + .map(|second_level| { + second_level + .slot_versions .iter() .filter(|program| predicate(&program.program)) .count() @@ -1114,10 +1255,184 @@ mod tests { .sum() } + #[test] + fn test_usage_counter_decay() { + let _cache = new_mock_cache::(); + let program = new_test_loaded_program_with_usage(10, 11, AtomicU64::new(32)); + program.update_access_slot(15); + assert_eq!(program.decayed_usage_counter(15), 32); + assert_eq!(program.decayed_usage_counter(16), 16); + assert_eq!(program.decayed_usage_counter(17), 8); + assert_eq!(program.decayed_usage_counter(18), 4); + assert_eq!(program.decayed_usage_counter(19), 2); + assert_eq!(program.decayed_usage_counter(20), 1); + assert_eq!(program.decayed_usage_counter(21), 0); + assert_eq!(program.decayed_usage_counter(15), 32); + assert_eq!(program.decayed_usage_counter(14), 32); + + program.update_access_slot(18); + assert_eq!(program.decayed_usage_counter(15), 32); + assert_eq!(program.decayed_usage_counter(16), 32); + assert_eq!(program.decayed_usage_counter(17), 32); + assert_eq!(program.decayed_usage_counter(18), 32); + assert_eq!(program.decayed_usage_counter(19), 16); + assert_eq!(program.decayed_usage_counter(20), 8); + assert_eq!(program.decayed_usage_counter(21), 4); + } + + #[test] + fn test_random_eviction() { + let mut programs = vec![]; + + let mut cache = new_mock_cache::(); + + // This test adds different kind of entries to the cache. + // Tombstones and unloaded entries are expected to not be evicted. + // It also adds multiple entries for three programs as it tries to create a typical cache instance. 
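+        // Eviction below compares *decayed* usage counters: a counter is halved for
+        // every slot that has passed since latest_access_slot (see
+        // decayed_usage_counter() above), so a count of 32 last touched at slot 15
+        // reads as 16 at slot 16 and as 0 by slot 21.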
+ let program1 = Pubkey::new_unique(); + let program1_deployment_slots = [0, 10, 20]; + let program1_usage_counters = [4, 5, 25]; + program1_deployment_slots + .iter() + .enumerate() + .for_each(|(i, deployment_slot)| { + let usage_counter = *program1_usage_counters.get(i).unwrap_or(&0); + cache.replenish( + program1, + new_test_loaded_program_with_usage( + *deployment_slot, + (*deployment_slot) + 2, + AtomicU64::new(usage_counter), + ), + ); + programs.push((program1, *deployment_slot, usage_counter)); + }); + + let env = Arc::new(BuiltinProgram::new_mock()); + for slot in 21..31 { + set_tombstone( + &mut cache, + program1, + slot, + LoadedProgramType::FailedVerification(env.clone()), + ); + } + + for slot in 31..41 { + insert_unloaded_program(&mut cache, program1, slot); + } + + let program2 = Pubkey::new_unique(); + let program2_deployment_slots = [5, 11]; + let program2_usage_counters = [0, 2]; + program2_deployment_slots + .iter() + .enumerate() + .for_each(|(i, deployment_slot)| { + let usage_counter = *program2_usage_counters.get(i).unwrap_or(&0); + cache.replenish( + program2, + new_test_loaded_program_with_usage( + *deployment_slot, + (*deployment_slot) + 2, + AtomicU64::new(usage_counter), + ), + ); + programs.push((program2, *deployment_slot, usage_counter)); + }); + + for slot in 21..31 { + set_tombstone( + &mut cache, + program2, + slot, + LoadedProgramType::DelayVisibility, + ); + } + + for slot in 31..41 { + insert_unloaded_program(&mut cache, program2, slot); + } + + let program3 = Pubkey::new_unique(); + let program3_deployment_slots = [0, 5, 15]; + let program3_usage_counters = [100, 3, 20]; + program3_deployment_slots + .iter() + .enumerate() + .for_each(|(i, deployment_slot)| { + let usage_counter = *program3_usage_counters.get(i).unwrap_or(&0); + cache.replenish( + program3, + new_test_loaded_program_with_usage( + *deployment_slot, + (*deployment_slot) + 2, + AtomicU64::new(usage_counter), + ), + ); + programs.push((program3, *deployment_slot, usage_counter)); + }); + + for slot in 21..31 { + set_tombstone(&mut cache, program3, slot, LoadedProgramType::Closed); + } + + for slot in 31..41 { + insert_unloaded_program(&mut cache, program3, slot); + } + + programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count); + + let num_loaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::TestLoaded(_)) + }); + let num_unloaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::Unloaded(_)) + }); + let num_tombstones = num_matching_entries(&cache, |program_type| { + matches!( + program_type, + LoadedProgramType::DelayVisibility + | LoadedProgramType::FailedVerification(_) + | LoadedProgramType::Closed + ) + }); + + // Test that the cache is constructed with the expected number of entries. 
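+        // (8 loaded = 3 + 2 + 3 deployments across the three programs; the 30 unloaded
+        // and 30 tombstone entries are the 10-slot runs inserted above per program.)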
+ assert_eq!(num_loaded, 8); + assert_eq!(num_unloaded, 30); + assert_eq!(num_tombstones, 30); + + // Evicting to 2% should update cache with + // * 5 active entries + // * 33 unloaded entries (3 active programs will get unloaded) + // * 30 tombstones (tombstones are not evicted) + cache.evict_using_2s_random_selection(Percentage::from(2), 21); + + let num_loaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::TestLoaded(_)) + }); + let num_unloaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::Unloaded(_)) + }); + let num_tombstones = num_matching_entries(&cache, |program_type| { + matches!( + program_type, + LoadedProgramType::DelayVisibility + | LoadedProgramType::FailedVerification(_) + | LoadedProgramType::Closed + ) + }); + + // Test that expected number of loaded entries get evicted/unloaded. + assert_eq!(num_loaded, 5); + assert_eq!(num_unloaded, 33); + assert_eq!(num_tombstones, 30); + } + #[test] fn test_eviction() { let mut programs = vec![]; - let mut num_total_programs: usize = 0; let mut cache = new_mock_cache::(); @@ -1137,7 +1452,6 @@ mod tests { AtomicU64::new(usage_counter), ), ); - num_total_programs += 1; programs.push((program1, *deployment_slot, usage_counter)); }); @@ -1171,7 +1485,6 @@ mod tests { AtomicU64::new(usage_counter), ), ); - num_total_programs += 1; programs.push((program2, *deployment_slot, usage_counter)); }); @@ -1204,7 +1517,6 @@ mod tests { AtomicU64::new(usage_counter), ), ); - num_total_programs += 1; programs.push((program3, *deployment_slot, usage_counter)); }); @@ -1250,8 +1562,8 @@ mod tests { let unloaded = cache .entries .iter() - .flat_map(|(id, cached_programs)| { - cached_programs.iter().filter_map(|program| { + .flat_map(|(id, second_level)| { + second_level.slot_versions.iter().filter_map(|program| { matches!(program.program, LoadedProgramType::Unloaded(_)) .then_some((*id, program.tx_usage_counter.load(Ordering::Relaxed))) }) @@ -1304,8 +1616,8 @@ mod tests { }); assert_eq!(num_unloaded, 1); - cache.entries.values().for_each(|programs| { - programs.iter().for_each(|program| { + cache.entries.values().for_each(|second_level| { + second_level.slot_versions.iter().for_each(|program| { if matches!(program.program, LoadedProgramType::Unloaded(_)) { // Test that the usage counter is retained for the unloaded program assert_eq!(program.tx_usage_counter.load(Ordering::Relaxed), 10); @@ -1322,8 +1634,8 @@ mod tests { new_test_loaded_program_with_usage(0, 2, AtomicU64::new(0)), ); - cache.entries.values().for_each(|programs| { - programs.iter().for_each(|program| { + cache.entries.values().for_each(|second_level| { + second_level.slot_versions.iter().for_each(|program| { if matches!(program.program, LoadedProgramType::Unloaded(_)) && program.deployment_slot == 0 && program.effective_slot == 2 @@ -1381,8 +1693,8 @@ mod tests { .entries .get(&program1) .expect("Failed to find the entry"); - assert_eq!(second_level.len(), 1); - assert!(second_level.get(0).unwrap().is_tombstone()); + assert_eq!(second_level.slot_versions.len(), 1); + assert!(second_level.slot_versions.first().unwrap().is_tombstone()); assert_eq!(tombstone.deployment_slot, 10); assert_eq!(tombstone.effective_slot, 10); @@ -1397,8 +1709,8 @@ mod tests { .entries .get(&program2) .expect("Failed to find the entry"); - assert_eq!(second_level.len(), 1); - assert!(!second_level.get(0).unwrap().is_tombstone()); + assert_eq!(second_level.slot_versions.len(), 1); + 
assert!(!second_level.slot_versions.first().unwrap().is_tombstone()); let tombstone = set_tombstone( &mut cache, @@ -1410,9 +1722,9 @@ mod tests { .entries .get(&program2) .expect("Failed to find the entry"); - assert_eq!(second_level.len(), 2); - assert!(!second_level.get(0).unwrap().is_tombstone()); - assert!(second_level.get(1).unwrap().is_tombstone()); + assert_eq!(second_level.slot_versions.len(), 2); + assert!(!second_level.slot_versions.first().unwrap().is_tombstone()); + assert!(second_level.slot_versions.get(1).unwrap().is_tombstone()); assert!(tombstone.is_tombstone()); assert_eq!(tombstone.deployment_slot, 60); assert_eq!(tombstone.effective_slot, 60); @@ -1481,6 +1793,83 @@ mod tests { assert!(cache.entries.is_empty()); } + #[test] + fn test_prune_different_env() { + let mut cache = new_mock_cache::(); + + let fork_graph = Arc::new(RwLock::new(TestForkGraph { + relation: BlockRelation::Ancestor, + })); + + cache.set_fork_graph(fork_graph); + + let program1 = Pubkey::new_unique(); + let loaded_program = new_test_loaded_program(10, 10); + let (existing, program) = cache.replenish(program1, loaded_program.clone()); + assert!(!existing); + assert_eq!(program, loaded_program); + + let new_env = Arc::new(BuiltinProgram::new_mock()); + cache.upcoming_environments = Some(ProgramRuntimeEnvironments { + program_runtime_v1: new_env.clone(), + program_runtime_v2: new_env.clone(), + }); + let updated_program = Arc::new(LoadedProgram { + program: LoadedProgramType::TestLoaded(new_env.clone()), + account_size: 0, + deployment_slot: 20, + effective_slot: 20, + maybe_expiration_slot: None, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), + }); + let (existing, program) = cache.replenish(program1, updated_program.clone()); + assert!(!existing); + assert_eq!(program, updated_program); + + // Test that there are 2 entries for the program + assert_eq!( + cache + .entries + .get(&program1) + .expect("failed to find the program") + .slot_versions + .len(), + 2 + ); + + cache.prune(21, cache.latest_root_epoch); + + // Test that prune didn't remove the entry, since environments are different. 
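+        // (See the environment check in prune(): an ancestor entry whose runtime
+        // environment differs from the newest ancestor's is kept until the root
+        // crosses into the new epoch.)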
+ assert_eq!( + cache + .entries + .get(&program1) + .expect("failed to find the program") + .slot_versions + .len(), + 2 + ); + + cache.prune(22, cache.latest_root_epoch.saturating_add(1)); + + let second_level = cache + .entries + .get(&program1) + .expect("failed to find the program"); + // Test that prune removed 1 entry, since epoch changed + assert_eq!(second_level.slot_versions.len(), 1); + + let entry = second_level + .slot_versions + .first() + .expect("Failed to get the program") + .clone(); + // Test that the correct entry remains in the cache + assert_eq!(entry, updated_program); + } + #[derive(Default)] struct TestForkGraphSpecific { forks: Vec>, @@ -1522,35 +1911,28 @@ mod tests { } } - struct TestWorkingSlot(pub Slot); - - impl WorkingSlot for TestWorkingSlot { - fn current_slot(&self) -> Slot { - self.0 - } - - fn current_epoch(&self) -> Epoch { - 0 - } - - fn is_ancestor(&self, _other: Slot) -> bool { - false - } - } - fn match_slot( - table: &LoadedProgramsForTxBatch, + extracted: &LoadedProgramsForTxBatch, program: &Pubkey, deployment_slot: Slot, working_slot: Slot, ) -> bool { - assert_eq!(table.slot, working_slot); - table - .find(program) + assert_eq!(extracted.slot, working_slot); + extracted + .entries + .get(program) .map(|entry| entry.deployment_slot == deployment_slot) .unwrap_or(false) } + fn match_missing( + missing: &[(Pubkey, (LoadedProgramMatchCriteria, u64))], + program: &Pubkey, + _reload: bool, + ) -> bool { + missing.iter().any(|(key, _)| key == program) + } + #[test] fn test_fork_extract_and_prune() { let mut cache = new_mock_cache::(); @@ -1629,131 +2011,100 @@ mod tests { // 23 // Testing fork 0 - 10 - 12 - 22 with current slot at 22 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(22), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 2)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 3)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), - ] - .into_iter(), - ); - - assert!(match_slot(&found, &program1, 20, 22)); - assert!(match_slot(&found, &program4, 0, 22)); - - assert!(missing.contains(&(program2, 2))); - assert!(missing.contains(&(program3, 3))); - assert!(unloaded.is_empty()); - - // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 15 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(15), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); - - assert!(match_slot(&found, &program1, 0, 15)); - assert!(match_slot(&found, &program2, 11, 15)); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 2)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 3)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); + + assert!(match_slot(&extracted, &program1, 20, 22)); + assert!(match_slot(&extracted, &program4, 0, 22)); + + assert!(match_missing(&missing, &program2, false)); + assert!(match_missing(&missing, &program3, false)); + + // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 16 + let mut missing = 
vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); + + assert!(match_slot(&extracted, &program1, 0, 15)); + assert!(match_slot(&extracted, &program2, 11, 15)); // The effective slot of program4 deployed in slot 15 is 19. So it should not be usable in slot 16. // A delay visibility tombstone should be returned here. - let tombstone = found.find(&program4).expect("Failed to find the tombstone"); + let tombstone = extracted + .find(&program4) + .expect("Failed to find the tombstone"); assert_matches!(tombstone.program, LoadedProgramType::DelayVisibility); assert_eq!(tombstone.deployment_slot, 15); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&missing, &program3, false)); // Testing the same fork above, but current slot is now 18 (equal to effective slot of program4). - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(18), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); - - assert!(match_slot(&found, &program1, 0, 18)); - assert!(match_slot(&found, &program2, 11, 18)); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(18, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); + + assert!(match_slot(&extracted, &program1, 0, 18)); + assert!(match_slot(&extracted, &program2, 11, 18)); // The effective slot of program4 deployed in slot 15 is 18. So it should be usable in slot 18. - assert!(match_slot(&found, &program4, 15, 18)); + assert!(match_slot(&extracted, &program4, 15, 18)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&missing, &program3, false)); // Testing the same fork above, but current slot is now 23 (future slot than effective slot of program4). 
- let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(23), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); - - assert!(match_slot(&found, &program1, 0, 23)); - assert!(match_slot(&found, &program2, 11, 23)); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); + + assert!(match_slot(&extracted, &program1, 0, 23)); + assert!(match_slot(&extracted, &program2, 11, 23)); // The effective slot of program4 deployed in slot 15 is 19. So it should be usable in slot 23. - assert!(match_slot(&found, &program4, 15, 23)); + assert!(match_slot(&extracted, &program4, 15, 23)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 11 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(11), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); - - assert!(match_slot(&found, &program1, 0, 11)); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(11, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); + + assert!(match_slot(&extracted, &program1, 0, 11)); // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone. 
- let tombstone = found.find(&program2).expect("Failed to find the tombstone"); + let tombstone = extracted + .find(&program2) + .expect("Failed to find the tombstone"); assert_matches!(tombstone.program, LoadedProgramType::DelayVisibility); assert_eq!(tombstone.deployment_slot, 11); - assert!(match_slot(&found, &program4, 5, 11)); + assert!(match_slot(&extracted, &program4, 5, 11)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&missing, &program3, false)); // The following is a special case, where there's an expiration slot let test_program = Arc::new(LoadedProgram { @@ -1764,60 +2115,47 @@ mod tests { maybe_expiration_slot: Some(21), tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), }); assert!(!cache.replenish(program4, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(19), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); - - assert!(match_slot(&found, &program1, 0, 19)); - assert!(match_slot(&found, &program2, 11, 19)); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); + + assert!(match_slot(&extracted, &program1, 0, 19)); + assert!(match_slot(&extracted, &program2, 11, 19)); // Program4 deployed at slot 19 should not be expired yet - assert!(match_slot(&found, &program4, 19, 19)); + assert!(match_slot(&extracted, &program4, 19, 19)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 // This would cause program4 deployed at slot 19 to be expired. 
- let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(21), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); - assert!(match_slot(&found, &program1, 0, 21)); - assert!(match_slot(&found, &program2, 11, 21)); + assert!(match_slot(&extracted, &program1, 0, 21)); + assert!(match_slot(&extracted, &program2, 11, 21)); - assert!(missing.contains(&(program3, 1))); - assert!(missing.contains(&(program4, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&missing, &program3, false)); + assert!(match_missing(&missing, &program4, false)); // Remove the expired entry to let the rest of the test continue - if let Some(programs) = cache.entries.get_mut(&program4) { - programs.pop(); + if let Some(second_level) = cache.entries.get_mut(&program4) { + second_level.slot_versions.pop(); } cache.prune(5, 0); @@ -1837,51 +2175,37 @@ mod tests { // | // 23 - // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 21 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( - &TestWorkingSlot(21), - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 22 + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); // Since the fork was pruned, we should not find the entry deployed at slot 20. 
-        assert!(match_slot(&found, &program1, 0, 21));
-        assert!(match_slot(&found, &program2, 11, 21));
-        assert!(match_slot(&found, &program4, 15, 21));
+        assert!(match_slot(&extracted, &program1, 0, 21));
+        assert!(match_slot(&extracted, &program2, 11, 21));
+        assert!(match_slot(&extracted, &program4, 15, 21));

-        assert!(missing.contains(&(program3, 1)));
-        assert!(unloaded.is_empty());
+        assert!(match_missing(&missing, &program3, false));

         // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27
-        let ExtractedPrograms {
-            loaded: found,
-            missing: _,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(27),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
-
-        assert!(unloaded.is_empty());
-        assert!(match_slot(&found, &program1, 0, 27));
-        assert!(match_slot(&found, &program2, 11, 27));
-        assert!(match_slot(&found, &program3, 25, 27));
-        assert!(match_slot(&found, &program4, 5, 27));
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);
+
+        assert!(match_slot(&extracted, &program1, 0, 27));
+        assert!(match_slot(&extracted, &program2, 11, 27));
+        assert!(match_slot(&extracted, &program3, 25, 27));
+        assert!(match_slot(&extracted, &program4, 5, 27));

         cache.prune(15, 0);
@@ -1901,28 +2225,21 @@ mod tests {
         // 23

         // Testing fork 16, 19, 23, with root at 15, current slot at 23
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(23),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
-
-        assert!(match_slot(&found, &program1, 0, 23));
-        assert!(match_slot(&found, &program2, 11, 23));
-        assert!(match_slot(&found, &program4, 15, 23));
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);
+
+        assert!(match_slot(&extracted, &program1, 0, 23));
+        assert!(match_slot(&extracted, &program2, 11, 23));
+        assert!(match_slot(&extracted, &program4, 15, 23));

         // program3 was deployed on slot 25, which has been pruned
-        assert!(missing.contains(&(program3, 1)));
-        assert!(unloaded.is_empty());
+        assert!(match_missing(&missing, &program3, false));
     }

     #[test]
@@ -1964,52 +2281,38 @@ mod tests {
         assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0);

         // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 12
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(12),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 0, 12));
-        assert!(match_slot(&found, &program2, 11, 12));
+        assert!(match_slot(&extracted, &program1, 0, 12));
+        assert!(match_slot(&extracted, &program2, 11, 12));

-        assert!(missing.contains(&(program3, 1)));
-        assert!(unloaded.is_empty());
+        assert!(match_missing(&missing, &program3, false));

         // Test the same fork, but request the program modified at a later slot than what's in the cache.
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(12),
-            vec![
-                (
-                    program1,
-                    (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1),
-                ),
-                (
-                    program2,
-                    (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1),
-                ),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (
+                program1,
+                (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1),
+            ),
+            (
+                program2,
+                (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1),
+            ),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program2, 11, 12));
+        assert!(match_slot(&extracted, &program2, 11, 12));

-        assert!(missing.contains(&(program1, 1)));
-        assert!(missing.contains(&(program3, 1)));
-        assert!(unloaded.is_empty());
+        assert!(match_missing(&missing, &program1, false));
+        assert!(match_missing(&missing, &program3, false));
     }

     #[test]
@@ -2068,66 +2371,46 @@ mod tests {
         );

         // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(19),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 0, 19));
-        assert!(match_slot(&found, &program2, 11, 19));
+        assert!(match_slot(&extracted, &program1, 0, 19));
+        assert!(match_slot(&extracted, &program2, 11, 19));

-        assert!(missing.contains(&(program3, 1)));
-        assert!(unloaded.is_empty());
+        assert!(match_missing(&missing, &program3, false));

         // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(27),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 0, 27));
-        assert!(match_slot(&found, &program2, 11, 27));
+        assert!(match_slot(&extracted, &program1, 0, 27));
+        assert!(match_slot(&extracted, &program2, 11, 27));

-        assert!(unloaded.contains(&(program3, 1)));
-        assert!(missing.is_empty());
+        assert!(match_missing(&missing, &program3, true));

         // Testing fork 0 - 10 - 20 - 22 with current slot at 22
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(22),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 20, 22));
+        assert!(match_slot(&extracted, &program1, 20, 22));

-        assert!(missing.contains(&(program2, 1)));
-        assert!(unloaded.contains(&(program3, 1)));
+        assert!(match_missing(&missing, &program2, false));
+        assert!(match_missing(&missing, &program3, true));
     }

     #[test]
@@ -2176,52 +2459,39 @@ mod tests {
             maybe_expiration_slot: Some(15),
             tx_usage_counter: AtomicU64::default(),
             ix_usage_counter: AtomicU64::default(),
+            latest_access_slot: AtomicU64::default(),
         });
         assert!(!cache.replenish(program1, test_program).0);

         // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 12
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(12),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

         // Program1 deployed at slot 11 should not be expired yet
-        assert!(match_slot(&found, &program1, 11, 12));
-        assert!(match_slot(&found, &program2, 11, 12));
+        assert!(match_slot(&extracted, &program1, 11, 12));
+        assert!(match_slot(&extracted, &program2, 11, 12));

-        assert!(missing.contains(&(program3, 1)));
-        assert!(unloaded.is_empty());
+        assert!(match_missing(&missing, &program3, false));

         // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15
         // This would cause program1, which expires at slot 15, to be expired.
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(15),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
-        assert!(unloaded.is_empty());
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program2, 11, 15));
+        assert!(match_slot(&extracted, &program2, 11, 15));

-        assert!(missing.contains(&(program1, 1)));
-        assert!(missing.contains(&(program3, 1)));
+        assert!(match_missing(&missing, &program1, false));
+        assert!(match_missing(&missing, &program3, false));

         // Test that the program still exists in the cache, even though it is expired.
         assert_eq!(
@@ -2229,6 +2499,7 @@ mod tests {
                 .entries
                 .get(&program1)
                 .expect("Didn't find program1")
+                .slot_versions
                 .len(),
             3
         );
@@ -2240,10 +2511,14 @@ mod tests {
                 .entries
                 .get(&program1)
                 .expect("Didn't find program1")
+                .slot_versions
                 .len(),
             1
         );

+        // Unlock the cooperative loading lock so that the subsequent prune can do its job
+        cache.finish_cooperative_loading_task(15, program1, new_test_loaded_program(0, 1));
+
         // New root 15 should evict the expired entry for program1
         cache.prune(15, 0);
         assert!(cache.entries.get(&program1).is_none());
@@ -2275,21 +2550,14 @@ mod tests {

         cache.prune(10, 0);

-        let ExtractedPrograms {
-            loaded: found,
-            missing: _,
-            unloaded,
-        } = cache.extract(
-            &TestWorkingSlot(20),
-            vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))].into_iter(),
-        );
-        assert!(unloaded.is_empty());
+        let mut missing = vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))];
+        let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

         // The cache should have the program deployed at slot 0
         assert_eq!(
-            found
-                .entries
-                .get(&program1)
+            extracted
+                .find(&program1)
                 .expect("Did not find the program")
                 .deployment_slot,
             0
         );
@@ -2323,93 +2591,63 @@ mod tests {
         let program2 = Pubkey::new_unique();
         assert!(!cache.replenish(program2, new_test_loaded_program(10, 11)).0);

-        let ExtractedPrograms {
-            loaded: found,
-            missing: _,
-            unloaded: _,
-        } = cache.extract(
-            &TestWorkingSlot(20),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 0, 20));
-        assert!(match_slot(&found, &program2, 10, 20));
-
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded: _,
-        } = cache.extract(
-            &TestWorkingSlot(6),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        assert!(match_slot(&extracted, &program1, 0, 20));
+        assert!(match_slot(&extracted, &program2, 10, 20));

-        assert!(match_slot(&found, &program1, 5, 6));
-        assert!(missing.contains(&(program2, 1)));
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);
+
+        assert!(match_slot(&extracted, &program1, 5, 6));
+        assert!(match_missing(&missing, &program2, false));

         // Pruning slot 5 will remove program1 entry deployed at slot 5.
         // On fork chaining from slot 5, the entry deployed at slot 0 will become visible.
         cache.prune_by_deployment_slot(5);

-        let ExtractedPrograms {
-            loaded: found,
-            missing: _,
-            unloaded: _,
-        } = cache.extract(
-            &TestWorkingSlot(20),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 0, 20));
-        assert!(match_slot(&found, &program2, 10, 20));
-
-        let ExtractedPrograms {
-            loaded: found,
-            missing,
-            unloaded: _,
-        } = cache.extract(
-            &TestWorkingSlot(6),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        assert!(match_slot(&extracted, &program1, 0, 20));
+        assert!(match_slot(&extracted, &program2, 10, 20));

-        assert!(match_slot(&found, &program1, 0, 6));
-        assert!(missing.contains(&(program2, 1)));
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);
+
+        assert!(match_slot(&extracted, &program1, 0, 6));
+        assert!(match_missing(&missing, &program2, false));

         // Pruning slot 10 will remove program2 entry deployed at slot 10.
         // As there is no other entry for program2, extract() will return it as missing.
         cache.prune_by_deployment_slot(10);

-        let ExtractedPrograms {
-            loaded: found,
-            missing: _,
-            unloaded: _,
-        } = cache.extract(
-            &TestWorkingSlot(20),
-            vec![
-                (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-                (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
-            ]
-            .into_iter(),
-        );
+        let mut missing = vec![
+            (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+            (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)),
+        ];
+        let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone());
+        cache.extract(&mut missing, &mut extracted);

-        assert!(match_slot(&found, &program1, 0, 20));
-        assert!(missing.contains(&(program2, 1)));
+        assert!(match_slot(&extracted, &program1, 0, 20));
+        assert!(match_missing(&missing, &program2, false));
     }

     #[test]
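Editorial aside, not part of the diff: the calling convention these tests now exercise, as a hedged sketch using the names that appear above (`cache`, `program_id`, and `current_slot` are illustrative placeholders):

    // Callers build the key list themselves and pass it as `missing`; extract()
    // fills the pre-sized per-batch container in place.
    let mut missing = vec![(program_id, (LoadedProgramMatchCriteria::NoCriteria, 1))];
    let mut extracted = LoadedProgramsForTxBatch::new(current_slot, cache.environments.clone());
    cache.extract(&mut missing, &mut extracted);
    // Hits are read back with extracted.find(&program_id); keys left in `missing`
    // were not found on this fork, and the bool in the test helper
    // match_missing(&missing, &key, expect_unloaded) distinguishes unloaded entries
    // from truly absent ones, replacing the old separate `unloaded` set.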
diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs
index e3a0dabd8d07bf..b8b2177f91476f 100644
--- a/program-runtime/src/message_processor.rs
+++ b/program-runtime/src/message_processor.rs
@@ -56,7 +56,6 @@ impl MessageProcessor {
         log_collector: Option<Rc<RefCell<LogCollector>>>,
         programs_loaded_for_tx_batch: &LoadedProgramsForTxBatch,
         programs_modified_by_tx: &mut LoadedProgramsForTxBatch,
-        programs_updated_only_for_global_cache: &mut LoadedProgramsForTxBatch,
         feature_set: Arc<FeatureSet>,
         compute_budget: ComputeBudget,
         timings: &mut ExecuteTimings,
@@ -73,7 +72,6 @@ impl MessageProcessor {
                 compute_budget,
                 programs_loaded_for_tx_batch,
                 programs_modified_by_tx,
-                programs_updated_only_for_global_cache,
                 feature_set,
                 blockhash,
                 lamports_per_signature,
@@ -230,16 +228,16 @@ mod tests {
                     MockSystemInstruction::TransferLamports { lamports } => {
                         instruction_context
                             .try_borrow_instruction_account(transaction_context, 0)?
-                            .checked_sub_lamports(lamports)?;
+                            .checked_sub_lamports(lamports, &invoke_context.feature_set)?;
                         instruction_context
                             .try_borrow_instruction_account(transaction_context, 1)?
-                            .checked_add_lamports(lamports)?;
+                            .checked_add_lamports(lamports, &invoke_context.feature_set)?;
                         Ok(())
                     }
                     MockSystemInstruction::ChangeData { data } => {
                         instruction_context
                             .try_borrow_instruction_account(transaction_context, 1)?
-                            .set_data(vec![data])?;
+                            .set_data(vec![data], &invoke_context.feature_set)?;
                         Ok(())
                     }
                 }
@@ -302,7 +300,6 @@ mod tests {
         )));
         let sysvar_cache = SysvarCache::default();
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &program_indices,
@@ -310,7 +307,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
@@ -354,7 +350,6 @@ mod tests {
             ]),
         )));
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &program_indices,
@@ -362,7 +357,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
@@ -396,7 +390,6 @@ mod tests {
             ]),
         )));
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &program_indices,
@@ -404,7 +397,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
@@ -465,14 +457,14 @@ mod tests {
                     MockSystemInstruction::DoWork { lamports, data } => {
                         let mut dup_account = instruction_context
                             .try_borrow_instruction_account(transaction_context, 2)?;
-                        dup_account.checked_sub_lamports(lamports)?;
-                        to_account.checked_add_lamports(lamports)?;
-                        dup_account.set_data(vec![data])?;
+                        dup_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?;
+                        to_account.checked_add_lamports(lamports, &invoke_context.feature_set)?;
+                        dup_account.set_data(vec![data], &invoke_context.feature_set)?;
                         drop(dup_account);
                         let mut from_account = instruction_context
                             .try_borrow_instruction_account(transaction_context, 0)?;
-                        from_account.checked_sub_lamports(lamports)?;
-                        to_account.checked_add_lamports(lamports)?;
+                        from_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?;
+                        to_account.checked_add_lamports(lamports, &invoke_context.feature_set)?;
                         Ok(())
                     }
                 }
@@ -528,7 +520,6 @@ mod tests {
         )));
         let sysvar_cache = SysvarCache::default();
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &program_indices,
@@ -536,7 +527,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
@@ -564,7 +554,6 @@ mod tests {
             Some(transaction_context.get_key_of_account_at_index(0).unwrap()),
         )));
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &program_indices,
@@ -572,7 +561,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
@@ -597,7 +585,6 @@ mod tests {
             Some(transaction_context.get_key_of_account_at_index(0).unwrap()),
         )));
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &program_indices,
@@ -605,7 +592,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
@@ -687,7 +673,6 @@ mod tests {
             Arc::new(LoadedProgram::new_builtin(0, 0, MockBuiltin::vm)),
         );
         let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
-        let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::default();
         let result = MessageProcessor::process_message(
             &message,
             &[vec![0], vec![1]],
@@ -695,7 +680,6 @@ mod tests {
             None,
             &programs_loaded_for_tx_batch,
             &mut programs_modified_by_tx,
-            &mut programs_updated_only_for_global_cache,
             Arc::new(FeatureSet::all_enabled()),
             ComputeBudget::default(),
             &mut ExecuteTimings::default(),
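Editorial aside, not part of the diff: with `programs_updated_only_for_global_cache` removed, a hedged sketch of the new `process_message` call shape, showing only the arguments the hunks above make visible:

    // Sketch only; the arguments the hunks elide are unchanged by this diff.
    let mut programs_modified_by_tx = LoadedProgramsForTxBatch::default();
    let result = MessageProcessor::process_message(
        &message,
        &program_indices,
        // ...context elided by the hunks above...
        None, // log_collector
        &programs_loaded_for_tx_batch,
        &mut programs_modified_by_tx, // now the single output batch
        Arc::new(FeatureSet::all_enabled()),
        ComputeBudget::default(),
        &mut ExecuteTimings::default(),
        // ...trailing arguments unchanged...
    );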
diff --git a/program-runtime/src/prioritization_fee.rs b/program-runtime/src/prioritization_fee.rs
index 0a0f76c8dc8e22..e77ae15aac7f21 100644
--- a/program-runtime/src/prioritization_fee.rs
+++ b/program-runtime/src/prioritization_fee.rs
@@ -5,8 +5,6 @@ type MicroLamports = u128;

 pub enum PrioritizationFeeType {
     ComputeUnitPrice(u64),
-    // TODO: remove 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated
-    Deprecated(u64),
 }

 #[derive(Default, Debug, PartialEq, Eq)]
@@ -18,17 +16,6 @@ pub struct PrioritizationFeeDetails {
 impl PrioritizationFeeDetails {
     pub fn new(fee_type: PrioritizationFeeType, compute_unit_limit: u64) -> Self {
         match fee_type {
-            // TODO: remove support of 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated
-            PrioritizationFeeType::Deprecated(fee) => {
-                let micro_lamport_fee: MicroLamports =
-                    (fee as u128).saturating_mul(MICRO_LAMPORTS_PER_LAMPORT as u128);
-                let priority = micro_lamport_fee
-                    .checked_div(compute_unit_limit as u128)
-                    .map(|priority| u64::try_from(priority).unwrap_or(u64::MAX))
-                    .unwrap_or(0);
-
-                Self { fee, priority }
-            }
             PrioritizationFeeType::ComputeUnitPrice(cu_price) => {
                 let micro_lamport_fee: MicroLamports =
                     (cu_price as u128).saturating_mul(compute_unit_limit as u128);
@@ -66,10 +53,6 @@ mod test {
             FeeDetails::new(FeeType::ComputeUnitPrice(0), compute_units),
             FeeDetails::default(),
         );
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(0), compute_units),
-            FeeDetails::default(),
-        );
     }
 }
@@ -128,76 +111,4 @@ mod test {
             },
         );
     }
-
-    #[test]
-    fn test_new_with_deprecated_fee() {
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(1), MICRO_LAMPORTS_PER_LAMPORT / 2 - 1),
-            FeeDetails {
-                fee: 1,
-                priority: 2,
-            },
-            "should round down fee rate of (>2.0) to priority value 1"
-        );
-
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(1), MICRO_LAMPORTS_PER_LAMPORT / 2),
-            FeeDetails {
-                fee: 1,
-                priority: 2,
-            },
-        );
-
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(1), MICRO_LAMPORTS_PER_LAMPORT / 2 + 1),
-            FeeDetails {
-                fee: 1,
-                priority: 1,
-            },
-            "should round down fee rate of (<2.0) to priority value 1"
-        );
-
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(1), MICRO_LAMPORTS_PER_LAMPORT),
-            FeeDetails {
-                fee: 1,
-                priority: 1,
-            },
-        );
-
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(42), 42 * MICRO_LAMPORTS_PER_LAMPORT),
-            FeeDetails {
-                fee: 42,
-                priority: 1,
-            },
-        );
-
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(420), 42 * MICRO_LAMPORTS_PER_LAMPORT),
-            FeeDetails {
-                fee: 420,
-                priority: 10,
-            },
-        );
-
-        assert_eq!(
-            FeeDetails::new(
-                FeeType::Deprecated(u64::MAX),
-                2 * MICRO_LAMPORTS_PER_LAMPORT
-            ),
-            FeeDetails {
-                fee: u64::MAX,
-                priority: u64::MAX / 2,
-            },
-        );
-
-        assert_eq!(
-            FeeDetails::new(FeeType::Deprecated(u64::MAX), u64::MAX),
-            FeeDetails {
-                fee: u64::MAX,
-                priority: MICRO_LAMPORTS_PER_LAMPORT,
-            },
-        );
-    }
 }
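Editorial aside, not part of the diff: only the `ComputeUnitPrice` arm survives. A hedged, self-contained sketch of the surviving fee math, assuming it mirrors that arm (the constant value and the ceiling division are assumptions consistent with the removed `Deprecated` arm shown above):

    // One lamport is one million micro-lamports; cu_price is micro-lamports per CU.
    const MICRO_LAMPORTS_PER_LAMPORT: u64 = 1_000_000;

    fn compute_unit_price_fee(cu_price: u64, compute_unit_limit: u64) -> u64 {
        let micro_lamport_fee =
            (cu_price as u128).saturating_mul(compute_unit_limit as u128);
        // Round the micro-lamport total up to whole lamports.
        micro_lamport_fee
            .saturating_add((MICRO_LAMPORTS_PER_LAMPORT as u128).saturating_sub(1))
            .checked_div(MICRO_LAMPORTS_PER_LAMPORT as u128)
            .map(|fee| u64::try_from(fee).unwrap_or(u64::MAX))
            .unwrap_or(u64::MAX)
    }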
diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs
index 0e2e4956a55889..8eeb9c5a005cde 100644
--- a/program-runtime/src/timings.rs
+++ b/program-runtime/src/timings.rs
@@ -300,13 +300,6 @@ impl ThreadExecuteTimings {
     }

     pub fn accumulate(&mut self, other: &ThreadExecuteTimings) {
-        self.execute_timings.saturating_add_in_place(
-            ExecuteTimingType::TotalBatchesLen,
-            *other
-                .execute_timings
-                .metrics
-                .index(ExecuteTimingType::TotalBatchesLen),
-        );
         self.execute_timings.accumulate(&other.execute_timings);
         saturating_add_assign!(self.total_thread_us, other.total_thread_us);
         saturating_add_assign!(
diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs
index 37e848471a8b3a..fb90a12c309dc0 100644
--- a/program-test/src/lib.rs
+++ b/program-test/src/lib.rs
@@ -8,7 +8,10 @@ use {
     base64::{prelude::BASE64_STANDARD, Engine},
     chrono_humanize::{Accuracy, HumanTime, Tense},
     log::*,
-    solana_accounts_db::epoch_accounts_hash::EpochAccountsHash,
+    solana_accounts_db::{
+        accounts_db::AccountShrinkThreshold, accounts_index::AccountSecondaryIndexes,
+        epoch_accounts_hash::EpochAccountsHash,
+    },
     solana_banks_client::start_client,
     solana_banks_server::banks_server::start_local_server,
     solana_bpf_loader_program::serialization::serialize_parameters,
@@ -27,7 +30,7 @@ use {
     solana_sdk::{
         account::{create_account_shared_data_for_test, Account, AccountSharedData},
         account_info::AccountInfo,
-        clock::Slot,
+        clock::{Epoch, Slot},
         entrypoint::{deserialize, ProgramResult, SUCCESS},
         feature_set::FEATURE_NAMES,
         fee_calculator::{FeeCalculator, FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE},
@@ -127,6 +130,7 @@ pub fn invoke_builtin_function(
             .transaction_context
            .get_current_instruction_context()?,
        true, // copy_account_data // There is no VM so direct mapping can not be implemented here
+        &invoke_context.feature_set,
    )?;

     // Deserialize data back into instruction params
@@ -157,18 +161,25 @@ pub fn invoke_builtin_function(
         if borrowed_account.is_writable() {
             if let Some(account_info) = account_info_map.get(borrowed_account.get_key()) {
                 if borrowed_account.get_lamports() != account_info.lamports() {
-                    borrowed_account.set_lamports(account_info.lamports())?;
+                    borrowed_account
+                        .set_lamports(account_info.lamports(), &invoke_context.feature_set)?;
                 }
                 if borrowed_account
                     .can_data_be_resized(account_info.data_len())
                     .is_ok()
-                    && borrowed_account.can_data_be_changed().is_ok()
+                    && borrowed_account
+                        .can_data_be_changed(&invoke_context.feature_set)
+                        .is_ok()
                 {
-                    borrowed_account.set_data_from_slice(&account_info.data.borrow())?;
+                    borrowed_account.set_data_from_slice(
+                        &account_info.data.borrow(),
+                        &invoke_context.feature_set,
+                    )?;
                 }
                 if borrowed_account.get_owner() != account_info.owner {
-                    borrowed_account.set_owner(account_info.owner.as_ref())?;
+                    borrowed_account
+                        .set_owner(account_info.owner.as_ref(), &invoke_context.feature_set)?;
                 }
             }
         }
@@ -279,17 +290,17 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
                 .unwrap();
             if borrowed_account.get_lamports() != account_info.lamports() {
                 borrowed_account
-                    .set_lamports(account_info.lamports())
+                    .set_lamports(account_info.lamports(), &invoke_context.feature_set)
                     .unwrap();
             }
             let account_info_data = account_info.try_borrow_data().unwrap();
             // The redundant check helps to avoid the expensive data comparison if we can
             match borrowed_account
                 .can_data_be_resized(account_info_data.len())
-                .and_then(|_| borrowed_account.can_data_be_changed())
+                .and_then(|_| borrowed_account.can_data_be_changed(&invoke_context.feature_set))
             {
                 Ok(()) => borrowed_account
-                    .set_data_from_slice(&account_info_data)
+                    .set_data_from_slice(&account_info_data, &invoke_context.feature_set)
                     .unwrap(),
                 Err(err) if borrowed_account.get_data() != *account_info_data => {
                     panic!("{err:?}");
@@ -299,7 +310,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs {
             // Change the owner at the end so that we are allowed to change the lamports and data before
             if borrowed_account.get_owner() != account_info.owner {
                 borrowed_account
-                    .set_owner(account_info.owner.as_ref())
+                    .set_owner(account_info.owner.as_ref(), &invoke_context.feature_set)
                     .unwrap();
             }
             if instruction_account.is_writable {
@@ -805,7 +816,7 @@ impl ProgramTest {
         debug!("Payer address: {}", mint_keypair.pubkey());
         debug!("Genesis config: {}", genesis_config);

-        let mut bank = Bank::new_with_runtime_config_for_tests(
+        let mut bank = Bank::new_with_paths(
             &genesis_config,
             Arc::new(RuntimeConfig {
                 compute_budget: self.compute_max_units.map(|max_units| ComputeBudget {
@@ -815,6 +826,15 @@ impl ProgramTest {
                 transaction_account_lock_limit: self.transaction_account_lock_limit,
                 ..RuntimeConfig::default()
             }),
+            Vec::default(),
+            None,
+            None,
+            AccountSecondaryIndexes::default(),
+            AccountShrinkThreshold::default(),
+            false,
+            None,
+            None,
+            Arc::default(),
         );

         // Add commonly-used SPL programs as a convenience to the user
@@ -1187,6 +1207,14 @@ impl ProgramTestContext {
         Ok(())
     }

+    pub fn warp_to_epoch(&mut self, warp_epoch: Epoch) -> Result<(), ProgramTestError> {
+        let warp_slot = self
+            .genesis_config
+            .epoch_schedule
+            .get_first_slot_in_epoch(warp_epoch);
+        self.warp_to_slot(warp_slot)
+    }
+
     /// warp forward one more slot and force reward interval end
     pub fn warp_forward_force_reward_interval_end(&mut self) -> Result<(), ProgramTestError> {
         let mut bank_forks = self.bank_forks.write().unwrap();
diff --git a/program-test/src/programs.rs b/program-test/src/programs.rs
index ed96be7644f277..8d9a42790f7af2 100644
--- a/program-test/src/programs.rs
+++ b/program-test/src/programs.rs
@@ -30,7 +30,7 @@ static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[
     (
         spl_token_2022::ID,
         solana_sdk::bpf_loader_upgradeable::ID,
-        include_bytes!("programs/spl_token_2022-0.9.0.so"),
+        include_bytes!("programs/spl_token_2022-1.0.0.so"),
     ),
     (
         spl_memo_1_0::ID,
diff --git a/program-test/src/programs/spl_token_2022-0.9.0.so b/program-test/src/programs/spl_token_2022-0.9.0.so
deleted file mode 100644
index 704fce11908771..00000000000000
Binary files a/program-test/src/programs/spl_token_2022-0.9.0.so and /dev/null differ
diff --git a/program-test/src/programs/spl_token_2022-1.0.0.so b/program-test/src/programs/spl_token_2022-1.0.0.so
new file mode 100755
index 00000000000000..796fafc4cc13ab
Binary files /dev/null and b/program-test/src/programs/spl_token_2022-1.0.0.so differ
diff --git a/program-test/tests/warp.rs b/program-test/tests/warp.rs
index da0b632ad66759..2728fcfd98a66a 100644
--- a/program-test/tests/warp.rs
+++ b/program-test/tests/warp.rs
@@ -205,7 +205,7 @@ async fn stake_rewards_from_warp() {
     assert_eq!(
         stake
             .delegation
-            .stake_activating_and_deactivating(clock.epoch, Some(&stake_history), None),
+            .stake_activating_and_deactivating(clock.epoch, &stake_history, None),
         StakeActivationStatus::with_effective(stake.delegation.stake),
     );
 }
@@ -321,7 +321,7 @@ async fn stake_rewards_filter_bench_core(num_stake_accounts: u64) {
     assert_eq!(
         stake
             .delegation
-            .stake_activating_and_deactivating(clock.epoch, Some(&stake_history), None),
+            .stake_activating_and_deactivating(clock.epoch, &stake_history, None),
         StakeActivationStatus::with_effective(stake.delegation.stake),
     );
 }
@@ -369,8 +369,15 @@ async fn stake_merge_immediately_after_activation() {
     check_credits_observed(&mut context.banks_client, base_stake_address, 100).await;
     context.increment_vote_account_credits(&vote_address, 100);

+    let clock_account = context
+        .banks_client
+        .get_account(clock::id())
+        .await
+        .expect("account exists")
+        .unwrap();
+    let clock: Clock = deserialize(&clock_account.data).unwrap();
+    context.warp_to_epoch(clock.epoch + 1).unwrap();
     current_slot += slots_per_epoch;
-    context.warp_to_slot(current_slot).unwrap();
     context.warp_forward_force_reward_interval_end().unwrap();

     // make another stake which will just have its credits observed advanced
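Editorial aside, not part of the diff: per the helper's body above, the new `warp_to_epoch` is sugar for warping to the first slot of the target epoch, which is what the warp.rs test now relies on instead of manual slot arithmetic. A hedged equivalence sketch (`next_epoch` is an illustrative value, and direct access to `genesis_config` outside the struct is an assumption):

    // warp_to_epoch(e) == warp_to_slot(first slot of epoch e)
    context.warp_to_epoch(next_epoch)?;
    // ...behaves like:
    // let slot = genesis_config.epoch_schedule.get_first_slot_in_epoch(next_epoch);
    // context.warp_to_slot(slot)?;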
diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs
index 11d9b4b0dd34e4..737ec32c8f6782 100644
--- a/programs/address-lookup-table/src/lib.rs
+++ b/programs/address-lookup-table/src/lib.rs
@@ -2,13 +2,14 @@
 #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))]
 #![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))]

+#[cfg(not(target_os = "solana"))]
 pub mod processor;

 #[deprecated(
     since = "1.17.0",
-    note = "Please use `solana_sdk::address_lookup_table` instead"
+    note = "Please use `solana_program::address_lookup_table` instead"
 )]
-pub use solana_sdk::address_lookup_table::{
+pub use solana_program::address_lookup_table::{
     error, instruction,
     program::{check_id, id, ID},
     state,
diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs
index 4db568c71a1a20..643310d316bf83 100644
--- a/programs/address-lookup-table/src/processor.rs
+++ b/programs/address-lookup-table/src/processor.rs
@@ -162,9 +162,10 @@ impl Processor {
         let instruction_context = transaction_context.get_current_instruction_context()?;
         let mut lookup_table_account =
             instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
-        lookup_table_account.set_state(&ProgramState::LookupTable(LookupTableMeta::new(
-            authority_key,
-        )))?;
+        lookup_table_account.set_state(
+            &ProgramState::LookupTable(LookupTableMeta::new(authority_key)),
+            &invoke_context.feature_set,
+        )?;

         Ok(())
     }
@@ -213,7 +214,7 @@ impl Processor {
         let mut lookup_table_meta = lookup_table.meta;
         lookup_table_meta.authority = None;
         AddressLookupTable::overwrite_meta_data(
-            lookup_table_account.get_data_mut()?,
+            lookup_table_account.get_data_mut(&invoke_context.feature_set)?,
             lookup_table_meta,
         )?;

@@ -305,11 +306,12 @@ impl Processor {
         )?;
         {
             AddressLookupTable::overwrite_meta_data(
-                lookup_table_account.get_data_mut()?,
+                lookup_table_account.get_data_mut(&invoke_context.feature_set)?,
                 lookup_table_meta,
             )?;
             for new_address in new_addresses {
-                lookup_table_account.extend_from_slice(new_address.as_ref())?;
+                lookup_table_account
+                    .extend_from_slice(new_address.as_ref(), &invoke_context.feature_set)?;
             }
         }
         drop(lookup_table_account);
@@ -381,7 +383,7 @@ impl Processor {
         lookup_table_meta.deactivation_slot = clock.slot;

         AddressLookupTable::overwrite_meta_data(
-            lookup_table_account.get_data_mut()?,
+            lookup_table_account.get_data_mut(&invoke_context.feature_set)?,
             lookup_table_meta,
         )?;

@@ -456,13 +458,13 @@ impl Processor {

         let mut recipient_account =
             instruction_context.try_borrow_instruction_account(transaction_context, 2)?;
-        recipient_account.checked_add_lamports(withdrawn_lamports)?;
+        recipient_account.checked_add_lamports(withdrawn_lamports, &invoke_context.feature_set)?;
         drop(recipient_account);

         let mut lookup_table_account =
             instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
-        lookup_table_account.set_data_length(0)?;
-        lookup_table_account.set_lamports(0)?;
+        lookup_table_account.set_data_length(0, &invoke_context.feature_set)?;
+        lookup_table_account.set_lamports(0, &invoke_context.feature_set)?;

         Ok(())
     }
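Editorial aside, not part of the diff: the recurring mechanical change in this processor, and in the loaders below, is that every state-mutating `BorrowedAccount` method now takes the active feature set. A before/after sketch taken directly from the hunks above:

    // Before:
    // lookup_table_account.set_data_length(0)?;
    // lookup_table_account.set_lamports(0)?;

    // After: each mutation consults the caller's FeatureSet.
    lookup_table_account.set_data_length(0, &invoke_context.feature_set)?;
    lookup_table_account.set_lamports(0, &invoke_context.feature_set)?;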
diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml
index 16a52c07928620..48d771b8656828 100644
--- a/programs/bpf_loader/Cargo.toml
+++ b/programs/bpf_loader/Cargo.toml
@@ -27,6 +27,7 @@ assert_matches = { workspace = true }
 memoffset = { workspace = true }
 rand = { workspace = true }
 solana-sdk = { workspace = true, features = ["dev-context-only-utils"] }
+test-case = { workspace = true }

 [lib]
 crate-type = ["lib"]
diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs
index 5d3c55a165e399..abd0823b83497e 100644
--- a/programs/bpf_loader/benches/serialization.rs
+++ b/programs/bpf_loader/benches/serialization.rs
@@ -7,6 +7,7 @@ use {
     solana_sdk::{
         account::{Account, AccountSharedData},
         bpf_loader, bpf_loader_deprecated,
+        feature_set::FeatureSet,
         pubkey::Pubkey,
         sysvar::rent::Rent,
         transaction_context::{IndexOfAccount, InstructionAccount, TransactionContext},
@@ -126,7 +127,13 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) {
         .get_current_instruction_context()
         .unwrap();
     bencher.iter(|| {
-        let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap();
+        let _ = serialize_parameters(
+            &transaction_context,
+            instruction_context,
+            false,
+            &FeatureSet::all_enabled(),
+        )
+        .unwrap();
     });
 }

@@ -137,7 +144,13 @@ fn bench_serialize_unaligned_copy_account_data(bencher: &mut Bencher) {
         .get_current_instruction_context()
         .unwrap();
     bencher.iter(|| {
-        let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap();
+        let _ = serialize_parameters(
+            &transaction_context,
+            instruction_context,
+            true,
+            &FeatureSet::all_enabled(),
+        )
+        .unwrap();
     });
 }

@@ -149,7 +162,13 @@ fn bench_serialize_aligned(bencher: &mut Bencher) {
         .unwrap();

     bencher.iter(|| {
-        let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap();
+        let _ = serialize_parameters(
+            &transaction_context,
+            instruction_context,
+            false,
+            &FeatureSet::all_enabled(),
+        )
+        .unwrap();
     });
 }

@@ -161,7 +180,13 @@ fn bench_serialize_aligned_copy_account_data(bencher: &mut Bencher) {
         .unwrap();

     bencher.iter(|| {
-        let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap();
+        let _ = serialize_parameters(
+            &transaction_context,
+            instruction_context,
+            true,
+            &FeatureSet::all_enabled(),
+        )
+        .unwrap();
     });
 }

@@ -172,7 +197,13 @@ fn bench_serialize_unaligned_max_accounts(bencher: &mut Bencher) {
         .get_current_instruction_context()
         .unwrap();
     bencher.iter(|| {
-        let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap();
+        let _ = serialize_parameters(
+            &transaction_context,
+            instruction_context,
+            false,
+            &FeatureSet::all_enabled(),
+        )
+        .unwrap();
     });
 }

@@ -184,6 +215,12 @@ fn bench_serialize_aligned_max_accounts(bencher: &mut Bencher) {
         .unwrap();

     bencher.iter(|| {
-        let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap();
+        let _ = serialize_parameters(
+            &transaction_context,
+            instruction_context,
+            false,
+            &FeatureSet::all_enabled(),
+        )
+        .unwrap();
     });
 }
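Editorial aside, not part of the diff: `serialize_parameters` gains a `&FeatureSet` parameter. A hedged usage sketch mirroring the bench updates above exactly:

    // Sketch only; `transaction_context` and `instruction_context` as set up in the benches.
    let _ = serialize_parameters(
        &transaction_context,
        instruction_context,
        true, // copy_account_data
        &FeatureSet::all_enabled(),
    )
    .unwrap();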
diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs
index dd944b7c8a1513..18ca167607c2f3 100644
--- a/programs/bpf_loader/src/lib.rs
+++ b/programs/bpf_loader/src/lib.rs
@@ -34,16 +34,15 @@ use {
         clock::Slot,
         entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS},
         feature_set::{
-            bpf_account_data_direct_mapping, delay_visibility_of_program_deployment,
-            enable_bpf_loader_extend_program_ix, enable_bpf_loader_set_authority_checked_ix,
-            enable_program_redeployment_cooldown, limit_max_instruction_trace_length,
-            native_programs_consume_cu, remove_bpf_loader_incorrect_program_id,
+            bpf_account_data_direct_mapping, deprecate_executable_meta_update_in_bpf_loader,
+            disable_bpf_loader_instructions, enable_bpf_loader_extend_program_ix,
+            enable_bpf_loader_set_authority_checked_ix, native_programs_consume_cu,
+            remove_bpf_loader_incorrect_program_id, FeatureSet,
         },
         instruction::{AccountMeta, InstructionError},
         loader_instruction::LoaderInstruction,
         loader_upgradeable_instruction::UpgradeableLoaderInstruction,
         native_loader,
-        program_error::MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED,
         program_utils::limited_deserialize,
         pubkey::Pubkey,
         saturating_add_assign,
@@ -67,7 +66,6 @@ pub const UPGRADEABLE_LOADER_COMPUTE_UNITS: u64 = 2_370;

 #[allow(clippy::too_many_arguments)]
 pub fn load_program_from_bytes(
-    delay_visibility_of_program_deployment: bool,
     log_collector: Option<Rc<RefCell<LogCollector>>>,
     load_program_metrics: &mut LoadProgramMetrics,
     programdata: &[u8],
@@ -77,11 +75,7 @@ pub fn load_program_from_bytes(
     program_runtime_environment: Arc<BuiltinProgram<InvokeContext<'static>>>,
     reloading: bool,
 ) -> Result<LoadedProgram, InstructionError> {
-    let effective_slot = if delay_visibility_of_program_deployment {
-        deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET)
-    } else {
-        deployment_slot
-    };
+    let effective_slot = deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET);
     let loaded_program = if reloading {
         // Safety: this is safe because the program is being reloaded in the cache.
         unsafe {
@@ -151,7 +145,6 @@ macro_rules! deploy_program {
         load_program_metrics.verify_code_us = verify_code_time.as_us();
         // Reload but with environments.program_runtime_v1
         let executor = load_program_from_bytes(
-            $invoke_context.feature_set.is_active(&delay_visibility_of_program_deployment::id()),
             $invoke_context.get_log_collector(),
             &mut load_program_metrics,
             $new_programdata,
@@ -186,7 +179,7 @@ fn write_program_data(
     let transaction_context = &invoke_context.transaction_context;
     let instruction_context = transaction_context.get_current_instruction_context()?;
     let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
-    let data = program.get_data_mut()?;
+    let data = program.get_data_mut(&invoke_context.feature_set)?;
     let write_offset = program_data_offset.saturating_add(bytes.len());
     if data.len() < write_offset {
         ic_msg!(
@@ -210,14 +203,12 @@ pub fn check_loader_id(id: &Pubkey) -> bool {
 }

 /// Only used in macro, do not use directly!
-pub fn calculate_heap_cost(heap_size: u32, heap_cost: u64, enable_rounding_fix: bool) -> u64 {
+pub fn calculate_heap_cost(heap_size: u32, heap_cost: u64) -> u64 {
     const KIBIBYTE: u64 = 1024;
     const PAGE_SIZE_KB: u64 = 32;
     let mut rounded_heap_size = u64::from(heap_size);
-    if enable_rounding_fix {
-        rounded_heap_size = rounded_heap_size
-            .saturating_add(PAGE_SIZE_KB.saturating_mul(KIBIBYTE).saturating_sub(1));
-    }
+    rounded_heap_size =
+        rounded_heap_size.saturating_add(PAGE_SIZE_KB.saturating_mul(KIBIBYTE).saturating_sub(1));
     rounded_heap_size
         .checked_div(PAGE_SIZE_KB.saturating_mul(KIBIBYTE))
         .expect("PAGE_SIZE_KB * KIBIBYTE > 0")
@@ -282,17 +273,10 @@ macro_rules! create_vm {
         let invoke_context = &*$invoke_context;
         let stack_size = $program.get_config().stack_size();
         let heap_size = invoke_context.get_compute_budget().heap_size;
-        let round_up_heap_size = invoke_context
-            .feature_set
-            .is_active(&solana_sdk::feature_set::round_up_heap_size::id());
-        let mut heap_cost_result = invoke_context.consume_checked($crate::calculate_heap_cost(
+        let heap_cost_result = invoke_context.consume_checked($crate::calculate_heap_cost(
             heap_size,
             invoke_context.get_compute_budget().heap_cost,
-            round_up_heap_size,
         ));
-        if !round_up_heap_size {
-            heap_cost_result = Ok(());
-        }
         let mut allocations = None;
         let $vm = heap_cost_result.and_then(|_| {
             let mut stack = solana_rbpf::aligned_memory::AlignedMemory::<
@@ -465,7 +449,7 @@ pub fn process_instruction_inner(
             instruction_context,
             first_instruction_account,
         )?;
-        if first_account.is_executable() {
+        if first_account.is_executable(&invoke_context.feature_set) {
             ic_logger_msg!(log_collector, "BPF loader is executable");
             return Err(Box::new(InstructionError::IncorrectProgramId));
         }
@@ -476,7 +460,7 @@ pub fn process_instruction_inner(
             instruction_context,
             program_account_index,
         )?;
-        if program.is_executable() && !check_loader_id(program.get_owner()) {
+        if program.is_executable(&invoke_context.feature_set)
+            && !check_loader_id(program.get_owner())
+        {
             ic_logger_msg!(
                 log_collector,
                 "Executable account not owned by the BPF loader"
@@ -522,7 +508,7 @@ pub fn process_instruction_inner(
     }

     // Program Invocation
-    if !program_account.is_executable() {
+    if !program_account.is_executable(&invoke_context.feature_set) {
         ic_logger_msg!(log_collector, "Program is not executable");
         return Err(Box::new(InstructionError::IncorrectProgramId));
     }
@@ -580,9 +566,12 @@ fn process_loader_upgradeable_instruction(
                 instruction_context.get_index_of_instruction_account_in_transaction(1)?,
             )?);
-            buffer.set_state(&UpgradeableLoaderState::Buffer {
-                authority_address: authority_key,
-            })?;
+            buffer.set_state(
+                &UpgradeableLoaderState::Buffer {
+                    authority_address: authority_key,
+                },
+                &invoke_context.feature_set,
+            )?;
         }
         UpgradeableLoaderInstruction::Write { offset, bytes } => {
             instruction_context.check_number_of_instruction_accounts(2)?;
@@ -706,8 +695,8 @@ fn process_loader_upgradeable_instruction(
                     instruction_context.try_borrow_instruction_account(transaction_context, 3)?;
                 let mut payer =
                     instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
-                payer.checked_add_lamports(buffer.get_lamports())?;
-                buffer.set_lamports(0)?;
+                payer.checked_add_lamports(buffer.get_lamports(), &invoke_context.feature_set)?;
+                buffer.set_lamports(0, &invoke_context.feature_set)?;
             }

             let owner_id = *program_id;
@@ -761,12 +750,15 @@ fn process_loader_upgradeable_instruction(
             {
                 let mut programdata =
                     instruction_context.try_borrow_instruction_account(transaction_context, 1)?;
-                programdata.set_state(&UpgradeableLoaderState::ProgramData {
-                    slot: clock.slot,
-                    upgrade_authority_address: authority_key,
-                })?;
+                programdata.set_state(
+                    &UpgradeableLoaderState::ProgramData {
+                        slot: clock.slot,
+                        upgrade_authority_address: authority_key,
+                    },
+                    &invoke_context.feature_set,
+                )?;
                 let dst_slice = programdata
-                    .get_data_mut()?
+                    .get_data_mut(&invoke_context.feature_set)?
                     .get_mut(
                         programdata_data_offset
                             ..programdata_data_offset.saturating_add(buffer_data_len),
@@ -779,21 +771,30 @@ fn process_loader_upgradeable_instruction(
                     .get(buffer_data_offset..)
                     .ok_or(InstructionError::AccountDataTooSmall)?;
                 dst_slice.copy_from_slice(src_slice);
-                if invoke_context
-                    .feature_set
-                    .is_active(&enable_program_redeployment_cooldown::id())
-                {
-                    buffer.set_data_length(UpgradeableLoaderState::size_of_buffer(0))?;
-                }
+                buffer.set_data_length(
+                    UpgradeableLoaderState::size_of_buffer(0),
+                    &invoke_context.feature_set,
+                )?;
             }

             // Update the Program account
             let mut program =
                 instruction_context.try_borrow_instruction_account(transaction_context, 2)?;
-            program.set_state(&UpgradeableLoaderState::Program {
-                programdata_address: programdata_key,
-            })?;
-            program.set_executable(true)?;
+            program.set_state(
+                &UpgradeableLoaderState::Program {
+                    programdata_address: programdata_key,
+                },
+                &invoke_context.feature_set,
+            )?;
+
+            // Skip writing true to executable meta after bpf program deployment when
+            // `deprecate_executable_meta_update_in_bpf_loader` feature is activated.
+            if !invoke_context
+                .feature_set
+                .is_active(&deprecate_executable_meta_update_in_bpf_loader::id())
+            {
+                program.set_executable(true)?;
+            }
             drop(program);

             ic_logger_msg!(log_collector, "Deployed program {:?}", new_program_id);
@@ -815,7 +816,7 @@ fn process_loader_upgradeable_instruction(
             let program =
                 instruction_context.try_borrow_instruction_account(transaction_context, 1)?;
-            if !program.is_executable() {
+            if !program.is_executable(&invoke_context.feature_set) {
                 ic_logger_msg!(log_collector, "Program account not executable");
                 return Err(InstructionError::AccountNotExecutable);
             }
@@ -897,11 +898,7 @@ fn process_loader_upgradeable_instruction(
                 upgrade_authority_address,
             } = programdata.get_state()?
             {
-                if invoke_context
-                    .feature_set
-                    .is_active(&enable_program_redeployment_cooldown::id())
-                    && clock.slot == slot
-                {
+                if clock.slot == slot {
                     ic_logger_msg!(log_collector, "Program was deployed in this block already");
                     return Err(InstructionError::InvalidArgument);
                 }
@@ -950,12 +947,15 @@ fn process_loader_upgradeable_instruction(
             let mut programdata =
                 instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
             {
-                programdata.set_state(&UpgradeableLoaderState::ProgramData {
-                    slot: clock.slot,
-                    upgrade_authority_address: authority_key,
-                })?;
+                programdata.set_state(
+                    &UpgradeableLoaderState::ProgramData {
+                        slot: clock.slot,
+                        upgrade_authority_address: authority_key,
+                    },
+                    &invoke_context.feature_set,
+                )?;
                 let dst_slice = programdata
-                    .get_data_mut()?
+                    .get_data_mut(&invoke_context.feature_set)?
                     .get_mut(
                         programdata_data_offset
                             ..programdata_data_offset.saturating_add(buffer_data_len),
@@ -970,7 +970,7 @@ fn process_loader_upgradeable_instruction(
                 dst_slice.copy_from_slice(src_slice);
             }
             programdata
-                .get_data_mut()?
+                .get_data_mut(&invoke_context.feature_set)?
                 .get_mut(programdata_data_offset.saturating_add(buffer_data_len)..)
                 .ok_or(InstructionError::AccountDataTooSmall)?
                 .fill(0);
@@ -985,15 +985,14 @@ fn process_loader_upgradeable_instruction(
                     .get_lamports()
                     .saturating_add(buffer_lamports)
                     .saturating_sub(programdata_balance_required),
+                &invoke_context.feature_set,
+            )?;
+            buffer.set_lamports(0, &invoke_context.feature_set)?;
+            programdata.set_lamports(programdata_balance_required, &invoke_context.feature_set)?;
+            buffer.set_data_length(
+                UpgradeableLoaderState::size_of_buffer(0),
+                &invoke_context.feature_set,
             )?;
-            buffer.set_lamports(0)?;
-            programdata.set_lamports(programdata_balance_required)?;
-            if invoke_context
-                .feature_set
-                .is_active(&enable_program_redeployment_cooldown::id())
-            {
-                buffer.set_data_length(UpgradeableLoaderState::size_of_buffer(0))?;
-            }

             ic_logger_msg!(log_collector, "Upgraded program {:?}", new_program_id);
         }
@@ -1029,9 +1028,12 @@ fn process_loader_upgradeable_instruction(
                         ic_logger_msg!(log_collector, "Buffer authority did not sign");
                         return Err(InstructionError::MissingRequiredSignature);
                     }
-                    account.set_state(&UpgradeableLoaderState::Buffer {
-                        authority_address: new_authority.cloned(),
-                    })?;
+                    account.set_state(
+                        &UpgradeableLoaderState::Buffer {
+                            authority_address: new_authority.cloned(),
+                        },
+                        &invoke_context.feature_set,
+                    )?;
                 }
                 UpgradeableLoaderState::ProgramData {
                     slot,
@@ -1049,10 +1051,13 @@ fn process_loader_upgradeable_instruction(
                         ic_logger_msg!(log_collector, "Upgrade authority did not sign");
                         return Err(InstructionError::MissingRequiredSignature);
                     }
-                    account.set_state(&UpgradeableLoaderState::ProgramData {
-                        slot,
-                        upgrade_authority_address: new_authority.cloned(),
-                    })?;
+                    account.set_state(
+                        &UpgradeableLoaderState::ProgramData {
+                            slot,
+                            upgrade_authority_address: new_authority.cloned(),
+                        },
+                        &invoke_context.feature_set,
+                    )?;
                 }
                 _ => {
                     ic_logger_msg!(log_collector, "Account does not support authorities");
@@ -1098,9 +1103,12 @@ fn process_loader_upgradeable_instruction(
                         ic_logger_msg!(log_collector, "New authority did not sign");
                         return Err(InstructionError::MissingRequiredSignature);
                     }
-                    account.set_state(&UpgradeableLoaderState::Buffer {
-                        authority_address: Some(*new_authority_key),
-                    })?;
+                    account.set_state(
+                        &UpgradeableLoaderState::Buffer {
+                            authority_address: Some(*new_authority_key),
+                        },
+                        &invoke_context.feature_set,
+                    )?;
                 }
                 UpgradeableLoaderState::ProgramData {
                     slot,
@@ -1122,10 +1130,13 @@ fn process_loader_upgradeable_instruction(
                         ic_logger_msg!(log_collector, "New authority did not sign");
                         return Err(InstructionError::MissingRequiredSignature);
                     }
-                    account.set_state(&UpgradeableLoaderState::ProgramData {
-                        slot,
-                        upgrade_authority_address: Some(*new_authority_key),
-                    })?;
+                    account.set_state(
+                        &UpgradeableLoaderState::ProgramData {
+                            slot,
+                            upgrade_authority_address: Some(*new_authority_key),
+                        },
+                        &invoke_context.feature_set,
+                    )?;
                 }
                 _ => {
                     ic_logger_msg!(log_collector, "Account does not support authorities");
@@ -1150,18 +1161,19 @@ fn process_loader_upgradeable_instruction(
                 instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
             let close_key = *close_account.get_key();
             let close_account_state = close_account.get_state()?;
-            if invoke_context
-                .feature_set
-                .is_active(&enable_program_redeployment_cooldown::id())
-            {
-                close_account.set_data_length(UpgradeableLoaderState::size_of_uninitialized())?;
-            }
+            close_account.set_data_length(
+                UpgradeableLoaderState::size_of_uninitialized(),
+                &invoke_context.feature_set,
+            )?;
             match close_account_state {
                 UpgradeableLoaderState::Uninitialized => {
                     let mut recipient_account = instruction_context
                         .try_borrow_instruction_account(transaction_context, 1)?;
-                    recipient_account.checked_add_lamports(close_account.get_lamports())?;
-                    close_account.set_lamports(0)?;
+                    recipient_account.checked_add_lamports(
+                        close_account.get_lamports(),
+                        &invoke_context.feature_set,
+                    )?;
+                    close_account.set_lamports(0, &invoke_context.feature_set)?;

                     ic_logger_msg!(log_collector, "Closed Uninitialized {}", close_key);
                 }
@@ -1173,6 +1185,7 @@ fn process_loader_upgradeable_instruction(
                         transaction_context,
                         instruction_context,
                         &log_collector,
+                        &invoke_context.feature_set,
                     )?;

                     ic_logger_msg!(log_collector, "Closed Buffer {}", close_key);
@@ -1195,18 +1208,10 @@ fn process_loader_upgradeable_instruction(
                         ic_logger_msg!(log_collector, "Program account not owned by loader");
                         return Err(InstructionError::IncorrectProgramId);
                     }
-                    if invoke_context
-                        .feature_set
-                        .is_active(&enable_program_redeployment_cooldown::id())
-                    {
-                        let clock = invoke_context.get_sysvar_cache().get_clock()?;
-                        if clock.slot == slot {
-                            ic_logger_msg!(
-                                log_collector,
-                                "Program was deployed in this block already"
-                            );
-                            return Err(InstructionError::InvalidArgument);
-                        }
+                    let clock = invoke_context.get_sysvar_cache().get_clock()?;
+                    if clock.slot == slot {
+                        ic_logger_msg!(log_collector, "Program was deployed in this block already");
+                        return Err(InstructionError::InvalidArgument);
                     }

                     match program_account.get_state()?
                     {
@@ -1227,30 +1232,16 @@ fn process_loader_upgradeable_instruction(
                                 transaction_context,
                                 instruction_context,
                                 &log_collector,
+                                &invoke_context.feature_set,
                             )?;
                             let clock = invoke_context.get_sysvar_cache().get_clock()?;
-                            if invoke_context
-                                .feature_set
-                                .is_active(&delay_visibility_of_program_deployment::id())
-                            {
-                                invoke_context.programs_modified_by_tx.replenish(
-                                    program_key,
-                                    Arc::new(LoadedProgram::new_tombstone(
-                                        clock.slot,
-                                        LoadedProgramType::Closed,
-                                    )),
-                                );
-                            } else {
-                                invoke_context
-                                    .programs_updated_only_for_global_cache
-                                    .replenish(
-                                        program_key,
-                                        Arc::new(LoadedProgram::new_tombstone(
-                                            clock.slot,
-                                            LoadedProgramType::Closed,
-                                        )),
-                                    );
-                            }
+                            invoke_context.programs_modified_by_tx.replenish(
+                                program_key,
+                                Arc::new(LoadedProgram::new_tombstone(
+                                    clock.slot,
+                                    LoadedProgramType::Closed,
+                                )),
+                            );
                         }
                         _ => {
                             ic_logger_msg!(log_collector, "Invalid Program account");
@@ -1399,7 +1390,7 @@ fn process_loader_upgradeable_instruction(
             let instruction_context = transaction_context.get_current_instruction_context()?;
             let mut programdata_account = instruction_context
                 .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?;
-            programdata_account.set_data_length(new_len)?;
+            programdata_account.set_data_length(new_len, &invoke_context.feature_set)?;

             let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata();
@@ -1420,10 +1411,13 @@ fn process_loader_upgradeable_instruction(
             let mut programdata_account = instruction_context
                 .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?;
-            programdata_account.set_state(&UpgradeableLoaderState::ProgramData {
-                slot: clock_slot,
-                upgrade_authority_address,
-            })?;
+            programdata_account.set_state(
+                &UpgradeableLoaderState::ProgramData {
+                    slot: clock_slot,
+                    upgrade_authority_address,
+                },
+                &invoke_context.feature_set,
+            )?;

             ic_logger_msg!(
                 log_collector,
@@ -1441,6 +1435,7 @@ fn common_close_account(
     transaction_context: &TransactionContext,
     instruction_context: &InstructionContext,
     log_collector: &Option<Rc<RefCell<LogCollector>>>,
+    feature_set: &FeatureSet,
 ) -> Result<(), InstructionError> {
     if authority_address.is_none() {
         ic_logger_msg!(log_collector, "Account is immutable");
@@ -1463,9 +1458,10 @@ fn common_close_account(
         instruction_context.try_borrow_instruction_account(transaction_context, 0)?;
     let mut recipient_account =
         instruction_context.try_borrow_instruction_account(transaction_context, 1)?;
-    recipient_account.checked_add_lamports(close_account.get_lamports())?;
-    close_account.set_lamports(0)?;
-    close_account.set_state(&UpgradeableLoaderState::Uninitialized)?;
+
+    recipient_account.checked_add_lamports(close_account.get_lamports(), feature_set)?;
+    close_account.set_lamports(0, feature_set)?;
+    close_account.set_state(&UpgradeableLoaderState::Uninitialized, feature_set)?;
     Ok(())
 }

@@ -1482,6 +1478,20 @@ fn process_loader_instruction(invoke_context: &mut InvokeContext) -> Result<(),
         );
         return Err(InstructionError::IncorrectProgramId);
     }
+
+    // Return `UnsupportedProgramId` error for bpf_loader when
+    // `disable_bpf_loader_instructions` feature is activated.
+    if invoke_context
+        .feature_set
+        .is_active(&disable_bpf_loader_instructions::id())
+    {
+        ic_msg!(
+            invoke_context,
+            "BPF loader management instructions are no longer supported"
+        );
+        return Err(InstructionError::UnsupportedProgramId);
+    }
+
     let is_program_signer = program.is_signer();
     match limited_deserialize(instruction_data)?
{ LoaderInstruction::Write { offset, bytes } => { @@ -1506,6 +1516,13 @@ fn process_loader_instruction(invoke_context: &mut InvokeContext) -> Result<(), {}, program.get_data(), ); + + // `deprecate_executable_meta_update_in_bpf_loader` feature doesn't + // apply to bpf_loader v2. Instead, the deployment by bpf_loader + // will be deprecated by its own feature + // `disable_bpf_loader_instructions`. Before we activate + // deprecate_executable_meta_update_in_bpf_loader, we should + // activate `disable_bpf_loader_instructions` first. program.set_executable(true)?; ic_msg!(invoke_context, "Finalized account {:?}", program.get_key()); } @@ -1545,6 +1562,7 @@ fn execute<'a, 'b: 'a>( invoke_context.transaction_context, instruction_context, !direct_mapping, + &invoke_context.feature_set, )?; serialize_time.stop(); @@ -1596,17 +1614,7 @@ fn execute<'a, 'b: 'a>( } match result { ProgramResult::Ok(status) if status != SUCCESS => { - let error: InstructionError = if status == MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED - && !invoke_context - .feature_set - .is_active(&limit_max_instruction_trace_length::id()) - { - // Until the limit_max_instruction_trace_length feature is - // enabled, map the `MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED` error to `InvalidError`. - InstructionError::InvalidError - } else { - status.into() - }; + let error: InstructionError = status.into(); Err(Box::new(error) as Box) } ProgramResult::Err(mut error) => { @@ -1635,13 +1643,15 @@ fn execute<'a, 'b: 'a>( instruction_account_index as IndexOfAccount, )?; - error = EbpfError::SyscallError(Box::new(if account.is_executable() { - InstructionError::ExecutableDataModified - } else if account.is_writable() { - InstructionError::ExternalAccountDataModified - } else { - InstructionError::ReadonlyDataModified - })); + error = EbpfError::SyscallError(Box::new( + if account.is_executable(&invoke_context.feature_set) { + InstructionError::ExecutableDataModified + } else if account.is_writable() { + InstructionError::ExternalAccountDataModified + } else { + InstructionError::ReadonlyDataModified + }, + )); } } } @@ -1669,6 +1679,7 @@ fn execute<'a, 'b: 'a>( copy_account_data, parameter_bytes, &invoke_context.get_syscall_context()?.accounts_metadata, + &invoke_context.feature_set, ) } @@ -1722,7 +1733,6 @@ pub mod test_utils { .expect("Failed to get account key"); if let Ok(loaded_program) = load_program_from_bytes( - true, None, &mut load_program_metrics, account.data(), @@ -1799,6 +1809,10 @@ mod tests { expected_result, Entrypoint::vm, |invoke_context| { + let mut features = FeatureSet::all_enabled(); + features.deactivate(&disable_bpf_loader_instructions::id()); + features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); + invoke_context.feature_set = Arc::new(features); test_utils::load_all_invoked_programs(invoke_context); }, |_invoke_context| {}, @@ -2018,6 +2032,10 @@ mod tests { Err(InstructionError::ProgramFailedToComplete), Entrypoint::vm, |invoke_context| { + let mut features = FeatureSet::all_enabled(); + features.deactivate(&disable_bpf_loader_instructions::id()); + features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); + invoke_context.feature_set = Arc::new(features); invoke_context.mock_set_remaining(0); test_utils::load_all_invoked_programs(invoke_context); }, @@ -2563,7 +2581,12 @@ mod tests { instruction_accounts, expected_result, Entrypoint::vm, - |_invoke_context| {}, + |invoke_context| { + let mut features = FeatureSet::all_enabled(); + 
features.deactivate(&disable_bpf_loader_instructions::id());
+                features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id());
+                invoke_context.feature_set = Arc::new(features);
+            },
            |_invoke_context| {},
        )
    }
@@ -2687,7 +2710,7 @@ mod tests {
            &elf_orig,
            &elf_new,
        );
-        *instruction_accounts.get_mut(3).unwrap() = instruction_accounts.get(0).unwrap().clone();
+        *instruction_accounts.get_mut(3).unwrap() = instruction_accounts.first().unwrap().clone();
        process_instruction(
            transaction_accounts,
            instruction_accounts,
@@ -4045,35 +4068,17 @@ mod tests {
        // heap allocations are in 32K blocks; `heap_cost` CUs are consumed per additional 32K

-        // when `enable_heap_size_round_up` not enabled:
-        {
-            // assert less than 32K heap should cost zero unit
-            assert_eq!(0, calculate_heap_cost(31 * 1024, heap_cost, false));
-
-            // assert exact 32K heap should be cost zero unit
-            assert_eq!(0, calculate_heap_cost(32 * 1024, heap_cost, false));
-
-            // assert slightly more than 32K heap is mistakenly cost zero unit
-            assert_eq!(0, calculate_heap_cost(33 * 1024, heap_cost, false));
-
-            // assert exact 64K heap should cost 1 * heap_cost
-            assert_eq!(heap_cost, calculate_heap_cost(64 * 1024, heap_cost, false));
-        }
-
-        // when `enable_heap_size_round_up` is enabled:
-        {
-            // assert less than 32K heap should cost zero unit
-            assert_eq!(0, calculate_heap_cost(31 * 1024, heap_cost, true));
+        // assert that less than 32K of heap should cost zero units
+        assert_eq!(0, calculate_heap_cost(31 * 1024, heap_cost));

-            // assert exact 32K heap should be cost zero unit
-            assert_eq!(0, calculate_heap_cost(32 * 1024, heap_cost, true));
+        // assert that exactly 32K of heap should cost zero units
+        assert_eq!(0, calculate_heap_cost(32 * 1024, heap_cost));

-            // assert slightly more than 32K heap should cost 1 * heap_cost
-            assert_eq!(heap_cost, calculate_heap_cost(33 * 1024, heap_cost, true));
+        // assert that slightly more than 32K of heap should cost 1 * heap_cost
+        assert_eq!(heap_cost, calculate_heap_cost(33 * 1024, heap_cost));

-            // assert exact 64K heap should cost 1 * heap_cost
-            assert_eq!(heap_cost, calculate_heap_cost(64 * 1024, heap_cost, true));
-        }
+        // assert that exactly 64K of heap should cost 1 * heap_cost
+        assert_eq!(heap_cost, calculate_heap_cost(64 * 1024, heap_cost));
    }

    fn deploy_test_program(
@@ -4109,6 +4114,7 @@ mod tests {
            maybe_expiration_slot: None,
            tx_usage_counter: AtomicU64::new(100),
            ix_usage_counter: AtomicU64::new(100),
+            latest_access_slot: AtomicU64::new(0),
        };
        invoke_context
            .programs_modified_by_tx
@@ -4149,6 +4155,7 @@ mod tests {
            maybe_expiration_slot: None,
            tx_usage_counter: AtomicU64::new(100),
            ix_usage_counter: AtomicU64::new(100),
+            latest_access_slot: AtomicU64::new(0),
        };
        invoke_context
            .programs_modified_by_tx
diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs
index f9cbc2e752c54d..d4cbd09642f47c 100644
--- a/programs/bpf_loader/src/serialization.rs
+++ b/programs/bpf_loader/src/serialization.rs
@@ -11,6 +11,7 @@ use {
    solana_sdk::{
        bpf_loader_deprecated,
        entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, NON_DUP_MARKER},
+        feature_set::FeatureSet,
        instruction::InstructionError,
        pubkey::Pubkey,
        system_instruction::MAX_PERMITTED_DATA_LENGTH,
@@ -93,6 +94,7 @@ impl Serializer {
    fn write_account(
        &mut self,
        account: &mut BorrowedAccount<'_>,
+        feature_set: &FeatureSet,
    ) -> Result<u64, InstructionError> {
        let vm_data_addr = if self.copy_account_data {
            let vm_data_addr = self.vaddr.saturating_add(self.buffer.len() as u64);
@@ -101,7 +103,7 @@ impl Serializer {
        } else {
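            // In this branch (`copy_account_data == false`, i.e. direct mapping) the account
            // payload is not copied into the serialization buffer; it is exposed to the VM as
            // its own memory region, and deciding whether that region is writable now needs
            // the feature set that is threaded through below.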
self.push_region(true); let vaddr = self.vaddr; - self.push_account_data_region(account)?; + self.push_account_data_region(account, feature_set)?; vaddr }; @@ -121,7 +123,7 @@ impl Serializer { .map_err(|_| InstructionError::InvalidArgument)?; self.region_start += BPF_ALIGN_OF_U128.saturating_sub(align_offset); // put the realloc padding in its own region - self.push_region(account.can_data_be_changed().is_ok()); + self.push_region(account.can_data_be_changed(feature_set).is_ok()); } } @@ -131,12 +133,13 @@ impl Serializer { fn push_account_data_region( &mut self, account: &mut BorrowedAccount<'_>, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if !account.get_data().is_empty() { - let region = match account_data_region_memory_state(account) { + let region = match account_data_region_memory_state(account, feature_set) { MemoryState::Readable => MemoryRegion::new_readonly(account.get_data(), self.vaddr), MemoryState::Writable => { - MemoryRegion::new_writable(account.get_data_mut()?, self.vaddr) + MemoryRegion::new_writable(account.get_data_mut(feature_set)?, self.vaddr) } MemoryState::Cow(index_in_transaction) => { MemoryRegion::new_cow(account.get_data(), self.vaddr, index_in_transaction) @@ -191,6 +194,7 @@ pub fn serialize_parameters( transaction_context: &TransactionContext, instruction_context: &InstructionContext, copy_account_data: bool, + feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -239,6 +243,7 @@ pub fn serialize_parameters( instruction_context.get_instruction_data(), &program_id, copy_account_data, + feature_set, ) } else { serialize_parameters_aligned( @@ -246,6 +251,7 @@ pub fn serialize_parameters( instruction_context.get_instruction_data(), &program_id, copy_account_data, + feature_set, ) } } @@ -256,6 +262,7 @@ pub fn deserialize_parameters( copy_account_data: bool, buffer: &[u8], accounts_metadata: &[SerializedAccountMetadata], + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let is_loader_deprecated = *instruction_context .try_borrow_last_program_account(transaction_context)? 
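Every (de)serialization entry point now takes the active FeatureSet by reference instead of
reading it out of InvokeContext. A minimal sketch of the new call shape, with the binding
names assumed from the `execute()` hunk above rather than shown in this diff:

    let (parameter_bytes, regions, accounts_metadata) = serialize_parameters(
        invoke_context.transaction_context,
        instruction_context,
        !direct_mapping, // copy_account_data
        &invoke_context.feature_set,
    )?;

deserialize_parameters below mirrors the same signature change.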
@@ -269,6 +276,7 @@ pub fn deserialize_parameters( copy_account_data, buffer, account_lengths, + feature_set, ) } else { deserialize_parameters_aligned( @@ -277,6 +285,7 @@ pub fn deserialize_parameters( copy_account_data, buffer, account_lengths, + feature_set, ) } } @@ -286,6 +295,7 @@ fn serialize_parameters_unaligned( instruction_data: &[u8], program_id: &Pubkey, copy_account_data: bool, + feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -336,9 +346,9 @@ fn serialize_parameters_unaligned( let vm_key_addr = s.write_all(account.get_key().as_ref()); let vm_lamports_addr = s.write::(account.get_lamports().to_le()); s.write::((account.get_data().len() as u64).to_le()); - let vm_data_addr = s.write_account(&mut account)?; + let vm_data_addr = s.write_account(&mut account, feature_set)?; let vm_owner_addr = s.write_all(account.get_owner().as_ref()); - s.write::(account.is_executable() as u8); + s.write::(account.is_executable(feature_set) as u8); s.write::((account.get_rent_epoch()).to_le()); accounts_metadata.push(SerializedAccountMetadata { original_data_len: account.get_data().len(), @@ -364,6 +374,7 @@ pub fn deserialize_parameters_unaligned>( copy_account_data: bool, buffer: &[u8], account_lengths: I, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (instruction_account_index, pre_len) in (0..instruction_context @@ -385,7 +396,7 @@ pub fn deserialize_parameters_unaligned>( .ok_or(InstructionError::InvalidArgument)?, ); if borrowed_account.get_lamports() != lamports { - borrowed_account.set_lamports(lamports)?; + borrowed_account.set_lamports(lamports, feature_set)?; } start += size_of::() // lamports + size_of::(); // data length @@ -396,9 +407,9 @@ pub fn deserialize_parameters_unaligned>( // The redundant check helps to avoid the expensive data comparison if we can match borrowed_account .can_data_be_resized(data.len()) - .and_then(|_| borrowed_account.can_data_be_changed()) + .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) { - Ok(()) => borrowed_account.set_data_from_slice(data)?, + Ok(()) => borrowed_account.set_data_from_slice(data, feature_set)?, Err(err) if borrowed_account.get_data() != data => return Err(err), _ => {} } @@ -417,6 +428,7 @@ fn serialize_parameters_aligned( instruction_data: &[u8], program_id: &Pubkey, copy_account_data: bool, + feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -466,13 +478,13 @@ fn serialize_parameters_aligned( s.write::(NON_DUP_MARKER); s.write::(borrowed_account.is_signer() as u8); s.write::(borrowed_account.is_writable() as u8); - s.write::(borrowed_account.is_executable() as u8); + s.write::(borrowed_account.is_executable(feature_set) as u8); s.write_all(&[0u8, 0, 0, 0]); let vm_key_addr = s.write_all(borrowed_account.get_key().as_ref()); let vm_owner_addr = s.write_all(borrowed_account.get_owner().as_ref()); let vm_lamports_addr = s.write::(borrowed_account.get_lamports().to_le()); s.write::((borrowed_account.get_data().len() as u64).to_le()); - let vm_data_addr = s.write_account(&mut borrowed_account)?; + let vm_data_addr = s.write_account(&mut borrowed_account, feature_set)?; s.write::((borrowed_account.get_rent_epoch()).to_le()); accounts_metadata.push(SerializedAccountMetadata { original_data_len: borrowed_account.get_data().len(), @@ -503,6 +515,7 @@ pub fn deserialize_parameters_aligned>( copy_account_data: bool, buffer: &[u8], account_lengths: I, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut start = 
size_of::(); // number of accounts for (instruction_account_index, pre_len) in (0..instruction_context @@ -532,7 +545,7 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?, ); if borrowed_account.get_lamports() != lamports { - borrowed_account.set_lamports(lamports)?; + borrowed_account.set_lamports(lamports, feature_set)?; } start += size_of::(); // lamports let post_len = LittleEndian::read_u64( @@ -554,9 +567,9 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?; match borrowed_account .can_data_be_resized(post_len) - .and_then(|_| borrowed_account.can_data_be_changed()) + .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) { - Ok(()) => borrowed_account.set_data_from_slice(data)?, + Ok(()) => borrowed_account.set_data_from_slice(data, feature_set)?, Err(err) if borrowed_account.get_data() != data => return Err(err), _ => {} } @@ -570,14 +583,14 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?; match borrowed_account .can_data_be_resized(post_len) - .and_then(|_| borrowed_account.can_data_be_changed()) + .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) { Ok(()) => { - borrowed_account.set_data_length(post_len)?; + borrowed_account.set_data_length(post_len, feature_set)?; let allocated_bytes = post_len.saturating_sub(pre_len); if allocated_bytes > 0 { borrowed_account - .get_data_mut()? + .get_data_mut(feature_set)? .get_mut(pre_len..pre_len.saturating_add(allocated_bytes)) .ok_or(InstructionError::InvalidArgument)? .copy_from_slice( @@ -595,15 +608,18 @@ pub fn deserialize_parameters_aligned>( start += size_of::(); // rent_epoch if borrowed_account.get_owner().to_bytes() != owner { // Change the owner at the end so that we are allowed to change the lamports and data before - borrowed_account.set_owner(owner)?; + borrowed_account.set_owner(owner, feature_set)?; } } } Ok(()) } -pub(crate) fn account_data_region_memory_state(account: &BorrowedAccount<'_>) -> MemoryState { - if account.can_data_be_changed().is_ok() { +pub(crate) fn account_data_region_memory_state( + account: &BorrowedAccount<'_>, + feature_set: &FeatureSet, +) -> MemoryState { + if account.can_data_be_changed(feature_set).is_ok() { if account.is_shared() { MemoryState::Cow(account.get_index_in_transaction() as u64) } else { @@ -728,6 +744,7 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, + &invoke_context.feature_set, ); assert_eq!( serialization_result.as_ref().err(), @@ -882,6 +899,7 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, + &invoke_context.feature_set, ) .unwrap(); @@ -920,7 +938,7 @@ mod tests { assert_eq!(account.lamports(), account_info.lamports()); assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); - assert_eq!(account.executable(), account_info.executable); + assert!(account_info.executable); assert_eq!(account.rent_epoch(), account_info.rent_epoch); assert_eq!( @@ -943,6 +961,7 @@ mod tests { copy_account_data, serialized.as_slice(), &accounts_metadata, + &invoke_context.feature_set, ) .unwrap(); for (index_in_transaction, (_key, original_account)) in @@ -973,6 +992,7 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, + &invoke_context.feature_set, ) .unwrap(); let mut serialized_regions = concat_regions(®ions); @@ -1003,7 +1023,7 @@ mod tests { assert_eq!(account.lamports(), 
account_info.lamports()); assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); - assert_eq!(account.executable(), account_info.executable); + assert!(account_info.executable); assert_eq!(account.rent_epoch(), account_info.rent_epoch); } @@ -1013,6 +1033,7 @@ mod tests { copy_account_data, serialized.as_slice(), &account_lengths, + &invoke_context.feature_set, ) .unwrap(); for (index_in_transaction, (_key, original_account)) in diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index c2264b95c294fc..b4368f2172e04f 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -8,7 +8,7 @@ use { memory_region::{MemoryRegion, MemoryState}, }, solana_sdk::{ - feature_set::enable_bpf_loader_set_authority_checked_ix, + feature_set::{enable_bpf_loader_set_authority_checked_ix, FeatureSet}, stable_layout::stable_instruction::StableInstruction, syscalls::{ MAX_CPI_ACCOUNT_INFOS, MAX_CPI_INSTRUCTION_ACCOUNTS, MAX_CPI_INSTRUCTION_DATA_LEN, @@ -95,10 +95,6 @@ struct CallerAccount<'a, 'b> { // the pointer field and ref_to_len_in_vm points to the length field. vm_data_addr: u64, ref_to_len_in_vm: VmValue<'b, 'a, u64>, - // To be removed once `feature_set::move_serialized_len_ptr_in_cpi` is active everywhere - serialized_len_ptr: *mut u64, - executable: bool, - rent_epoch: u64, } impl<'a, 'b> CallerAccount<'a, 'b> { @@ -106,7 +102,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { fn from_account_info( invoke_context: &InvokeContext, memory_mapping: &'b MemoryMapping<'a>, - is_disable_cpi_setting_executable_and_rent_epoch_active: bool, _vm_addr: u64, account_info: &AccountInfo, account_metadata: &SerializedAccountMetadata, @@ -156,7 +151,7 @@ impl<'a, 'b> CallerAccount<'a, 'b> { invoke_context.get_check_aligned(), )?; - let (serialized_data, vm_data_addr, ref_to_len_in_vm, serialized_len_ptr) = { + let (serialized_data, vm_data_addr, ref_to_len_in_vm) = { // Double translate data out of RefCell let data = *translate_type::<&[u8]>( memory_mapping, @@ -203,20 +198,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { )? as *mut u64; VmValue::Translated(unsafe { &mut *translated }) }; - let serialized_len_ptr = if invoke_context - .feature_set - .is_active(&feature_set::move_serialized_len_ptr_in_cpi::id()) - { - std::ptr::null_mut() - } else { - let ref_of_len_in_input_buffer = - (data.as_ptr() as *const _ as u64).saturating_sub(8); - translate_type_mut::( - memory_mapping, - ref_of_len_in_input_buffer, - invoke_context.get_check_aligned(), - )? - }; let vm_data_addr = data.as_ptr() as u64; let serialized_data = if direct_mapping { @@ -239,15 +220,9 @@ impl<'a, 'b> CallerAccount<'a, 'b> { vm_data_addr, data.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? 
}; - ( - serialized_data, - vm_data_addr, - ref_to_len_in_vm, - serialized_len_ptr, - ) + (serialized_data, vm_data_addr, ref_to_len_in_vm) }; Ok(CallerAccount { @@ -257,17 +232,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { serialized_data, vm_data_addr, ref_to_len_in_vm, - serialized_len_ptr, - executable: if is_disable_cpi_setting_executable_and_rent_epoch_active { - false - } else { - account_info.executable - }, - rent_epoch: if is_disable_cpi_setting_executable_and_rent_epoch_active { - 0 - } else { - account_info.rent_epoch - }, }) } @@ -275,7 +239,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { fn from_sol_account_info( invoke_context: &InvokeContext, memory_mapping: &'b MemoryMapping<'a>, - is_disable_cpi_setting_executable_and_rent_epoch_active: bool, vm_addr: u64, account_info: &SolAccountInfo, account_metadata: &SerializedAccountMetadata, @@ -344,7 +307,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { account_info.data_addr, account_info.data_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? }; @@ -378,21 +340,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { VmValue::Translated(unsafe { &mut *(data_len_addr as *mut u64) }) }; - let ref_of_len_in_input_buffer = - (account_info.data_addr as *mut u8 as u64).saturating_sub(8); - let serialized_len_ptr = if invoke_context - .feature_set - .is_active(&feature_set::move_serialized_len_ptr_in_cpi::id()) - { - std::ptr::null_mut() - } else { - translate_type_mut::( - memory_mapping, - ref_of_len_in_input_buffer, - invoke_context.get_check_aligned(), - )? - }; - Ok(CallerAccount { lamports, owner, @@ -400,17 +347,6 @@ impl<'a, 'b> CallerAccount<'a, 'b> { serialized_data, vm_data_addr: account_info.data_addr, ref_to_len_in_vm, - serialized_len_ptr, - executable: if is_disable_cpi_setting_executable_and_rent_epoch_active { - false - } else { - account_info.executable - }, - rent_epoch: if is_disable_cpi_setting_executable_and_rent_epoch_active { - 0 - } else { - account_info.rent_epoch - }, }) } @@ -498,31 +434,21 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { ix.accounts.as_ptr() as u64, ix.accounts.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; - let accounts = if invoke_context - .feature_set - .is_active(&feature_set::disable_cpi_setting_executable_and_rent_epoch::id()) - { - let mut accounts = Vec::with_capacity(ix.accounts.len()); - #[allow(clippy::needless_range_loop)] - for account_index in 0..ix.accounts.len() { - #[allow(clippy::indexing_slicing)] - let account_meta = &account_metas[account_index]; - if unsafe { - std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 - || std::ptr::read_volatile( - &account_meta.is_writable as *const _ as *const u8, - ) > 1 - } { - return Err(Box::new(InstructionError::InvalidArgument)); - } - accounts.push(account_meta.clone()); + let mut accounts = Vec::with_capacity(ix.accounts.len()); + #[allow(clippy::needless_range_loop)] + for account_index in 0..ix.accounts.len() { + #[allow(clippy::indexing_slicing)] + let account_meta = &account_metas[account_index]; + if unsafe { + std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 + || std::ptr::read_volatile(&account_meta.is_writable as *const _ as *const u8) + > 1 + } { + return Err(Box::new(InstructionError::InvalidArgument)); } - accounts - } else { - account_metas.to_vec() - }; + accounts.push(account_meta.clone()); + } let ix_data_len = ix.data.len() as u64; if invoke_context @@ -542,7 +468,6 @@ impl SyscallInvokeSigned for 
SyscallInvokeSignedRust { ix.data.as_ptr() as u64, ix_data_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? .to_vec(); @@ -597,7 +522,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { signers_seeds_addr, signers_seeds_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if signers_seeds.len() > MAX_SIGNERS { return Err(Box::new(SyscallError::TooManySigners)); @@ -608,7 +532,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { signer_seeds.as_ptr() as *const _ as u64, signer_seeds.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if untranslated_seeds.len() > MAX_SEEDS { return Err(Box::new(InstructionError::MaxSeedLengthExceeded)); @@ -621,7 +544,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { untranslated_seed.as_ptr() as *const _ as u64, untranslated_seed.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), ) }) .collect::, Error>>()?; @@ -740,7 +662,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { ix_c.accounts_addr, ix_c.accounts_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let ix_data_len = ix_c.data_len; @@ -761,56 +682,32 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { ix_c.data_addr, ix_data_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? .to_vec(); - let accounts = if invoke_context - .feature_set - .is_active(&feature_set::disable_cpi_setting_executable_and_rent_epoch::id()) - { - let mut accounts = Vec::with_capacity(ix_c.accounts_len as usize); - #[allow(clippy::needless_range_loop)] - for account_index in 0..ix_c.accounts_len as usize { - #[allow(clippy::indexing_slicing)] - let account_meta = &account_metas[account_index]; - if unsafe { - std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 - || std::ptr::read_volatile( - &account_meta.is_writable as *const _ as *const u8, - ) > 1 - } { - return Err(Box::new(InstructionError::InvalidArgument)); - } - let pubkey = translate_type::( - memory_mapping, - account_meta.pubkey_addr, - invoke_context.get_check_aligned(), - )?; - accounts.push(AccountMeta { - pubkey: *pubkey, - is_signer: account_meta.is_signer, - is_writable: account_meta.is_writable, - }); + let mut accounts = Vec::with_capacity(ix_c.accounts_len as usize); + #[allow(clippy::needless_range_loop)] + for account_index in 0..ix_c.accounts_len as usize { + #[allow(clippy::indexing_slicing)] + let account_meta = &account_metas[account_index]; + if unsafe { + std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 + || std::ptr::read_volatile(&account_meta.is_writable as *const _ as *const u8) + > 1 + } { + return Err(Box::new(InstructionError::InvalidArgument)); } - accounts - } else { - account_metas - .iter() - .map(|account_meta| { - let pubkey = translate_type::( - memory_mapping, - account_meta.pubkey_addr, - invoke_context.get_check_aligned(), - )?; - Ok(AccountMeta { - pubkey: *pubkey, - is_signer: account_meta.is_signer, - is_writable: account_meta.is_writable, - }) - }) - .collect::, Error>>()? 
- }; + let pubkey = translate_type::( + memory_mapping, + account_meta.pubkey_addr, + invoke_context.get_check_aligned(), + )?; + accounts.push(AccountMeta { + pubkey: *pubkey, + is_signer: account_meta.is_signer, + is_writable: account_meta.is_writable, + }); + } Ok(StableInstruction { accounts: accounts.into(), @@ -862,7 +759,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { signers_seeds_addr, signers_seeds_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if signers_seeds.len() > MAX_SIGNERS { return Err(Box::new(SyscallError::TooManySigners)); @@ -875,7 +771,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { signer_seeds.addr, signer_seeds.len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if seeds.len() > MAX_SEEDS { return Err(Box::new(InstructionError::MaxSeedLengthExceeded) as Error); @@ -888,7 +783,6 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { seed.addr, seed.len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), ) }) .collect::, Error>>()?; @@ -917,37 +811,19 @@ where account_infos_addr, account_infos_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; check_account_infos(account_infos.len(), invoke_context)?; - let account_info_keys = if invoke_context - .feature_set - .is_active(&feature_set::disable_cpi_setting_executable_and_rent_epoch::id()) - { - let mut account_info_keys = Vec::with_capacity(account_infos_len as usize); - #[allow(clippy::needless_range_loop)] - for account_index in 0..account_infos_len as usize { - #[allow(clippy::indexing_slicing)] - let account_info = &account_infos[account_index]; - account_info_keys.push(translate_type::( - memory_mapping, - key_addr(account_info), - invoke_context.get_check_aligned(), - )?); - } - account_info_keys - } else { - account_infos - .iter() - .map(|account_info| { - translate_type::( - memory_mapping, - key_addr(account_info), - invoke_context.get_check_aligned(), - ) - }) - .collect::, Error>>()? 
- }; + let mut account_info_keys = Vec::with_capacity(account_infos_len as usize); + #[allow(clippy::needless_range_loop)] + for account_index in 0..account_infos_len as usize { + #[allow(clippy::indexing_slicing)] + let account_info = &account_infos[account_index]; + account_info_keys.push(translate_type::( + memory_mapping, + key_addr(account_info), + invoke_context.get_check_aligned(), + )?); + } Ok((account_infos, account_info_keys)) } @@ -968,7 +844,6 @@ where F: Fn( &InvokeContext, &'b MemoryMapping<'a>, - bool, u64, &T, &SerializedAccountMetadata, @@ -977,9 +852,6 @@ where let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let mut accounts = Vec::with_capacity(instruction_accounts.len().saturating_add(1)); - let is_disable_cpi_setting_executable_and_rent_epoch_active = invoke_context - .feature_set - .is_active(&disable_cpi_setting_executable_and_rent_epoch::id()); let program_account_index = program_indices .last() @@ -1011,7 +883,7 @@ where .transaction_context .get_key_of_account_at_index(instruction_account.index_in_transaction)?; - if callee_account.is_executable() { + if callee_account.is_executable(&invoke_context.feature_set) { // Use the known account consume_compute_meter( invoke_context, @@ -1044,7 +916,6 @@ where do_translate( invoke_context, memory_mapping, - is_disable_cpi_setting_executable_and_rent_epoch_active, account_infos_addr.saturating_add( caller_account_index.saturating_mul(mem::size_of::()) as u64, ), @@ -1268,6 +1139,7 @@ fn cpi_common( caller_account, &callee_account, is_loader_deprecated, + &invoke_context.feature_set, )?; } } @@ -1307,11 +1179,8 @@ fn update_callee_account( mut callee_account: BorrowedAccount<'_>, direct_mapping: bool, ) -> Result<(), Error> { - let is_disable_cpi_setting_executable_and_rent_epoch_active = invoke_context - .feature_set - .is_active(&disable_cpi_setting_executable_and_rent_epoch::id()); if callee_account.get_lamports() != *caller_account.lamports { - callee_account.set_lamports(*caller_account.lamports)?; + callee_account.set_lamports(*caller_account.lamports, &invoke_context.feature_set)?; } if direct_mapping { @@ -1319,7 +1188,7 @@ fn update_callee_account( let post_len = *caller_account.ref_to_len_in_vm.get()? as usize; match callee_account .can_data_be_resized(post_len) - .and_then(|_| callee_account.can_data_be_changed()) + .and_then(|_| callee_account.can_data_be_changed(&invoke_context.feature_set)) { Ok(()) => { let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); @@ -1327,7 +1196,7 @@ fn update_callee_account( if is_loader_deprecated && realloc_bytes_used > 0 { return Err(InstructionError::InvalidRealloc.into()); } - callee_account.set_data_length(post_len)?; + callee_account.set_data_length(post_len, &invoke_context.feature_set)?; if realloc_bytes_used > 0 { let serialized_data = translate_slice::( memory_mapping, @@ -1336,10 +1205,9 @@ fn update_callee_account( .saturating_add(caller_account.original_data_len as u64), realloc_bytes_used as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; callee_account - .get_data_mut()? + .get_data_mut(&invoke_context.feature_set)? .get_mut(caller_account.original_data_len..post_len) .ok_or(SyscallError::InvalidLength)? 
.copy_from_slice(serialized_data); @@ -1354,9 +1222,10 @@ fn update_callee_account( // The redundant check helps to avoid the expensive data comparison if we can match callee_account .can_data_be_resized(caller_account.serialized_data.len()) - .and_then(|_| callee_account.can_data_be_changed()) + .and_then(|_| callee_account.can_data_be_changed(&invoke_context.feature_set)) { - Ok(()) => callee_account.set_data_from_slice(caller_account.serialized_data)?, + Ok(()) => callee_account + .set_data_from_slice(caller_account.serialized_data, &invoke_context.feature_set)?, Err(err) if callee_account.get_data() != caller_account.serialized_data => { return Err(Box::new(err)); } @@ -1364,28 +1233,9 @@ fn update_callee_account( } } - if !is_disable_cpi_setting_executable_and_rent_epoch_active - && callee_account.is_executable() != caller_account.executable - { - callee_account.set_executable(caller_account.executable)?; - } - // Change the owner at the end so that we are allowed to change the lamports and data before if callee_account.get_owner() != caller_account.owner { - callee_account.set_owner(caller_account.owner.as_ref())?; - } - - // BorrowedAccount doesn't allow changing the rent epoch. Drop it and use - // AccountSharedData directly. - let index_in_transaction = callee_account.get_index_in_transaction(); - drop(callee_account); - let callee_account = invoke_context - .transaction_context - .get_account_at_index(index_in_transaction)?; - if !is_disable_cpi_setting_executable_and_rent_epoch_active - && callee_account.borrow().rent_epoch() != caller_account.rent_epoch - { - return Err(Box::new(InstructionError::RentEpochModified)); + callee_account.set_owner(caller_account.owner.as_ref(), &invoke_context.feature_set)?; } Ok(()) @@ -1396,6 +1246,7 @@ fn update_caller_account_perms( caller_account: &CallerAccount, callee_account: &BorrowedAccount<'_>, is_loader_deprecated: bool, + feature_set: &FeatureSet, ) -> Result<(), Error> { let CallerAccount { original_data_len, @@ -1405,9 +1256,10 @@ fn update_caller_account_perms( let data_region = account_data_region(memory_mapping, *vm_data_addr, *original_data_len)?; if let Some(region) = data_region { - region - .state - .set(account_data_region_memory_state(callee_account)); + region.state.set(account_data_region_memory_state( + callee_account, + feature_set, + )); } let realloc_region = account_realloc_region( memory_mapping, @@ -1418,7 +1270,7 @@ fn update_caller_account_perms( if let Some(region) = realloc_region { region .state - .set(if callee_account.can_data_be_changed().is_ok() { + .set(if callee_account.can_data_be_changed(feature_set).is_ok() { MemoryState::Writable } else { MemoryState::Readable @@ -1454,21 +1306,28 @@ fn update_caller_account( caller_account.vm_data_addr, caller_account.original_data_len, )? { - // Since each instruction account is directly mapped in a memory region - // with a *fixed* length, upon returning from CPI we must ensure that the - // current capacity is at least the original length (what is mapped in - // memory), so that the account's memory region never points to an - // invalid address. + // Since each instruction account is directly mapped in a memory region with a *fixed* + // length, upon returning from CPI we must ensure that the current capacity is at least + // the original length (what is mapped in memory), so that the account's memory region + // never points to an invalid address. 
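+        // Concretely: if 100 bytes of account data were mapped at serialization time and
+        // the callee shrinks the account to 10 bytes, the backing allocation must still be
+        // able to hold 100 bytes, otherwise the fixed-length region would dangle.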
+ // + // Note that the capacity can be smaller than the original length only if the account is + // reallocated using the AccountSharedData API directly (deprecated). BorrowedAccount + // and CoW don't trigger this, see BorrowedAccount::make_data_mut. let min_capacity = caller_account.original_data_len; if callee_account.capacity() < min_capacity { - callee_account.reserve(min_capacity.saturating_sub(callee_account.capacity()))?; + callee_account + .reserve(min_capacity.saturating_sub(callee_account.get_data().len()))?; zero_all_mapped_spare_capacity = true; } - // If an account's data pointer has changed - because of CoW, reserve() as called above - // or because of using AccountSharedData directly (deprecated) - we must update the - // corresponding MemoryRegion in the caller's address space. Address spaces are fixed so - // we don't need to update the MemoryRegion's length. + // If an account's data pointer has changed we must update the corresponding + // MemoryRegion in the caller's address space. Address spaces are fixed so we don't need + // to update the MemoryRegion's length. + // + // An account's data pointer can change if the account is reallocated because of CoW, + // because of BorrowedAccount::make_data_mut or by a program that uses the + // AccountSharedData API directly (deprecated). let callee_ptr = callee_account.get_data().as_ptr() as u64; if region.host_addr.get() != callee_ptr { region.host_addr.set(callee_ptr); @@ -1479,7 +1338,6 @@ fn update_caller_account( let prev_len = *caller_account.ref_to_len_in_vm.get()? as usize; let post_len = callee_account.get_data().len(); - let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); if prev_len != post_len { let max_increase = if direct_mapping && !invoke_context.get_check_aligned() { 0 @@ -1503,37 +1361,8 @@ fn update_caller_account( if post_len < prev_len { if direct_mapping { // We have two separate regions to zero out: the account data - // and the realloc region. - // - // Here we zero the account data region. - let spare_len = if zero_all_mapped_spare_capacity { - // In the unlikely case where the account data vector has - // changed - which can happen during CoW - we zero the whole - // extra capacity up to the original data length. - // - // The extra capacity up to original data length is - // accessible from the vm and since it's uninitialized - // memory, it could be a source of non determinism. - caller_account.original_data_len - } else { - // If the allocation has not changed, we only zero the - // difference between the previous and current lengths. The - // rest of the memory contains whatever it contained before, - // which is deterministic. - prev_len - } - .saturating_sub(post_len); - if spare_len > 0 { - let dst = callee_account - .spare_data_capacity_mut()? - .get_mut(..spare_len) - .ok_or_else(|| Box::new(InstructionError::AccountDataTooSmall))? - .as_mut_ptr(); - // Safety: we check bounds above - unsafe { ptr::write_bytes(dst, 0, spare_len) }; - } - - // Here we zero the realloc region. + // and the realloc region. Here we zero the realloc region, the + // data region is zeroed further down below. 
// // This is done for compatibility but really only necessary for // the fringe case of a program calling itself, see @@ -1580,7 +1409,6 @@ fn update_caller_account( .saturating_add(dirty_realloc_start as u64), dirty_realloc_len as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; serialized_data.fill(0); } @@ -1601,32 +1429,102 @@ fn update_caller_account( caller_account.vm_data_addr, post_len as u64, false, // Don't care since it is byte aligned - invoke_context.get_check_size(), )?; } // this is the len field in the AccountInfo::data slice *caller_account.ref_to_len_in_vm.get_mut()? = post_len as u64; // this is the len field in the serialized parameters - if invoke_context - .feature_set - .is_active(&feature_set::move_serialized_len_ptr_in_cpi::id()) - { - let serialized_len_ptr = translate_type_mut::( - memory_mapping, - caller_account - .vm_data_addr - .saturating_sub(std::mem::size_of::() as u64), - invoke_context.get_check_aligned(), - )?; - *serialized_len_ptr = post_len as u64; + let serialized_len_ptr = translate_type_mut::( + memory_mapping, + caller_account + .vm_data_addr + .saturating_sub(std::mem::size_of::() as u64), + invoke_context.get_check_aligned(), + )?; + *serialized_len_ptr = post_len as u64; + } + + if direct_mapping { + // Here we zero the account data region. + // + // If zero_all_mapped_spare_capacity=true, we need to zero regardless of whether the account + // size changed, because the underlying vector holding the account might have been + // reallocated and contain uninitialized memory in the spare capacity. + // + // See TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION for an example of + // this case. + let spare_len = if zero_all_mapped_spare_capacity { + // In the unlikely case where the account data vector has + // changed - which can happen during CoW - we zero the whole + // extra capacity up to the original data length. + // + // The extra capacity up to original data length is + // accessible from the vm and since it's uninitialized + // memory, it could be a source of non determinism. + caller_account.original_data_len } else { - unsafe { - *caller_account.serialized_len_ptr = post_len as u64; + // If the allocation has not changed, we only zero the + // difference between the previous and current lengths. The + // rest of the memory contains whatever it contained before, + // which is deterministic. + prev_len + } + .saturating_sub(post_len); + + if spare_len > 0 { + let dst = callee_account + .spare_data_capacity_mut()? + .get_mut(..spare_len) + .ok_or_else(|| Box::new(InstructionError::AccountDataTooSmall))? + .as_mut_ptr(); + // Safety: we check bounds above + unsafe { ptr::write_bytes(dst, 0, spare_len) }; + } + + // Propagate changes to the realloc region in the callee up to the caller. + let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); + if realloc_bytes_used > 0 { + // In the is_loader_deprecated case, we must have failed with + // InvalidRealloc by now. + debug_assert!(!is_loader_deprecated); + + let to_slice = { + // If a callee reallocs an account, we write into the caller's + // realloc region regardless of whether the caller has write + // permissions to the account or not. If the callee has been able to + // make changes, it means they had permissions to do so, and here + // we're just going to reflect those changes to the caller's frame. + // + // Therefore we temporarily configure the realloc region as writable + // then set it back to whatever state it had. 
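+                // Restoring the original state via defer! (rather than at the end of the
+                // block) makes the permission flip hold even if the translation below
+                // returns early with `?`.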
+ let realloc_region = caller_account + .realloc_region(memory_mapping, is_loader_deprecated)? + .unwrap(); // unwrapping here is fine, we asserted !is_loader_deprecated + let original_state = realloc_region.state.replace(MemoryState::Writable); + defer! { + realloc_region.state.set(original_state); + }; + + translate_slice_mut::( + memory_mapping, + caller_account + .vm_data_addr + .saturating_add(caller_account.original_data_len as u64), + realloc_bytes_used as u64, + invoke_context.get_check_aligned(), + )? + }; + let from_slice = callee_account + .get_data() + .get(caller_account.original_data_len..post_len) + .ok_or(SyscallError::InvalidLength)?; + if to_slice.len() != from_slice.len() { + return Err(Box::new(InstructionError::AccountDataTooSmall)); } + to_slice.copy_from_slice(from_slice); } - } - if !direct_mapping { + } else { let to_slice = &mut caller_account.serialized_data; let from_slice = callee_account .get_data() @@ -1636,46 +1534,6 @@ fn update_caller_account( return Err(Box::new(InstructionError::AccountDataTooSmall)); } to_slice.copy_from_slice(from_slice); - } else if realloc_bytes_used > 0 { - // In the is_loader_deprecated case, we must have failed with - // InvalidRealloc by now. - debug_assert!(!is_loader_deprecated); - - let to_slice = { - // If a callee reallocs an account, we write into the caller's - // realloc region regardless of whether the caller has write - // permissions to the account or not. If the callee has been able to - // make changes, it means they had permissions to do so, and here - // we're just going to reflect those changes to the caller's frame. - // - // Therefore we temporarily configure the realloc region as writable - // then set it back to whatever state it had. - let realloc_region = caller_account - .realloc_region(memory_mapping, is_loader_deprecated)? - .unwrap(); // unwrapping here is fine, we asserted !is_loader_deprecated - let original_state = realloc_region.state.replace(MemoryState::Writable); - defer! { - realloc_region.state.set(original_state); - }; - - translate_slice_mut::( - memory_mapping, - caller_account - .vm_data_addr - .saturating_add(caller_account.original_data_len as u64), - realloc_bytes_used as u64, - invoke_context.get_check_aligned(), - invoke_context.get_check_size(), - )? 
- }; - let from_slice = callee_account - .get_data() - .get(caller_account.original_data_len..post_len) - .ok_or(SyscallError::InvalidLength)?; - if to_slice.len() != from_slice.len() { - return Err(Box::new(InstructionError::AccountDataTooSmall)); - } - to_slice.copy_from_slice(from_slice); } Ok(()) @@ -1733,7 +1591,7 @@ mod tests { ebpf::MM_INPUT_START, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, }, solana_sdk::{ - account::{Account, AccountSharedData}, + account::{Account, AccountSharedData, ReadableAccount}, clock::Epoch, feature_set::bpf_account_data_direct_mapping, instruction::Instruction, @@ -1909,7 +1767,6 @@ mod tests { let caller_account = CallerAccount::from_account_info( &invoke_context, &memory_mapping, - false, vm_addr, account_info, &account_metadata, @@ -1923,8 +1780,6 @@ mod tests { account.data().len() ); assert_eq!(caller_account.serialized_data, account.data()); - assert_eq!(caller_account.executable, account.executable()); - assert_eq!(caller_account.rent_epoch, account.rent_epoch()); } #[test] @@ -1963,9 +1818,11 @@ mod tests { let mut callee_account = borrow_instruction_account!(invoke_context, 0); - callee_account.set_lamports(42).unwrap(); callee_account - .set_owner(Pubkey::new_unique().as_ref()) + .set_lamports(42, &invoke_context.feature_set) + .unwrap(); + callee_account + .set_owner(Pubkey::new_unique().as_ref(), &invoke_context.feature_set) .unwrap(); update_caller_account( @@ -2034,7 +1891,9 @@ mod tests { (b"foobazbad".to_vec(), MAX_PERMITTED_DATA_INCREASE - 3), ] { assert_eq!(caller_account.serialized_data, callee_account.get_data()); - callee_account.set_data_from_slice(&new_value).unwrap(); + callee_account + .set_data_from_slice(&new_value, &invoke_context.feature_set) + .unwrap(); update_caller_account( &invoke_context, @@ -2062,7 +1921,10 @@ mod tests { } callee_account - .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) + .set_data_length( + original_data_len + MAX_PERMITTED_DATA_INCREASE, + &invoke_context.feature_set, + ) .unwrap(); update_caller_account( &invoke_context, @@ -2078,7 +1940,10 @@ mod tests { assert!(is_zeroed(&data_slice[data_len..])); callee_account - .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) + .set_data_length( + original_data_len + MAX_PERMITTED_DATA_INCREASE + 1, + &invoke_context.feature_set, + ) .unwrap(); assert_matches!( update_caller_account( @@ -2093,9 +1958,11 @@ mod tests { ); // close the account - callee_account.set_data_length(0).unwrap(); callee_account - .set_owner(system_program::id().as_ref()) + .set_data_length(0, &invoke_context.feature_set) + .unwrap(); + callee_account + .set_owner(system_program::id().as_ref(), &invoke_context.feature_set) .unwrap(); update_caller_account( &invoke_context, @@ -2164,9 +2031,13 @@ mod tests { (vec![], 0), // check lower bound ] { if change_ptr { - callee_account.set_data(new_value).unwrap(); + callee_account + .set_data(new_value, &invoke_context.feature_set) + .unwrap(); } else { - callee_account.set_data_from_slice(&new_value).unwrap(); + callee_account + .set_data_from_slice(&new_value, &invoke_context.feature_set) + .unwrap(); } update_caller_account( @@ -2181,15 +2052,9 @@ mod tests { // check that the caller account data pointer always matches the callee account data pointer assert_eq!( - translate_slice::( - &memory_mapping, - caller_account.vm_data_addr, - 1, - true, - true - ) - .unwrap() - .as_ptr(), + translate_slice::(&memory_mapping, caller_account.vm_data_addr, 1, true,) + .unwrap() + .as_ptr(), 
callee_account.get_data().as_ptr() ); @@ -2209,7 +2074,6 @@ mod tests { .saturating_add(caller_account.original_data_len as u64), MAX_PERMITTED_DATA_INCREASE as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), ) .unwrap(); @@ -2243,7 +2107,10 @@ mod tests { } callee_account - .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) + .set_data_length( + original_data_len + MAX_PERMITTED_DATA_INCREASE, + &invoke_context.feature_set, + ) .unwrap(); update_caller_account( &invoke_context, @@ -2261,7 +2128,10 @@ mod tests { ); callee_account - .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) + .set_data_length( + original_data_len + MAX_PERMITTED_DATA_INCREASE + 1, + &invoke_context.feature_set, + ) .unwrap(); assert_matches!( update_caller_account( @@ -2276,9 +2146,11 @@ mod tests { ); // close the account - callee_account.set_data_length(0).unwrap(); callee_account - .set_owner(system_program::id().as_ref()) + .set_data_length(0, &invoke_context.feature_set) + .unwrap(); + callee_account + .set_owner(system_program::id().as_ref(), &invoke_context.feature_set) .unwrap(); update_caller_account( &invoke_context, @@ -2359,7 +2231,6 @@ mod tests { caller_account.vm_data_addr, callee_account.get_data().len() as u64, true, - true, ) .unwrap(); assert_eq!(data, callee_account.get_data()); @@ -2622,7 +2493,9 @@ mod tests { // this is done when a writable account is mapped, and it ensures // through make_data_mut() that the account is made writable and resized // with enough padding to hold the realloc padding - callee_account.get_data_mut().unwrap(); + callee_account + .get_data_mut(&invoke_context.feature_set) + .unwrap(); let serialized_data = translate_slice_mut::( &memory_mapping, @@ -2631,7 +2504,6 @@ mod tests { .saturating_add(caller_account.original_data_len as u64), 3, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), ) .unwrap(); serialized_data.copy_from_slice(b"baz"); @@ -2830,9 +2702,6 @@ mod tests { serialized_data: data, vm_data_addr: self.vm_addr + mem::size_of::() as u64, ref_to_len_in_vm: VmValue::Translated(&mut self.len), - serialized_len_ptr: std::ptr::null_mut(), - executable: false, - rent_epoch: 0, } } } diff --git a/programs/bpf_loader/src/syscalls/logging.rs b/programs/bpf_loader/src/syscalls/logging.rs index c5faf0a1057fde..fd3994fd88ee75 100644 --- a/programs/bpf_loader/src/syscalls/logging.rs +++ b/programs/bpf_loader/src/syscalls/logging.rs @@ -23,7 +23,6 @@ declare_builtin_function!( addr, len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), invoke_context .feature_set .is_active(&stop_truncating_strings_in_syscalls::id()), @@ -129,7 +128,6 @@ declare_builtin_function!( addr, len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; consume_compute_meter( @@ -153,7 +151,6 @@ declare_builtin_function!( untranslated_field.as_ptr() as *const _ as u64, untranslated_field.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?); } diff --git a/programs/bpf_loader/src/syscalls/mem_ops.rs b/programs/bpf_loader/src/syscalls/mem_ops.rs index 9354270ac2f0b7..f02f1935d96c9f 100644 --- a/programs/bpf_loader/src/syscalls/mem_ops.rs +++ b/programs/bpf_loader/src/syscalls/mem_ops.rs @@ -84,14 +84,12 @@ declare_builtin_function!( s1_addr, n, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let s2 = translate_slice::( memory_mapping, s2_addr, n, invoke_context.get_check_aligned(), - 
invoke_context.get_check_size(), )?; let cmp_result = translate_type_mut::( memory_mapping, @@ -137,7 +135,6 @@ declare_builtin_function!( dst_addr, n, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; s.fill(c as u8); Ok(0) @@ -163,7 +160,6 @@ fn memmove( dst_addr, n, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? .as_mut_ptr(); let src_ptr = translate_slice::( @@ -171,7 +167,6 @@ fn memmove( src_addr, n, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? .as_ptr(); @@ -221,6 +216,21 @@ fn memcmp_non_contiguous( n: u64, memory_mapping: &MemoryMapping, ) -> Result { + let memcmp_chunk = |s1_addr, s2_addr, chunk_len| { + let res = unsafe { + let s1 = slice::from_raw_parts(s1_addr, chunk_len); + let s2 = slice::from_raw_parts(s2_addr, chunk_len); + // Safety: + // memcmp is marked unsafe since it assumes that s1 and s2 are exactly chunk_len + // long. The whole point of iter_memory_pair_chunks is to find same length chunks + // across two memory regions. + memcmp(s1, s2, chunk_len) + }; + if res != 0 { + return Err(MemcmpError::Diff(res).into()); + } + Ok(0) + }; match iter_memory_pair_chunks( AccessType::Load, src_addr, @@ -229,21 +239,7 @@ fn memcmp_non_contiguous( n, memory_mapping, false, - |s1_addr, s2_addr, chunk_len| { - let res = unsafe { - let s1 = slice::from_raw_parts(s1_addr, chunk_len); - let s2 = slice::from_raw_parts(s2_addr, chunk_len); - // Safety: - // memcmp is marked unsafe since it assumes that s1 and s2 are exactly chunk_len - // long. The whole point of iter_memory_pair_chunks is to find same length chunks - // across two memory regions. - memcmp(s1, s2, chunk_len) - }; - if res != 0 { - return Err(MemcmpError::Diff(res).into()); - } - Ok(0) - }, + memcmp_chunk, ) { Ok(res) => Ok(res), Err(error) => match error.downcast_ref() { @@ -294,8 +290,8 @@ fn iter_memory_pair_chunks( src_access: AccessType, src_addr: u64, dst_access: AccessType, - mut dst_addr: u64, - n: u64, + dst_addr: u64, + n_bytes: u64, memory_mapping: &MemoryMapping, reverse: bool, mut fun: F, @@ -304,52 +300,90 @@ where T: Default, F: FnMut(*const u8, *const u8, usize) -> Result, { - let mut src_chunk_iter = MemoryChunkIterator::new(memory_mapping, src_access, src_addr, n) - .map_err(EbpfError::from)?; - loop { - // iterate source chunks - let (src_region, src_vm_addr, mut src_len) = match if reverse { - src_chunk_iter.next_back() - } else { - src_chunk_iter.next() - } { - Some(item) => item?, - None => break, - }; - - let mut src_host_addr = Result::from(src_region.vm_to_host(src_vm_addr, src_len as u64))?; - let mut dst_chunk_iter = MemoryChunkIterator::new(memory_mapping, dst_access, dst_addr, n) + let mut src_chunk_iter = + MemoryChunkIterator::new(memory_mapping, src_access, src_addr, n_bytes) .map_err(EbpfError::from)?; - // iterate over destination chunks until this source chunk has been completely copied - while src_len > 0 { - loop { - let (dst_region, dst_vm_addr, dst_len) = match if reverse { - dst_chunk_iter.next_back() + let mut dst_chunk_iter = + MemoryChunkIterator::new(memory_mapping, dst_access, dst_addr, n_bytes) + .map_err(EbpfError::from)?; + + let mut src_chunk = None; + let mut dst_chunk = None; + + macro_rules! memory_chunk { + ($chunk_iter:ident, $chunk:ident) => { + if let Some($chunk) = &mut $chunk { + // Keep processing the current chunk + $chunk + } else { + // This is either the first call or we've processed all the bytes in the current + // chunk. Move to the next one. 
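+                // This is a macro rather than a closure or helper function because it has
+                // to `break` out of the enclosing loop below and hand back a mutable
+                // borrow into the caller's Option.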
+                let chunk = match if reverse {
                    $chunk_iter.next_back()
                } else {
-                    dst_chunk_iter.next()
+                    $chunk_iter.next()
                } {
                    Some(item) => item?,
                    None => break,
                };
-            let dst_host_addr =
-                Result::from(dst_region.vm_to_host(dst_vm_addr, dst_len as u64))?;
-            let chunk_len = src_len.min(dst_len);
-            fun(
-                src_host_addr as *const u8,
-                dst_host_addr as *const u8,
-                chunk_len,
-            )?;
-            src_len = src_len.saturating_sub(chunk_len);
-            if reverse {
-                dst_addr = dst_addr.saturating_sub(chunk_len as u64);
-            } else {
-                dst_addr = dst_addr.saturating_add(chunk_len as u64);
-            }
-            if src_len == 0 {
-                break;
-            }
-            src_host_addr = src_host_addr.saturating_add(chunk_len as u64);
+                $chunk.insert(chunk)
            }
+        };
+    }
+
+    loop {
+        let (src_region, src_chunk_addr, src_remaining) = memory_chunk!(src_chunk_iter, src_chunk);
+        let (dst_region, dst_chunk_addr, dst_remaining) = memory_chunk!(dst_chunk_iter, dst_chunk);
+
+        // We always process same-length pairs
+        let chunk_len = *src_remaining.min(dst_remaining);
+
+        let (src_host_addr, dst_host_addr) = {
+            let (src_addr, dst_addr) = if reverse {
+                // When scanning backwards, not only do we want to visit the regions from
+                // the end, we also want to process the memory within each region backwards.
+                (
+                    src_chunk_addr
+                        .saturating_add(*src_remaining as u64)
+                        .saturating_sub(chunk_len as u64),
+                    dst_chunk_addr
+                        .saturating_add(*dst_remaining as u64)
+                        .saturating_sub(chunk_len as u64),
+                )
+            } else {
+                (*src_chunk_addr, *dst_chunk_addr)
+            };
+
+            (
+                Result::from(src_region.vm_to_host(src_addr, chunk_len as u64))?,
+                Result::from(dst_region.vm_to_host(dst_addr, chunk_len as u64))?,
+            )
+        };
+
+        fun(
+            src_host_addr as *const u8,
+            dst_host_addr as *const u8,
+            chunk_len,
+        )?;
+
+        // Update how many bytes we have left to scan in each chunk
+        *src_remaining = src_remaining.saturating_sub(chunk_len);
+        *dst_remaining = dst_remaining.saturating_sub(chunk_len);
+
+        if !reverse {
+            // We've scanned `chunk_len` bytes so we move the vm address forward. In reverse
+            // mode we don't do this since we make progress by decreasing src_remaining and
+            // dst_remaining instead.
+ *src_chunk_addr = src_chunk_addr.saturating_add(chunk_len as u64); + *dst_chunk_addr = dst_chunk_addr.saturating_add(chunk_len as u64); + } + + if *src_remaining == 0 { + src_chunk = None; + } + + if *dst_remaining == 0 { + dst_chunk = None; } } @@ -476,11 +510,13 @@ impl<'a> DoubleEndedIterator for MemoryChunkIterator<'a> { #[cfg(test)] #[allow(clippy::indexing_slicing)] +#[allow(clippy::arithmetic_side_effects)] mod tests { use { super::*, assert_matches::assert_matches, solana_rbpf::{ebpf::MM_PROGRAM_START, program::SBPFVersion}, + test_case::test_case, }; fn to_chunk_vec<'a>( @@ -739,72 +775,59 @@ mod tests { memmove_non_contiguous(MM_PROGRAM_START, MM_PROGRAM_START + 8, 4, &memory_mapping).unwrap(); } - #[test] - fn test_overlapping_memmove_non_contiguous_right() { + #[test_case(&[], (0, 0, 0); "no regions")] + #[test_case(&[10], (1, 10, 0); "single region 0 len")] + #[test_case(&[10], (0, 5, 5); "single region no overlap")] + #[test_case(&[10], (0, 0, 10) ; "single region complete overlap")] + #[test_case(&[10], (2, 0, 5); "single region partial overlap start")] + #[test_case(&[10], (0, 1, 6); "single region partial overlap middle")] + #[test_case(&[10], (2, 5, 5); "single region partial overlap end")] + #[test_case(&[3, 5], (0, 5, 2) ; "two regions no overlap, single source region")] + #[test_case(&[4, 7], (0, 5, 5) ; "two regions no overlap, multiple source regions")] + #[test_case(&[3, 8], (0, 0, 11) ; "two regions complete overlap")] + #[test_case(&[2, 9], (3, 0, 5) ; "two regions partial overlap start")] + #[test_case(&[3, 9], (1, 2, 5) ; "two regions partial overlap middle")] + #[test_case(&[7, 3], (2, 6, 4) ; "two regions partial overlap end")] + #[test_case(&[2, 6, 3, 4], (0, 10, 2) ; "many regions no overlap, single source region")] + #[test_case(&[2, 1, 2, 5, 6], (2, 10, 4) ; "many regions no overlap, multiple source regions")] + #[test_case(&[8, 1, 3, 6], (0, 0, 18) ; "many regions complete overlap")] + #[test_case(&[7, 3, 1, 4, 5], (5, 0, 8) ; "many regions overlap start")] + #[test_case(&[1, 5, 2, 9, 3], (5, 4, 8) ; "many regions overlap middle")] + #[test_case(&[3, 9, 1, 1, 2, 1], (2, 9, 8) ; "many regions overlap end")] + fn test_memmove_non_contiguous( + regions: &[usize], + (src_offset, dst_offset, len): (usize, usize, usize), + ) { let config = Config { aligned_memory_mapping: false, ..Config::default() }; - let mem1 = vec![0x11; 1]; - let mut mem2 = vec![0x22; 2]; - let mut mem3 = vec![0x33; 3]; - let mut mem4 = vec![0x44; 4]; - let memory_mapping = MemoryMapping::new( - vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_writable(&mut mem2, MM_PROGRAM_START + 1), - MemoryRegion::new_writable(&mut mem3, MM_PROGRAM_START + 3), - MemoryRegion::new_writable(&mut mem4, MM_PROGRAM_START + 6), - ], - &config, - &SBPFVersion::V2, - ) - .unwrap(); - - // overlapping memmove right - the implementation will copy backwards - assert_eq!( - memmove_non_contiguous(MM_PROGRAM_START + 1, MM_PROGRAM_START, 7, &memory_mapping) - .unwrap(), - 0 - ); - assert_eq!(&mem1, &[0x11]); - assert_eq!(&mem2, &[0x11, 0x22]); - assert_eq!(&mem3, &[0x22, 0x33, 0x33]); - assert_eq!(&mem4, &[0x33, 0x44, 0x44, 0x44]); - } - - #[test] - fn test_overlapping_memmove_non_contiguous_left() { - let config = Config { - aligned_memory_mapping: false, - ..Config::default() + let (mem, memory_mapping) = build_memory_mapping(regions, &config); + + // flatten the memory so we can memmove it with ptr::copy + let mut expected_memory = flatten_memory(&mem); + unsafe { + 
std::ptr::copy(
+                expected_memory.as_ptr().add(src_offset),
+                expected_memory.as_mut_ptr().add(dst_offset),
+                len,
+            )
        };
-        let mut mem1 = vec![0x11; 1];
-        let mut mem2 = vec![0x22; 2];
-        let mut mem3 = vec![0x33; 3];
-        let mut mem4 = vec![0x44; 4];
-        let memory_mapping = MemoryMapping::new(
-            vec![
-                MemoryRegion::new_writable(&mut mem1, MM_PROGRAM_START),
-                MemoryRegion::new_writable(&mut mem2, MM_PROGRAM_START + 1),
-                MemoryRegion::new_writable(&mut mem3, MM_PROGRAM_START + 3),
-                MemoryRegion::new_writable(&mut mem4, MM_PROGRAM_START + 6),
-            ],
-            &config,
-            &SBPFVersion::V2,
+
+        // do our memmove
+        memmove_non_contiguous(
+            MM_PROGRAM_START + dst_offset as u64,
+            MM_PROGRAM_START + src_offset as u64,
+            len as u64,
+            &memory_mapping,
        )
        .unwrap();

-        // overlapping memmove left - the implementation will copy forward
-        assert_eq!(
-            memmove_non_contiguous(MM_PROGRAM_START, MM_PROGRAM_START + 1, 7, &memory_mapping)
-                .unwrap(),
-            0
-        );
-        assert_eq!(&mem1, &[0x22]);
-        assert_eq!(&mem2, &[0x22, 0x33]);
-        assert_eq!(&mem3, &[0x33, 0x33, 0x44]);
-        assert_eq!(&mem4, &[0x44, 0x44, 0x44, 0x44]);
+        // flatten memory after our memmove
+        let memory = flatten_memory(&mem);
+
+        // compare the reference memmove (std::ptr::copy) with ours
+        assert_eq!(expected_memory, memory);
     }

     #[test]
@@ -915,4 +938,33 @@ mod tests {
             unsafe { memcmp(b"oobar", b"obarb", 5) }
         );
     }
+
+    fn build_memory_mapping<'a>(
+        regions: &[usize],
+        config: &'a Config,
+    ) -> (Vec<Vec<u8>>, MemoryMapping<'a>) {
+        let mut regs = vec![];
+        let mut mem = Vec::new();
+        let mut offset = 0;
+        for (i, region_len) in regions.iter().enumerate() {
+            mem.push(
+                (0..*region_len)
+                    .map(|x| (i * 10 + x) as u8)
+                    .collect::<Vec<_>>(),
+            );
+            regs.push(MemoryRegion::new_writable(
+                &mut mem[i],
+                MM_PROGRAM_START + offset as u64,
+            ));
+            offset += *region_len;
+        }
+
+        let memory_mapping = MemoryMapping::new(regs, config, &SBPFVersion::V2).unwrap();
+
+        (mem, memory_mapping)
+    }
+
+    fn flatten_memory(mem: &[Vec<u8>]) -> Vec<u8> {
+        mem.iter().flatten().copied().collect()
+    }
 }
diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index 5519ed3aa5db7b..3e6562b8ed7b8a 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -22,7 +22,6 @@ use {
         vm::Config,
     },
     solana_sdk::{
-        account::ReadableAccount,
         account_info::AccountInfo,
         alt_bn128::prelude::{
             alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing, AltBn128Error,
@@ -36,13 +35,13 @@ use {
         feature_set::FeatureSet,
         feature_set::{
             self, blake3_syscall_enabled, curve25519_syscall_enabled,
-            disable_cpi_setting_executable_and_rent_epoch, disable_deploy_of_alloc_free_syscall,
-            disable_fees_sysvar, enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall,
+            disable_deploy_of_alloc_free_syscall, disable_fees_sysvar,
+            enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall,
             enable_big_mod_exp_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall,
             error_on_syscall_bpf_function_hash_collisions, last_restart_slot_sysvar,
-            libsecp256k1_0_5_upgrade_enabled, reject_callx_r10,
-            remaining_compute_units_syscall_enabled, stop_sibling_instruction_search_at_parent,
-            stop_truncating_strings_in_syscalls, switch_to_new_elf_parser,
+            reject_callx_r10, remaining_compute_units_syscall_enabled,
+            stop_sibling_instruction_search_at_parent, stop_truncating_strings_in_syscalls,
+            switch_to_new_elf_parser,
         },
         hash::{Hash, Hasher},
         instruction::{
@@ -274,7 +273,7 @@ pub fn create_program_runtime_environment_v1<'a>(
        max_call_depth: compute_budget.max_call_depth,
        stack_frame_size: compute_budget.stack_frame_size,
        enable_address_translation: true,
-        enable_stack_frame_gaps: true,
+        enable_stack_frame_gaps: !feature_set.is_active(&bpf_account_data_direct_mapping::id()),
        instruction_meter_checkpoint_distance: 10000,
        enable_instruction_meter: true,
        enable_instruction_tracing: debugging_features,
@@ -514,14 +513,13 @@ fn translate_slice_inner<'a, T>(
     vm_addr: u64,
     len: u64,
     check_aligned: bool,
-    check_size: bool,
 ) -> Result<&'a mut [T], Error> {
     if len == 0 {
         return Ok(&mut []);
     }

     let total_size = len.saturating_mul(size_of::<T>() as u64);
-    if check_size && isize::try_from(total_size).is_err() {
+    if isize::try_from(total_size).is_err() {
         return Err(SyscallError::InvalidLength.into());
     }
@@ -537,7 +535,6 @@ fn translate_slice_mut<'a, T>(
     vm_addr: u64,
     len: u64,
     check_aligned: bool,
-    check_size: bool,
 ) -> Result<&'a mut [T], Error> {
     translate_slice_inner::<T>(
         memory_mapping,
@@ -545,7 +542,6 @@
         vm_addr,
         len,
         check_aligned,
-        check_size,
     )
 }
 fn translate_slice<'a, T>(
@@ -553,7 +549,6 @@
     vm_addr: u64,
     len: u64,
     check_aligned: bool,
-    check_size: bool,
 ) -> Result<&'a [T], Error> {
     translate_slice_inner::<T>(
         memory_mapping,
@@ -561,7 +556,6 @@
         vm_addr,
         len,
         check_aligned,
-        check_size,
     )
     .map(|value| &*value)
 }
@@ -573,11 +567,10 @@ fn translate_string_and_do(
     addr: u64,
     len: u64,
     check_aligned: bool,
-    check_size: bool,
     stop_truncating_strings_in_syscalls: bool,
     work: &mut dyn FnMut(&str) -> Result<u64, Error>,
 ) -> Result<u64, Error> {
-    let buf = translate_slice::<u8>(memory_mapping, addr, len, check_aligned, check_size)?;
+    let buf = translate_slice::<u8>(memory_mapping, addr, len, check_aligned)?;
     let msg = if stop_truncating_strings_in_syscalls {
         buf
     } else {
@@ -632,7 +625,6 @@ declare_builtin_function!(
             file,
             len,
             invoke_context.get_check_aligned(),
-            invoke_context.get_check_size(),
             invoke_context
                 .feature_set
                 .is_active(&stop_truncating_strings_in_syscalls::id()),
@@ -685,15 +677,9 @@ fn translate_and_check_program_address_inputs<'a>(
     program_id_addr: u64,
     memory_mapping: &mut MemoryMapping,
     check_aligned: bool,
-    check_size: bool,
 ) -> Result<(Vec<&'a [u8]>, &'a Pubkey), Error> {
-    let untranslated_seeds = translate_slice::<&[u8]>(
-        memory_mapping,
-        seeds_addr,
-        seeds_len,
-        check_aligned,
-        check_size,
-    )?;
+    let untranslated_seeds =
+        translate_slice::<&[u8]>(memory_mapping, seeds_addr, seeds_len, check_aligned)?;
     if untranslated_seeds.len() > MAX_SEEDS {
         return Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into());
     }
@@ -708,7 +694,6 @@
                 untranslated_seed.as_ptr() as *const _ as u64,
                 untranslated_seed.len() as u64,
                 check_aligned,
-                check_size,
             )
         })
         .collect::<Result<Vec<_>, Error>>()?;
@@ -739,7 +724,6 @@ declare_builtin_function!(
            program_id_addr,
            memory_mapping,
            invoke_context.get_check_aligned(),
-            invoke_context.get_check_size(),
        )?;

        let Ok(new_address) = Pubkey::create_program_address(&seeds, program_id) else {
@@ -750,7 +734,6 @@
            address_addr,
            32,
            invoke_context.get_check_aligned(),
-            invoke_context.get_check_size(),
        )?;
        address.copy_from_slice(new_address.as_ref());
        Ok(0)
@@ -780,7 +763,6 @@ declare_builtin_function!(
            program_id_addr,
            memory_mapping,
            invoke_context.get_check_aligned(),
-            invoke_context.get_check_size(),
        )?;

        let mut bump_seed = [std::u8::MAX];
@@ -802,7 +784,6 @@
            address_addr,
            std::mem::size_of::<Pubkey>() as u64,
invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if !is_nonoverlapping( bump_seed_ref as *const _ as usize, @@ -844,21 +825,18 @@ declare_builtin_function!( hash_addr, keccak::HASH_BYTES as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let signature = translate_slice::( memory_mapping, signature_addr, SECP256K1_SIGNATURE_LENGTH as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let secp256k1_recover_result = translate_slice_mut::( memory_mapping, result_addr, SECP256K1_PUBLIC_KEY_LENGTH as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let Ok(message) = libsecp256k1::Message::parse_slice(hash) else { @@ -870,16 +848,7 @@ declare_builtin_function!( let Ok(recovery_id) = libsecp256k1::RecoveryId::parse(adjusted_recover_id_val) else { return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); }; - let sig_parse_result = if invoke_context - .feature_set - .is_active(&libsecp256k1_0_5_upgrade_enabled::id()) - { - libsecp256k1::Signature::parse_standard_slice(signature) - } else { - libsecp256k1::Signature::parse_overflowing_slice(signature) - }; - - let Ok(signature) = sig_parse_result else { + let Ok(signature) = libsecp256k1::Signature::parse_standard_slice(signature) else { return Ok(Secp256k1RecoverError::InvalidSignature.into()); }; @@ -1188,7 +1157,6 @@ declare_builtin_function!( scalars_addr, points_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let points = translate_slice::( @@ -1196,7 +1164,6 @@ declare_builtin_function!( points_addr, points_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if let Some(result_point) = edwards::multiscalar_multiply_edwards(scalars, points) { @@ -1228,7 +1195,6 @@ declare_builtin_function!( scalars_addr, points_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let points = translate_slice::( @@ -1236,7 +1202,6 @@ declare_builtin_function!( points_addr, points_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if let Some(result_point) = @@ -1290,7 +1255,6 @@ declare_builtin_function!( addr, len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? .to_vec() }; @@ -1337,7 +1301,6 @@ declare_builtin_function!( return_data_addr, length, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let to_slice = return_data_result; @@ -1439,14 +1402,12 @@ declare_builtin_function!( data_addr, result_header.data_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let accounts = translate_slice_mut::( memory_mapping, accounts_addr, result_header.accounts_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; if !is_nonoverlapping( @@ -1589,7 +1550,6 @@ declare_builtin_function!( input_addr, input_size, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let call_result = translate_slice_mut::( @@ -1597,7 +1557,6 @@ declare_builtin_function!( result_addr, output as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let calculation = match group_op { @@ -1642,9 +1601,8 @@ declare_builtin_function!( params, 1, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )? 
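+        // the slice was translated with len 1, so it holds exactly one params struct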
- .get(0) + .first() .ok_or(SyscallError::InvalidLength)?; if params.base_len > 512 || params.exponent_len > 512 || params.modulus_len > 512 { @@ -1670,7 +1628,6 @@ declare_builtin_function!( params.base as *const _ as u64, params.base_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let exponent = translate_slice::( @@ -1678,7 +1635,6 @@ declare_builtin_function!( params.exponent as *const _ as u64, params.exponent_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let modulus = translate_slice::( @@ -1686,7 +1642,6 @@ declare_builtin_function!( params.modulus as *const _ as u64, params.modulus_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let value = big_mod_exp(base, exponent, modulus); @@ -1696,7 +1651,6 @@ declare_builtin_function!( return_value, params.modulus_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; return_value.copy_from_slice(value.as_slice()); @@ -1743,14 +1697,12 @@ declare_builtin_function!( result_addr, poseidon::HASH_BYTES as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let inputs = translate_slice::<&[u8]>( memory_mapping, vals_addr, vals_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let inputs = inputs .iter() @@ -1760,7 +1712,6 @@ declare_builtin_function!( input.as_ptr() as *const _ as u64, input.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), ) }) .collect::, Error>>()?; @@ -1842,7 +1793,6 @@ declare_builtin_function!( input_addr, input_size, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let call_result = translate_slice_mut::( @@ -1850,7 +1800,6 @@ declare_builtin_function!( result_addr, output as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; match op { @@ -1933,7 +1882,6 @@ declare_builtin_function!( result_addr, std::mem::size_of::() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let mut hasher = H::create_hasher(); if vals_len > 0 { @@ -1942,7 +1890,6 @@ declare_builtin_function!( vals_addr, vals_len, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; for val in vals.iter() { let bytes = translate_slice::( @@ -1950,7 +1897,6 @@ declare_builtin_function!( val.as_ptr() as u64, val.len() as u64, invoke_context.get_check_aligned(), - invoke_context.get_check_size(), )?; let cost = compute_budget.mem_op_base_cost.max( hash_byte_cost.saturating_mul( @@ -2130,7 +2076,7 @@ mod tests { ) .unwrap(); let translated_data = - translate_slice::(&memory_mapping, data.as_ptr() as u64, 0, true, true).unwrap(); + translate_slice::(&memory_mapping, data.as_ptr() as u64, 0, true).unwrap(); assert_eq!(data, translated_data); assert_eq!(0, translated_data.len()); @@ -2143,24 +2089,18 @@ mod tests { ) .unwrap(); let translated_data = - translate_slice::(&memory_mapping, 0x100000000, data.len() as u64, true, true) - .unwrap(); + translate_slice::(&memory_mapping, 0x100000000, data.len() as u64, true).unwrap(); assert_eq!(data, translated_data); *data.first_mut().unwrap() = 10; assert_eq!(data, translated_data); assert!( - translate_slice::(&memory_mapping, data.as_ptr() as u64, u64::MAX, true, true) - .is_err() + translate_slice::(&memory_mapping, data.as_ptr() as u64, u64::MAX, true).is_err() ); - assert!(translate_slice::( - &memory_mapping, - 0x100000000 - 1, - data.len() as u64, - true, - true - ) - .is_err()); + assert!( + 
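+            // one byte below the region start (mapped at 0x100000000) must fail to translate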
translate_slice::(&memory_mapping, 0x100000000 - 1, data.len() as u64, true,) + .is_err() + ); // u64 let mut data = vec![1u64, 2, 3, 4, 5]; @@ -2174,14 +2114,11 @@ mod tests { ) .unwrap(); let translated_data = - translate_slice::(&memory_mapping, 0x100000000, data.len() as u64, true, true) - .unwrap(); + translate_slice::(&memory_mapping, 0x100000000, data.len() as u64, true).unwrap(); assert_eq!(data, translated_data); *data.first_mut().unwrap() = 10; assert_eq!(data, translated_data); - assert!( - translate_slice::(&memory_mapping, 0x100000000, u64::MAX, true, true).is_err() - ); + assert!(translate_slice::(&memory_mapping, 0x100000000, u64::MAX, true).is_err()); // Pubkeys let mut data = vec![solana_sdk::pubkey::new_rand(); 5]; @@ -2197,7 +2134,7 @@ mod tests { ) .unwrap(); let translated_data = - translate_slice::(&memory_mapping, 0x100000000, data.len() as u64, true, true) + translate_slice::(&memory_mapping, 0x100000000, data.len() as u64, true) .unwrap(); assert_eq!(data, translated_data); *data.first_mut().unwrap() = solana_sdk::pubkey::new_rand(); // Both should point to same place @@ -2222,7 +2159,6 @@ mod tests { string.len() as u64, true, true, - true, &mut |string: &str| { assert_eq!(string, "Gaggablaghblagh!"); Ok(42) @@ -3267,9 +3203,9 @@ mod tests { let mut sysvar_cache = SysvarCache::default(); sysvar_cache.set_clock(src_clock.clone()); - sysvar_cache.set_epoch_schedule(src_epochschedule); + sysvar_cache.set_epoch_schedule(src_epochschedule.clone()); sysvar_cache.set_fees(src_fees.clone()); - sysvar_cache.set_rent(src_rent); + sysvar_cache.set_rent(src_rent.clone()); sysvar_cache.set_epoch_rewards(src_rewards); let transaction_accounts = vec![ @@ -3703,7 +3639,6 @@ mod tests { VM_BASE_ADDRESS.saturating_add(DATA_OFFSET as u64), processed_sibling_instruction.data_len, true, - true, ) .unwrap(); let accounts = translate_slice_mut::( @@ -3711,7 +3646,6 @@ mod tests { VM_BASE_ADDRESS.saturating_add(ACCOUNTS_OFFSET as u64), processed_sibling_instruction.accounts_len, true, - true, ) .unwrap(); diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index d053405698452a..b85715eb171391 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -5,8 +5,8 @@ use { bincode::deserialize, solana_program_runtime::{declare_process_instruction, ic_msg}, solana_sdk::{ - feature_set, instruction::InstructionError, program_utils::limited_deserialize, - pubkey::Pubkey, transaction_context::IndexOfAccount, + instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, + transaction_context::IndexOfAccount, }, std::collections::BTreeSet, }; @@ -102,16 +102,12 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } } - if invoke_context - .feature_set - .is_active(&feature_set::dedupe_config_program_signers::id()) - { - let total_new_keys = key_list.keys.len(); - let unique_new_keys = key_list.keys.into_iter().collect::>(); - if unique_new_keys.len() != total_new_keys { - ic_msg!(invoke_context, "new config contains duplicate keys"); - return Err(InstructionError::InvalidArgument); - } + // dedupe signers + let total_new_keys = key_list.keys.len(); + let unique_new_keys = key_list.keys.into_iter().collect::>(); + if unique_new_keys.len() != total_new_keys { + ic_msg!(invoke_context, "new config contains duplicate keys"); + return Err(InstructionError::InvalidArgument); } // Check for Config data signers not present in incoming account update @@ -131,7 +127,7 
@@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ic_msg!(invoke_context, "instruction data too large"); return Err(InstructionError::InvalidInstructionData); } - config_account.get_data_mut()?[..data.len()].copy_from_slice(data); + config_account.get_data_mut(&invoke_context.feature_set)?[..data.len()].copy_from_slice(data); Ok(()) }); diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 5372975e18e0c8..881dc7a1bb594e 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -248,7 +248,7 @@ pub fn process_instruction_write( } let end_offset = (offset as usize).saturating_add(bytes.len()); program - .get_data_mut()? + .get_data_mut(&invoke_context.feature_set)? .get_mut( LoaderV4State::program_data_offset().saturating_add(offset as usize) ..LoaderV4State::program_data_offset().saturating_add(end_offset), @@ -326,19 +326,20 @@ pub fn process_instruction_truncate( return Err(InstructionError::InvalidArgument); } let lamports_to_receive = program.get_lamports().saturating_sub(required_lamports); - program.checked_sub_lamports(lamports_to_receive)?; - recipient.checked_add_lamports(lamports_to_receive)?; + program.checked_sub_lamports(lamports_to_receive, &invoke_context.feature_set)?; + recipient.checked_add_lamports(lamports_to_receive, &invoke_context.feature_set)?; } std::cmp::Ordering::Equal => {} } if new_size == 0 { - program.set_data_length(0)?; + program.set_data_length(0, &invoke_context.feature_set)?; } else { program.set_data_length( LoaderV4State::program_data_offset().saturating_add(new_size as usize), + &invoke_context.feature_set, )?; if is_initialization { - let state = get_state_mut(program.get_data_mut()?)?; + let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; state.slot = 0; state.status = LoaderV4Status::Retracted; state.authority_address = *authority_address; @@ -433,12 +434,12 @@ pub fn process_instruction_deploy( let rent = invoke_context.get_sysvar_cache().get_rent()?; let required_lamports = rent.minimum_balance(source_program.get_data().len()); let transfer_lamports = required_lamports.saturating_sub(program.get_lamports()); - program.set_data_from_slice(source_program.get_data())?; - source_program.set_data_length(0)?; - source_program.checked_sub_lamports(transfer_lamports)?; - program.checked_add_lamports(transfer_lamports)?; + program.set_data_from_slice(source_program.get_data(), &invoke_context.feature_set)?; + source_program.set_data_length(0, &invoke_context.feature_set)?; + source_program.checked_sub_lamports(transfer_lamports, &invoke_context.feature_set)?; + program.checked_add_lamports(transfer_lamports, &invoke_context.feature_set)?; } - let state = get_state_mut(program.get_data_mut()?)?; + let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; state.slot = current_slot; state.status = LoaderV4Status::Deployed; @@ -465,6 +466,7 @@ pub fn process_instruction_retract( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let authority_address = instruction_context .get_index_of_instruction_account_in_transaction(1) .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; @@ -486,7 +488,7 @@ pub fn process_instruction_retract( ic_logger_msg!(log_collector, "Program is not deployed"); return 
Err(InstructionError::InvalidArgument); } - let state = get_state_mut(program.get_data_mut()?)?; + let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; state.status = LoaderV4Status::Retracted; Ok(()) } @@ -516,7 +518,7 @@ pub fn process_instruction_transfer_authority( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - let state = get_state_mut(program.get_data_mut()?)?; + let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; if let Some(new_authority_address) = new_authority_address { state.authority_address = new_authority_address; } else if matches!(state.status, LoaderV4Status::Deployed) { diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dba8a46be23942..c3eec5d4325a1e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -152,15 +152,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "aquamarine" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760" dependencies = [ "include_dir", "itertools", @@ -419,13 +419,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -472,7 +472,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustversion", "serde", @@ -579,7 +579,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -686,6 +686,16 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "borsh" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" +dependencies = [ + "borsh-derive 1.2.1", + "cfg_aliases", +] + [[package]] name = "borsh-derive" version = "0.9.3" @@ -712,6 +722,20 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478b41ff04256c5c8330f3dfdaaae2a5cc976a8e75088bafa4625b0d0208de8c" +dependencies = [ + "once_cell", + "proc-macro-crate 2.0.1", + "proc-macro2", + "quote", + "syn 2.0.46", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" version = "0.9.3" @@ -914,6 +938,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.31" @@ -1110,9 +1140,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1144,9 +1174,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ "cfg-if 1.0.0", ] @@ -1221,7 +1251,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1232,7 +1262,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1357,9 +1387,9 @@ dependencies = [ [[package]] name = "dir-diff" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2860407d7d7e2e004bb2128510ad9e8d669e76fa005ccf567977b5d71b8b4a0b" +checksum = "a7ad16bf5f84253b50d6557681c58c3ab67c47c77d39fed9aeb56e947290bd10" dependencies = [ "walkdir", ] @@ -1416,7 +1446,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1525,7 +1555,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -1562,23 +1592,12 @@ checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" [[package]] name = "errno" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -1701,11 +1720,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -1716,9 +1735,12 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.9.0" +version = "2.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" +dependencies = [ + "autocfg", +] [[package]] name = "fs_extra" @@ -1734,9 +1756,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1749,9 +1771,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1759,15 +1781,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1777,38 +1799,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1910,7 +1932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.29", + "futures 0.3.30", "log", "reqwest", "serde", @@ -1985,7 +2007,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", ] [[package]] @@ -2078,9 +2100,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -2118,9 +2140,9 @@ checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -2133,7 +2155,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -2147,7 +2169,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.29", + "futures 0.3.30", "headers", "http", "hyper", @@ -2229,9 +2251,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2359,9 +2381,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -2384,7 +2406,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2402,7 +2424,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "futures-executor", "futures-util", "log", @@ -2417,7 +2439,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-client-transports", ] @@ -2439,7 +2461,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2455,7 +2477,7 @@ version = "18.0.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2470,7 +2492,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "jsonrpc-core", "lazy_static", "log", @@ -2486,7 +2508,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.29", + "futures 0.3.30", "globset", "jsonrpc-core", "lazy_static", @@ -2527,9 +2549,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" @@ -2680,9 +2702,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lock_api" @@ -3025,7 +3047,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3107,7 +3129,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3116,10 +3138,10 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3175,9 +3197,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.59" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", @@ -3216,9 +3238,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.95" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -3240,7 +3262,7 @@ dependencies = [ "futures-util", "js-sys", "lazy_static", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "rand 0.8.5", "thiserror", @@ -3284,7 +3306,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.29", + "futures 0.3.30", "libc", "log", "rand 0.7.3", @@ -3387,9 +3409,9 @@ checksum = 
"31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "percentage" @@ -3585,14 +3607,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "prio-graph" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78dd2fa9ca0901b4d0dbf51d9862d7e3fb004605e4f4b4132472c3d08e7d901b" +checksum = "6492a75ca57066a4479af45efa302bed448680182b0563f96300645d5f896097" [[package]] name = "proc-macro-crate" @@ -3613,6 +3635,16 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +dependencies = [ + "toml_datetime", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3639,9 +3671,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "2de98502f212cfcea8d0bb305bd0f49d7ebdd75b64ba0a68f937d888f4e0d6db" dependencies = [ "unicode-ident", ] @@ -3715,7 +3747,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -3726,7 +3758,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -3779,9 +3811,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -3987,9 +4019,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "async-compression", "base64 0.21.5", @@ -4009,7 +4041,7 @@ dependencies = [ "mime", "native-tls", "once_cell", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustls", "rustls-pemfile 1.0.0", @@ -4022,7 +4054,7 @@ dependencies = [ "tokio-rustls", "tokio-util 0.7.1", "tower-service", - "url 2.4.1", + "url 2.5.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4080,13 +4112,13 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.2.0" +version = "7.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" +checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" dependencies = [ "libc", "rtoolbox", - "winapi 0.3.9", + "windows-sys 0.48.0", ] [[package]] @@ -4131,22 +4163,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", "ring 0.17.3", @@ -4301,38 +4333,38 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "cb0652c533506ad7a2e353cce269330d6afd8bdfb6d75e0ace5b35aacbd7b9e9" dependencies = [ "itoa", "ryu", @@ -4370,14 +4402,14 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "serde_yaml" -version = "0.9.25" +version = "0.9.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +checksum = "a15e0ef66bf939a7c890a0bf6d5a733c70202225f9888a89ed5c62298b019129" dependencies = [ "indexmap 2.1.0", "itoa", @@ -4555,9 +4587,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "smpl_jwt" @@ -4603,7 +4635,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.29", + "futures 0.3.30", "httparse", "log", "rand 0.8.5", @@ -4627,6 +4659,7 @@ dependencies = [ 
"solana-sdk", "spl-token", "spl-token-2022", + "spl-token-group-interface", "spl-token-metadata-interface", "thiserror", "zstd", @@ -4715,8 +4748,8 @@ dependencies = [ name = "solana-banks-client" version = "1.18.0" dependencies = [ - "borsh 0.10.3", - "futures 0.3.29", + "borsh 1.2.1", + "futures 0.3.30", "solana-banks-interface", "solana-program", "solana-sdk", @@ -4741,7 +4774,7 @@ version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.29", + "futures 0.3.30", "solana-accounts-db", "solana-banks-interface", "solana-client", @@ -4825,7 +4858,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -4839,7 +4872,7 @@ dependencies = [ "serde_yaml", "solana-clap-utils", "solana-sdk", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -4874,7 +4907,7 @@ dependencies = [ "async-trait", "bincode", "dashmap", - "futures 0.3.29", + "futures 0.3.30", "futures-util", "indexmap 2.1.0", "indicatif", @@ -4951,7 +4984,7 @@ dependencies = [ "dashmap", "eager", "etcd-client", - "futures 0.3.29", + "futures 0.3.30", "histogram", "itertools", "lazy_static", @@ -4998,6 +5031,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-turbine", + "solana-unified-scheduler-pool", "solana-version", "solana-vote", "solana-vote-program", @@ -5118,7 +5152,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -5228,7 +5262,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.29", + "futures 0.3.30", "itertools", "lazy_static", "libc", @@ -5346,7 +5380,7 @@ dependencies = [ "solana-sdk", "solana-version", "tokio", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5359,7 +5393,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", "bincode", "bv", "caps", @@ -5412,6 +5446,7 @@ dependencies = [ "blake3", "borsh 0.10.3", "borsh 0.9.3", + "borsh 1.2.1", "bs58", "bv", "bytemuck", @@ -5524,7 +5559,7 @@ dependencies = [ "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5533,7 +5568,7 @@ version = "1.18.0" dependencies = [ "async-mutex", "async-trait", - "futures 0.3.29", + "futures 0.3.30", "itertools", "lazy_static", "log", @@ -6194,7 +6229,7 @@ dependencies = [ "base64 0.21.5", "bincode", "bitflags 2.4.1", - "borsh 0.10.3", + "borsh 1.2.1", "bs58", "bytemuck", "byteorder 1.5.0", @@ -6246,9 +6281,15 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.39", + "syn 2.0.46", ] +[[package]] +name = "solana-security-txt" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" + [[package]] name = "solana-send-transaction-service" version = "1.18.0" @@ -6286,7 +6327,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.29", + "futures 0.3.30", "goauth", "http", "hyper", @@ -6460,7 +6501,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.29", + "futures 0.3.30", "itertools", "log", "lru", @@ -6501,6 +6542,22 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-unified-scheduler-logic" +version = "1.18.0" + +[[package]] +name = "solana-unified-scheduler-pool" +version = "1.18.0" +dependencies = [ + "solana-ledger", + "solana-program-runtime", + "solana-runtime", + "solana-sdk", + 
"solana-unified-scheduler-logic", + "solana-vote", +] + [[package]] name = "solana-validator" version = "1.18.0" @@ -6715,9 +6772,9 @@ dependencies = [ [[package]] name = "spl-associated-token-account" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "385e31c29981488f2820b2022d8e731aae3b02e6e18e2fd854e4c9a94dc44fc3" +checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" dependencies = [ "assert_matches", "borsh 0.10.3", @@ -6748,7 +6805,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -6760,7 +6817,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.39", + "syn 2.0.46", "thiserror", ] @@ -6808,14 +6865,14 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] name = "spl-tlv-account-resolution" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062e148d3eab7b165582757453632ffeef490c02c86a48bfdb4988f63eefb3b9" +checksum = "3f7020347c07892c08560d230fbb8a980316c9e198e22b198b7b9d951ff96047" dependencies = [ "bytemuck", "solana-program", @@ -6842,9 +6899,9 @@ dependencies = [ [[package]] name = "spl-token-2022" -version = "0.9.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4abf34a65ba420584a0c35f3903f8d727d1f13ababbdc3f714c6b065a686e86" +checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" dependencies = [ "arrayref", "bytemuck", @@ -6852,16 +6909,31 @@ dependencies = [ "num-traits", "num_enum 0.7.1", "solana-program", + "solana-security-txt", "solana-zk-token-sdk", "spl-memo", "spl-pod", "spl-token", + "spl-token-group-interface", "spl-token-metadata-interface", "spl-transfer-hook-interface", "spl-type-length-value", "thiserror", ] +[[package]] +name = "spl-token-group-interface" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b889509d49fa74a4a033ca5dae6c2307e9e918122d97e58562f5c4ffa795c75d" +dependencies = [ + "bytemuck", + "solana-program", + "spl-discriminator", + "spl-pod", + "spl-program-error", +] + [[package]] name = "spl-token-metadata-interface" version = "0.2.0" @@ -6878,9 +6950,9 @@ dependencies = [ [[package]] name = "spl-transfer-hook-interface" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "051d31803f873cabe71aec3c1b849f35248beae5d19a347d93a5c9cccc5d5a9b" +checksum = "7aabdb7c471566f6ddcee724beb8618449ea24b399e58d464d6b5bc7db550259" dependencies = [ "arrayref", "bytemuck", @@ -6913,9 +6985,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stream-cancel" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a9eb2715209fb8cc0d942fcdff45674bfc9f0090a0d897e85a22955ad159b" +checksum = "5f9fbf9bd71e4cf18d68a8a0951c0e5b7255920c0cd992c4ff51cddd6ef514a3" dependencies = [ "futures-core", "pin-project", @@ -6981,15 +7053,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "89456b690ff72fddcecf231caedbe615c59480c93358a93dfae7fc29e3ebbf0e" 
dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.46", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -7071,7 +7155,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.29", + "futures 0.3.30", "humantime", "opentelemetry", "pin-project", @@ -7100,15 +7184,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.4.1", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7128,9 +7212,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-case" -version = "3.2.1" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" dependencies = [ "test-case-macros", ] @@ -7145,7 +7229,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -7157,7 +7241,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", "test-case-core", ] @@ -7178,22 +7262,22 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -7316,7 +7400,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -7420,6 +7504,23 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" + +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -7438,7 +7539,7 @@ dependencies = [ "http-body", "hyper", "hyper-timeout", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "prost", "rustls-pemfile 1.0.0", @@ -7581,7 
+7682,7 @@ dependencies = [ "rustls", "sha1", "thiserror", - "url 2.4.1", + "url 2.5.0", "utf-8", "webpki-roots 0.24.0", ] @@ -7661,9 +7762,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "untrusted" @@ -7700,13 +7801,13 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", - "percent-encoding 2.3.0", + "idna 0.5.0", + "percent-encoding 2.3.1", ] [[package]] @@ -7780,9 +7881,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7790,16 +7891,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", "wasm-bindgen-shared", ] @@ -7817,9 +7918,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7827,22 +7928,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" @@ -7941,6 +8042,15 @@ dependencies = [ "windows-targets 0.48.0", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -7971,6 +8081,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.0", ] +[[package]] +name 
= "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -7983,6 +8108,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -7995,6 +8126,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -8007,6 +8144,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -8019,6 +8162,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -8031,6 +8180,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -8043,6 +8198,12 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -8055,6 +8216,21 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "winnow" +version = "0.5.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7e87b8dfbe3baffbe687eef2e164e32286eff31a5ee16463ce03d991643ec94" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -8103,22 +8279,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] @@ -8138,7 +8314,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.46", ] [[package]] diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 7ab496de8eebd4..e61ad6e1aaf724 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -91,7 +91,7 @@ solana-cli-output = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sbf-rust-invoke = { workspace = true } solana-sbf-rust-realloc = { workspace = true, features = ["default"] } solana-sbf-rust-realloc-invoke = { workspace = true } @@ -186,15 +186,16 @@ targets = ["x86_64-unknown-linux-gnu"] # # in `../../Cargo.toml`. # -# `spl-token`, in turn, depends on `solana-program`, which we explicitly specify above as a local -# path dependency: +# `spl-token`, in turn, depends on `solana-program`, which we explicitly specify +# above as a local path dependency: # # solana-program = { path = "../../sdk/program", version = "=1.16.0" } # -# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` dependency only using -# what is available on crates.io. Crates.io normally contains a previous version of these crates, -# and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their -# dependencies in our build tree. +# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` +# dependency only using what is available on crates.io. Crates.io normally +# contains a previous version of these crates, and we end up with two versions +# of `solana-program` and `solana-zk-token-sdk` and all of their dependencies in +# our build tree. # # If you are developing downstream using non-crates-io solana-program (local or # forked repo, or from github rev, eg), duplicate the following patch statements @@ -203,7 +204,7 @@ targets = ["x86_64-unknown-linux-gnu"] # -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock # file. # -# There is a similar override in `../../Cargo.toml`. Please keep both comments and the -# overrides in sync. +# There is a similar override in `../../Cargo.toml`. Please keep both comments +# and the overrides in sync. 
solana-program = { path = "../../sdk/program" } solana-zk-token-sdk = { path = "../../zk-token-sdk" } diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 7ef6966a80dbe0..f433c8374d8e47 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -2,6 +2,7 @@ #![cfg(feature = "sbf_c")] #![allow(clippy::uninlined_format_args)] #![allow(clippy::arithmetic_side_effects)] +#![cfg_attr(not(target_arch = "x86_64"), allow(dead_code, unused_imports))] use { solana_rbpf::memory_region::MemoryState, @@ -33,7 +34,7 @@ use { bpf_loader, client::SyncClient, entrypoint::SUCCESS, - feature_set::FeatureSet, + feature_set::{self, FeatureSet}, instruction::{AccountMeta, Instruction}, message::Message, native_loader, @@ -101,6 +102,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) { } #[bench] +#[cfg(target_arch = "x86_64")] fn bench_program_alu(bencher: &mut Bencher) { let ns_per_s = 1000000000; let one_million = 1000000; @@ -183,17 +185,28 @@ fn bench_program_alu(bencher: &mut Bencher) { #[bench] fn bench_program_execute_noop(bencher: &mut Bencher) { let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and benched. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + let bank = Bank::new_for_benches(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); let invoke_program_id = load_program(&bank_client, &bpf_loader::id(), &mint_keypair, "noop"); let bank = bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let mint_pubkey = mint_keypair.pubkey(); @@ -244,7 +257,8 @@ fn bench_create_vm(bencher: &mut Bencher) { .transaction_context .get_current_instruction_context() .unwrap(), - !direct_mapping, // copy_account_data + !direct_mapping, // copy_account_data, + &invoke_context.feature_set, ) .unwrap(); @@ -279,6 +293,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(), !direct_mapping, // copy_account_data + &invoke_context.feature_set, ) .unwrap(); diff --git a/programs/sbf/rust/invoke/src/processor.rs b/programs/sbf/rust/invoke/src/processor.rs index 36f4f2481f3808..1943f8f4b578db 100644 --- a/programs/sbf/rust/invoke/src/processor.rs +++ b/programs/sbf/rust/invoke/src/processor.rs @@ -1288,6 +1288,61 @@ fn process_instruction( }, &vec![0; original_data_len - new_len] ); + + // Realloc to [0xFC; 2] + invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![0xFC; 2], + ), + accounts, + ) + .unwrap(); + + // Check that [2..20] is zeroed + let new_len = account.data_len(); + assert_eq!(&*account.data.borrow(), &[0xFC; 2]); + assert_eq!( + unsafe { + slice::from_raw_parts( + account.data.borrow().as_ptr().add(new_len), + original_data_len - new_len, + ) + }, + &vec![0; original_data_len - new_len] + ); + + // Realloc to [0xFC; 2]. Here we keep the same length, but realloc the underlying + // vector. CPI must zero even if the length is unchanged. 
+            invoke(
+                &create_instruction(
+                    *callee_program_id,
+                    &[
+                        (accounts[ARGUMENT_INDEX].key, true, false),
+                        (callee_program_id, false, false),
+                    ],
+                    vec![0xFC; 2],
+                ),
+                accounts,
+            )
+            .unwrap();
+
+            // Check that [2..20] is zeroed
+            let new_len = account.data_len();
+            assert_eq!(&*account.data.borrow(), &[0xFC; 2]);
+            assert_eq!(
+                unsafe {
+                    slice::from_raw_parts(
+                        account.data.borrow().as_ptr().add(new_len),
+                        original_data_len - new_len,
+                    )
+                },
+                &vec![0; original_data_len - new_len]
+            );
         }
         TEST_WRITE_ACCOUNT => {
             msg!("TEST_WRITE_ACCOUNT");
diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs
index 97d5c2ceb58756..c4006f2055e1ef 100644
--- a/programs/sbf/tests/programs.rs
+++ b/programs/sbf/tests/programs.rs
@@ -43,7 +43,7 @@ use {
         clock::MAX_PROCESSING_AGE,
         compute_budget::ComputeBudgetInstruction,
         entrypoint::MAX_PERMITTED_DATA_INCREASE,
-        feature_set::{self, remove_deprecated_request_unit_ix, FeatureSet},
+        feature_set::{self, FeatureSet},
         fee::FeeStructure,
         loader_instruction,
         message::{v0::LoadedAddresses, SanitizedMessage},
@@ -54,7 +54,7 @@ use {
         transaction::VersionedTransaction,
     },
     solana_transaction_status::{
-        ConfirmedTransactionWithStatusMeta, InnerInstructions, TransactionStatusMeta,
+        map_inner_instructions, ConfirmedTransactionWithStatusMeta, TransactionStatusMeta,
         TransactionWithStatusMeta, VersionedTransactionWithStatusMeta,
     },
     std::collections::HashMap,
@@ -64,6 +64,7 @@ use {
     solana_runtime::{
         bank::Bank,
         bank_client::BankClient,
+        bank_forks::BankForks,
         genesis_utils::{
             bootstrap_validator_stake_lamports, create_genesis_config,
             create_genesis_config_with_leader_ex, GenesisConfigInfo,
@@ -85,7 +86,12 @@ use {
         system_program,
         transaction::{SanitizedTransaction, Transaction, TransactionError},
     },
-    std::{cell::RefCell, str::FromStr, sync::Arc, time::Duration},
+    std::{
+        cell::RefCell,
+        str::FromStr,
+        sync::{Arc, RwLock},
+        time::Duration,
+    },
 };

 #[cfg(feature = "sbf_rust")]
@@ -97,7 +103,7 @@ fn process_transaction_and_record_inner(
     Vec<Vec<InnerInstruction>>,
     Vec<String>,
 ) {
-    let signature = tx.signatures.get(0).unwrap().clone();
+    let signature = tx.signatures.first().unwrap().clone();
     let txs = vec![tx];
     let tx_batch = bank.prepare_batch_for_tests(txs);
     let mut results = bank
@@ -206,21 +212,7 @@ fn execute_transactions(
     );

     let inner_instructions = inner_instructions.map(|inner_instructions| {
-        inner_instructions
-            .into_iter()
-            .enumerate()
-            .map(|(index, instructions)| InnerInstructions {
-                index: index as u8,
-                instructions: instructions
-                    .into_iter()
-                    .map(|ix| solana_transaction_status::InnerInstruction {
-                        instruction: ix.instruction,
-                        stack_height: Some(u32::from(ix.stack_height)),
-                    })
-                    .collect(),
-            })
-            .filter(|i| !i.instructions.is_empty())
-            .collect
+        map_inner_instructions(inner_instructions).collect()
     });

     let tx_status_meta = TransactionStatusMeta {
@@ -258,6 +250,7 @@

 fn load_program_and_advance_slot(
     bank_client: &mut BankClient,
+    bank_forks: &RwLock<BankForks>,
     loader_id: &Pubkey,
     payer_keypair: &Keypair,
     name: &str,
@@ -265,7 +258,7 @@
     let pubkey = load_program(bank_client, loader_id, payer_keypair, name);
     (
         bank_client
-            .advance_slot(1, &Pubkey::default())
+            .advance_slot(1, bank_forks, &Pubkey::default())
             .expect("Failed to advance the slot"),
         pubkey,
     )
@@ -330,17 +323,28 @@ fn test_program_sbf_sanity() {
         println!("Test program: {:?}", program.0);

         let GenesisConfigInfo {
-            genesis_config,
+            mut genesis_config,
             mint_keypair,
             ..
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); // Call user program let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program.0, @@ -386,12 +390,12 @@ fn test_program_sbf_loader_deprecated() { .accounts .remove(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()) .unwrap(); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let program_id = create_program(&bank, &bpf_loader_deprecated::id(), program); - let mut bank_client = BankClient::new(bank); + let mut bank_client = BankClient::new_shared(bank); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let account_metas = vec![AccountMeta::new(mint_keypair.pubkey(), true)]; let instruction = Instruction::new_with_bytes(program_id, &[255], account_metas); @@ -409,11 +413,22 @@ fn test_sol_alloc_free_no_longer_deployable() { let program_address = program_keypair.pubkey(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let mut bank = Bank::new_for_tests(&genesis_config); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Populate loader account with elf that depends on _sol_alloc_free syscall let elf = load_program_from_file("solana_sbf_rust_deprecated_loader"); @@ -457,21 +472,41 @@ fn test_sol_alloc_free_no_longer_deployable() { ); // Enable _sol_alloc_free syscall + let slot = bank.slot(); + drop(bank); + let mut bank = Arc::into_inner(bank_forks.write().unwrap().remove(slot).unwrap()).unwrap(); bank.deactivate_feature(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()); bank.clear_signatures(); bank.clear_program_cache(); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); // Try and finalize the program now that sol_alloc_free is re-enabled assert!(bank.process_transaction(&finalize_tx).is_ok()); let new_slot = bank.slot() + 1; - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), new_slot); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank, &Pubkey::default(), new_slot)) + .clone_without_scheduler(); // invoke the program assert!(bank.process_transaction(&invoke_tx).is_ok()); // disable _sol_alloc_free + let slot = bank.slot(); + drop(bank); + let mut bank = Arc::try_unwrap(bank_forks.write().unwrap().remove(slot).unwrap()).unwrap(); bank.activate_feature(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()); bank.clear_signatures(); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); // invoke should still succeed because cached assert!(bank.process_transaction(&invoke_tx).is_ok()); @@ -502,15 +537,26 @@ fn test_program_sbf_duplicate_accounts() { println!("Test program: {:?}", program); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program, @@ -606,14 +652,26 @@ fn test_program_sbf_error_handling() { println!("Test program: {:?}", program); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program, @@ -711,16 +769,27 @@ fn test_return_data_and_log_data_syscall() { for program in programs.iter() { let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program, @@ -737,7 +806,7 @@ fn test_return_data_and_log_data_syscall() { let transaction = Transaction::new(&[&mint_keypair], message, blockhash); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); - let result = bank.simulate_transaction(sanitized_tx); + let result = bank.simulate_transaction(&sanitized_tx, false); assert!(result.result.is_ok()); @@ -779,12 +848,22 @@ fn test_program_sbf_invoke_sanity() { println!("Test program: {:?}", program); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let invoke_program_id = @@ -793,6 +872,7 @@ fn test_program_sbf_invoke_sanity() { load_program(&bank_client, &bpf_loader::id(), &mint_keypair, program.2); let (bank, noop_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program.3, @@ -1176,12 +1256,21 @@ fn test_program_sbf_invoke_sanity() { #[cfg(feature = "sbf_rust")] fn test_program_sbf_program_id_spoofing() { let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let malicious_swap_pubkey = load_program( @@ -1192,6 +1281,7 @@ fn test_program_sbf_program_id_spoofing() { ); let (bank, malicious_system_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_spoof1_system", @@ -1227,12 +1317,21 @@ fn test_program_sbf_program_id_spoofing() { #[cfg(feature = "sbf_rust")] fn test_program_sbf_caller_has_access_to_cpi_program() { let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let caller_pubkey = load_program( @@ -1243,6 +1342,7 @@ fn test_program_sbf_caller_has_access_to_cpi_program() { ); let (_, caller2_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_caller_access", @@ -1265,16 +1365,26 @@ fn test_program_sbf_ro_modify() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_ro_modify", @@ -1320,14 +1430,25 @@ fn test_program_sbf_call_depth() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_call_depth", @@ -1353,14 +1474,25 @@ fn test_program_sbf_compute_budget() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_noop", @@ -1480,16 +1612,26 @@ fn test_program_sbf_instruction_introspection() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50_000); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_instruction_introspection", @@ -1538,12 +1680,22 @@ fn test_program_sbf_test_use_latest_executor() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let panic_id = load_program( &bank_client, &bpf_loader::id(), @@ -1570,7 +1722,7 @@ fn test_program_sbf_test_use_latest_executor() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); assert!(bank_client .send_and_confirm_message(&[&mint_keypair, &program_keypair], message) @@ -1585,7 +1737,7 @@ fn test_program_sbf_test_use_latest_executor() { "solana_sbf_rust_noop", ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); bank_client @@ -1602,7 +1754,7 @@ fn test_program_sbf_test_use_latest_executor() { Some(&mint_keypair.pubkey()), ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); assert!(bank_client .send_and_confirm_message(&[&mint_keypair], message) @@ -1619,8 +1771,8 @@ fn test_program_sbf_upgrade() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); // Deploy upgrade program let buffer_keypair = Keypair::new(); @@ -1636,7 +1788,7 @@ fn test_program_sbf_upgrade() { "solana_sbf_rust_upgradeable", ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let mut instruction = @@ -1664,7 +1816,7 @@ fn test_program_sbf_upgrade() { ..clock::Clock::default() }); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Call upgraded program @@ -1697,7 +1849,7 @@ fn test_program_sbf_upgrade() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Call original program @@ -1932,8 +2084,7 @@ fn test_program_sbf_invoke_in_same_tx_as_deployment() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); // Deploy upgradeable program @@ -1989,7 +2140,7 @@ fn test_program_sbf_invoke_in_same_tx_as_deployment() { .unwrap(); let bank = bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Deployment is invisible to both top-level-instructions and CPI instructions @@ -2030,8 +2181,7 @@ fn test_program_sbf_invoke_in_same_tx_as_redeployment() { mint_keypair, .. 
 } = create_genesis_config(50);
-    let bank = Bank::new_for_tests(&genesis_config);
-    let bank = Arc::new(bank);
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let mut bank_client = BankClient::new_shared(bank.clone());

     // Deploy upgradeable program
@@ -2082,10 +2232,13 @@
     );

     // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective
-    // after 2 slots. So we need to advance the bank client by 2 slots here.
+    // after 2 slots. The two `advance_slot` calls below must be made individually to create the correct fork graph in between.
+    bank_client
+        .advance_slot(1, bank_forks.as_ref(), &Pubkey::default())
+        .unwrap();
     let bank = bank_client
-        .advance_slot(2, &Pubkey::default())
-        .expect("Failed to advance slot");
+        .advance_slot(1, bank_forks.as_ref(), &Pubkey::default())
+        .unwrap();

     // Prepare redeployment
     let buffer_keypair = Keypair::new();
@@ -2138,8 +2291,7 @@
         mint_keypair,
         ..
     } = create_genesis_config(50);
-    let bank = Bank::new_for_tests(&genesis_config);
-    let bank = Arc::new(bank);
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let mut bank_client = BankClient::new_shared(bank.clone());

     // Deploy upgradeable program
@@ -2179,10 +2331,13 @@
     );

     // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective
-    // after 2 slots. So we need to advance the bank client by 2 slots here.
+    // after 2 slots. The two `advance_slot` calls below must be made individually to create the correct fork graph in between.
+    bank_client
+        .advance_slot(1, bank_forks.as_ref(), &Pubkey::default())
+        .unwrap();
     let bank = bank_client
-        .advance_slot(2, &Pubkey::default())
-        .expect("Failed to advance slot");
+        .advance_slot(1, bank_forks.as_ref(), &Pubkey::default())
+        .unwrap();

     // Prepare undeployment
     let (programdata_address, _) = Pubkey::find_program_address(
@@ -2227,12 +2382,23 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() {
     solana_logger::setup();

     let GenesisConfigInfo {
-        genesis_config,
+        mut genesis_config,
         mint_keypair,
         ..
     } = create_genesis_config(50);
-    let bank = Bank::new_for_tests(&genesis_config);
-    let mut bank_client = BankClient::new(bank);
+
+    // deactivate `disable_bpf_loader_instructions` feature so that the program
+    // can be loaded, finalized and tested.
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let invoke_and_return = load_program( &bank_client, &bpf_loader::id(), @@ -2255,7 +2421,7 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); let mut instruction = Instruction::new_with_bytes( @@ -2290,7 +2456,7 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { ..clock::Clock::default() }); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Call the upgraded program @@ -2323,7 +2489,7 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Call original program @@ -2356,7 +2522,6 @@ fn test_program_sbf_disguised_as_sbf_loader() { mint_keypair, .. } = create_genesis_config(50); - let mut bank = Bank::new_for_tests(&genesis_config); // disable native_programs_consume_cu feature to allow test program // not consume units. @@ -2366,7 +2531,10 @@ fn test_program_sbf_disguised_as_sbf_loader() { bank.deactivate_feature( &solana_sdk::feature_set::remove_bpf_loader_incorrect_program_id::id(), ); - let bank_client = BankClient::new(bank); + bank.deactivate_feature(&feature_set::disable_bpf_loader_instructions::id()); + bank.deactivate_feature(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + let bank = bank.wrap_with_bank_forks_for_tests().0; + let bank_client = BankClient::new_shared(bank); let program_id = load_program(&bank_client, &bpf_loader::id(), &mint_keypair, program); let account_metas = vec![AccountMeta::new_readonly(program_id, false)]; @@ -2385,15 +2553,27 @@ fn test_program_reads_from_program_account() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "read_program", @@ -2411,20 +2591,36 @@ fn test_program_sbf_c_dup() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let account_address = Pubkey::new_unique(); let account = AccountSharedData::new_data(42, &[1_u8, 2, 3], &system_program::id()).unwrap(); bank.store_account(&account_address, &account); - let mut bank_client = BankClient::new(bank); + let mut bank_client = BankClient::new_shared(bank); - let (_, program_id) = - load_program_and_advance_slot(&mut bank_client, &bpf_loader::id(), &mint_keypair, "ser"); + let (_, program_id) = load_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &bpf_loader::id(), + &mint_keypair, + "ser", + ); let account_metas = vec![ AccountMeta::new_readonly(account_address, false), AccountMeta::new_readonly(account_address, false), @@ -2441,12 +2637,23 @@ fn test_program_sbf_upgrade_via_cpi() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let invoke_and_return = load_program( &bank_client, &bpf_loader::id(), @@ -2468,7 +2675,7 @@ fn test_program_sbf_upgrade_via_cpi() { "solana_sbf_rust_upgradeable", ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let program_account = bank_client.get_account(&program_id).unwrap().unwrap(); let Ok(bpf_loader_upgradeable::UpgradeableLoaderState::Program { @@ -2526,7 +2733,7 @@ fn test_program_sbf_upgrade_via_cpi() { .unwrap(); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Call the upgraded program @@ -2551,12 +2758,23 @@ fn test_program_sbf_set_upgrade_authority_via_cpi() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); // Deploy CPI invoker program let invoke_and_return = load_program( @@ -2581,7 +2799,7 @@ fn test_program_sbf_set_upgrade_authority_via_cpi() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Set program upgrade authority instruction to invoke via CPI @@ -2648,8 +2866,7 @@ fn test_program_upgradeable_locks() { mint_keypair, .. } = create_genesis_config(2_000_000_000); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); load_upgradeable_program( @@ -2671,7 +2888,7 @@ fn test_program_upgradeable_locks() { ); let bank = bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); bank_client @@ -2767,16 +2984,27 @@ fn test_program_sbf_finalize() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (_, program_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_finalize", @@ -2792,7 +3020,7 @@ fn test_program_sbf_finalize() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let account_metas = vec![ @@ -2815,16 +3043,27 @@ fn test_program_sbf_ro_account_modify() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_ro_account_modify", @@ -2877,10 +3116,21 @@ fn test_program_sbf_realloc() { const START_BALANCE: u64 = 100_000_000_000; let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. 
} = create_genesis_config(1_000_000_000_000); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + let mint_pubkey = mint_keypair.pubkey(); let signer = &[&mint_keypair]; for direct_mapping in [false, true] { @@ -2891,11 +3141,12 @@ fn test_program_sbf_realloc() { if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc", @@ -3215,11 +3466,21 @@ fn test_program_sbf_realloc_invoke() { .. } = create_genesis_config(1_000_000_000_000); genesis_config.rent = Rent::default(); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + let mint_pubkey = mint_keypair.pubkey(); let signer = &[&mint_keypair]; - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let realloc_program_id = load_program( @@ -3231,6 +3492,7 @@ fn test_program_sbf_realloc_invoke() { let (bank, realloc_invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc_invoke", @@ -3730,12 +3992,22 @@ fn test_program_sbf_processed_inner_instruction() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let sibling_program_id = load_program( @@ -3758,6 +4030,7 @@ fn test_program_sbf_processed_inner_instruction() { ); let (_, invoke_and_return_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke_and_return", @@ -3810,16 +4083,28 @@ fn test_program_fees() { mint_keypair, .. } = create_genesis_config(500_000_000); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + genesis_config.fee_rate_governor = FeeRateGovernor::new(congestion_multiplier, 0); let mut bank = Bank::new_for_tests(&genesis_config); let fee_structure = FeeStructure::new(0.000005, 0.0, vec![(200, 0.0000005), (1400000, 0.000005)]); bank.fee_structure = fee_structure.clone(); - bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_noop", @@ -3831,20 +4116,13 @@ fn test_program_fees() { Some(&mint_keypair.pubkey()), ); - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); - let sanitized_message = SanitizedMessage::try_from(message.clone()).unwrap(); let expected_normal_fee = fee_structure.calculate_fee( &sanitized_message, congestion_multiplier, - &process_compute_budget_instructions( - sanitized_message.program_instructions_iter(), - &feature_set, - ) - .unwrap_or_default() - .into(), - true, + &process_compute_budget_instructions(sanitized_message.program_instructions_iter()) + .unwrap_or_default() + .into(), false, ); bank_client @@ -3862,18 +4140,12 @@ fn test_program_fees() { Some(&mint_keypair.pubkey()), ); let sanitized_message = SanitizedMessage::try_from(message.clone()).unwrap(); - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); let expected_prioritized_fee = fee_structure.calculate_fee( &sanitized_message, congestion_multiplier, - &process_compute_budget_instructions( - sanitized_message.program_instructions_iter(), - &feature_set, - ) - .unwrap_or_default() - .into(), - true, + &process_compute_budget_instructions(sanitized_message.program_instructions_iter()) + .unwrap_or_default() + .into(), false, ); assert!(expected_normal_fee < expected_prioritized_fee); @@ -3889,17 +4161,28 @@ fn test_program_fees() { #[cfg(feature = "sbf_rust")] fn test_get_minimum_delegation() { let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(100_123_456_789); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_get_minimum_delegation", @@ -3921,7 +4204,7 @@ fn test_program_sbf_inner_instruction_alignment_checks() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let noop = create_program(&bank, &bpf_loader_deprecated::id(), "solana_sbf_rust_noop"); let inner_instruction_alignment_check = create_program( &bank, @@ -3931,9 +4214,9 @@ fn test_program_sbf_inner_instruction_alignment_checks() { // invoke unaligned program, which will call aligned program twice, // unaligned should be allowed once invoke completes - let mut bank_client = BankClient::new(bank); + let mut bank_client = BankClient::new_shared(bank); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let mut instruction = Instruction::new_with_bytes( inner_instruction_alignment_check, @@ -3960,13 +4243,18 @@ fn test_cpi_account_ownership_writability() { mint_keypair, .. } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); let mut feature_set = FeatureSet::all_enabled(); if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + feature_set.deactivate(&feature_set::disable_bpf_loader_instructions::id()); + feature_set.deactivate(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let invoke_program_id = load_program( @@ -3985,6 +4273,7 @@ fn test_cpi_account_ownership_writability() { let (bank, realloc_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc", @@ -4146,8 +4435,13 @@ fn test_cpi_account_data_updates() { if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + feature_set.deactivate(&feature_set::disable_bpf_loader_instructions::id()); + feature_set.deactivate(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let invoke_program_id = load_program( @@ -4159,6 +4453,7 @@ fn test_cpi_account_data_updates() { let (bank, realloc_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc", @@ -4288,13 +4583,20 @@ fn test_cpi_deprecated_loader_realloc() { mint_keypair, .. } = create_genesis_config(100_123_456_789); + let mut bank = Bank::new_for_tests(&genesis_config); let mut feature_set = FeatureSet::all_enabled(); if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ feature_set.deactivate(&feature_set::disable_bpf_loader_instructions::id()); + feature_set.deactivate(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let deprecated_program_id = create_program( &bank, @@ -4306,6 +4608,7 @@ fn test_cpi_deprecated_loader_realloc() { let (bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", @@ -4396,13 +4699,22 @@ fn test_cpi_change_account_data_memory_allocation() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(100_123_456_789); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + let mut bank = Bank::new_for_tests(&genesis_config); - let feature_set = FeatureSet::all_enabled(); - bank.feature_set = Arc::new(feature_set); declare_process_instruction!(MockBuiltin, 42, |invoke_context| { let transaction_context = &invoke_context.transaction_context; @@ -4420,12 +4732,18 @@ fn test_cpi_change_account_data_memory_allocation() { // Test changing the account data both in place and by changing the // underlying vector. CPI will have to detect the vector change and - // update the corresponding memory region. In both cases CPI will have + // update the corresponding memory region. In all cases CPI will have // to zero the spare bytes correctly. - if instruction_data[0] == 0xFE { - account.set_data(instruction_data.to_vec()); - } else { - account.set_data_from_slice(instruction_data); + match instruction_data[0] { + 0xFE => account.set_data(instruction_data.to_vec()), + 0xFD => account.set_data_from_slice(instruction_data), + 0xFC => { + // Exercise the update_caller_account capacity check where account len != capacity. + let mut data = instruction_data.to_vec(); + data.reserve_exact(1); + account.set_data(data) + } + _ => panic!(), } Ok(()) @@ -4438,11 +4756,12 @@ fn test_cpi_change_account_data_memory_allocation() { LoadedProgram::new_builtin(0, 42, MockBuiltin::vm), ); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let (bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", @@ -4475,14 +4794,23 @@ fn test_cpi_invalid_account_info_pointers() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(100_123_456_789); - let mut bank = Bank::new_for_tests(&genesis_config); - let feature_set = FeatureSet::all_enabled(); - bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. 
+ genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + + let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let c_invoke_program_id = @@ -4490,6 +4818,7 @@ fn test_cpi_invalid_account_info_pointers() { let (bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", @@ -4537,11 +4866,21 @@ fn test_deny_executable_write() { solana_logger::setup(); let GenesisConfigInfo { - genesis_config, + mut genesis_config, mint_keypair, .. } = create_genesis_config(100_123_456_789); + // deactivate `disable_bpf_loader_instructions` feature so that the program + // can be loaded, finalized and tested. + genesis_config + .accounts + .remove(&feature_set::disable_bpf_loader_instructions::id()); + + genesis_config + .accounts + .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); + for direct_mapping in [false, true] { let mut bank = Bank::new_for_tests(&genesis_config); let feature_set = Arc::make_mut(&mut bank.feature_set); @@ -4550,11 +4889,12 @@ fn test_deny_executable_write() { if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let (_bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index cd0b59e82534ae..e0f20d601437b1 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -12,7 +12,6 @@ use { declare_process_instruction, sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ - clock::Clock, feature_set, instruction::InstructionError, program_utils::limited_deserialize, @@ -74,82 +73,53 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(StakeInstruction::Initialize(authorized, lockup)) => { let mut me = get_stake_account()?; let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize(&mut me, &authorized, &lockup, &rent) + initialize( + &mut me, + &authorized, + &lockup, + &rent, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::Authorize(authorized_pubkey, stake_authorize)) => { let mut me = get_stake_account()?; - let require_custodian_for_locked_stake_authorize = invoke_context - .feature_set - .is_active(&feature_set::require_custodian_for_locked_stake_authorize::id()); - - if require_custodian_for_locked_stake_authorize { - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - instruction_context.check_number_of_instruction_accounts(3)?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 3, false)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + instruction_context.check_number_of_instruction_accounts(3)?; + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 3, false)?; - authorize( - &mut me, - &signers, - 
&authorized_pubkey, - stake_authorize, - require_custodian_for_locked_stake_authorize, - &clock, - custodian_pubkey, - ) - } else { - authorize( - &mut me, - &signers, - &authorized_pubkey, - stake_authorize, - require_custodian_for_locked_stake_authorize, - &Clock::default(), - None, - ) - } + authorize( + &mut me, + &signers, + &authorized_pubkey, + stake_authorize, + &clock, + custodian_pubkey, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::AuthorizeWithSeed(args)) => { let mut me = get_stake_account()?; instruction_context.check_number_of_instruction_accounts(2)?; - let require_custodian_for_locked_stake_authorize = invoke_context - .feature_set - .is_active(&feature_set::require_custodian_for_locked_stake_authorize::id()); - if require_custodian_for_locked_stake_authorize { - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 3, false)?; - - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - &args.new_authorized_pubkey, - args.stake_authorize, - require_custodian_for_locked_stake_authorize, - &clock, - custodian_pubkey, - ) - } else { - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - &args.new_authorized_pubkey, - args.stake_authorize, - require_custodian_for_locked_stake_authorize, - &Clock::default(), - None, - ) - } + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 3, false)?; + + authorize_with_seed( + transaction_context, + instruction_context, + &mut me, + 1, + &args.authority_seed, + &args.authority_owner, + &args.new_authorized_pubkey, + args.stake_authorize, + &clock, + custodian_pubkey, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::DelegateStake) => { let me = get_stake_account()?; @@ -251,6 +221,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| None }, new_warmup_cooldown_rate_epoch(invoke_context), + &invoke_context.feature_set, ) } Ok(StakeInstruction::Deactivate) => { @@ -262,123 +233,112 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(StakeInstruction::SetLockup(lockup)) => { let mut me = get_stake_account()?; let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup(&mut me, &lockup, &signers, &clock) + set_lockup( + &mut me, + &lockup, + &signers, + &clock, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::InitializeChecked) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(4)?; - let staker_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; - let withdrawer_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } + instruction_context.check_number_of_instruction_accounts(4)?; + let staker_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(2)?, + )?; + let withdrawer_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); + } - let authorized = Authorized { - staker: *staker_pubkey, - withdrawer: *withdrawer_pubkey, - }; + let authorized = Authorized { + staker: *staker_pubkey, + withdrawer: *withdrawer_pubkey, + }; - let rent = - get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize(&mut me, &authorized, &Lockup::default(), &rent) - } else { - Err(InstructionError::InvalidInstructionData) - } + let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; + initialize( + &mut me, + &authorized, + &Lockup::default(), + &rent, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize( - &mut me, - &signers, - authorized_pubkey, - stake_authorize, - true, - &clock, - custodian_pubkey, - ) - } else { - Err(InstructionError::InvalidInstructionData) + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + instruction_context.check_number_of_instruction_accounts(4)?; + let authorized_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); } + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 4, false)?; + + authorize( + &mut me, + &signers, + authorized_pubkey, + stake_authorize, + &clock, + custodian_pubkey, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - authorized_pubkey, - args.stake_authorize, - true, - &clock, - custodian_pubkey, - ) - } else { - Err(InstructionError::InvalidInstructionData) + instruction_context.check_number_of_instruction_accounts(2)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + instruction_context.check_number_of_instruction_accounts(4)?; + let authorized_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); } + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 4, false)?; + + authorize_with_seed( + transaction_context, + instruction_context, + &mut me, + 1, + &args.authority_seed, + &args.authority_owner, + authorized_pubkey, + args.stake_authorize, + &clock, + custodian_pubkey, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 2, true)?; + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 2, true)?; - let lockup = LockupArgs { - unix_timestamp: lockup_checked.unix_timestamp, - epoch: lockup_checked.epoch, - custodian: custodian_pubkey.cloned(), - }; - let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup(&mut me, &lockup, &signers, &clock) - } else { - Err(InstructionError::InvalidInstructionData) - } + let lockup = LockupArgs { + unix_timestamp: lockup_checked.unix_timestamp, + epoch: lockup_checked.epoch, + custodian: custodian_pubkey.cloned(), + }; + let clock = invoke_context.get_sysvar_cache().get_clock()?; + set_lockup( + &mut me, + &lockup, + &signers, + &clock, + &invoke_context.feature_set, + ) } Ok(StakeInstruction::GetMinimumDelegation) => { let feature_set = invoke_context.feature_set.as_ref(); @@ -460,7 +420,7 @@ mod tests { WritableAccount, }, account_utils::StateMut, - clock::{Epoch, UnixTimestamp}, + clock::{Clock, Epoch, UnixTimestamp}, epoch_schedule::EpochSchedule, feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, @@ -635,7 +595,7 @@ mod tests { if let StakeStateV2::Stake(_meta, stake, _stake_flags) = account.state().unwrap() { let stake_status = stake.delegation.stake_activating_and_deactivating( clock.epoch, - Some(stake_history), + stake_history, None, ); active_stake += stake_status.effective; @@ -6886,15 +6846,11 @@ mod tests { create_account_shared_data_for_test(&stake_history), ); if stake_amount - == stake.stake( - clock.epoch, - Some(&stake_history), - new_warmup_cooldown_rate_epoch, - ) + == stake.stake(clock.epoch, &stake_history, new_warmup_cooldown_rate_epoch) && merge_from_amount == merge_from_stake.stake( clock.epoch, - Some(&stake_history), + &stake_history, new_warmup_cooldown_rate_epoch, ) { @@ -6975,14 +6931,10 @@ mod tests { stake_history::id(), create_account_shared_data_for_test(&stake_history), ); - if 0 == stake.stake( - clock.epoch, - 
Some(&stake_history), - new_warmup_cooldown_rate_epoch, - ) && 0 - == merge_from_stake.stake( + if 0 == stake.stake(clock.epoch, &stake_history, new_warmup_cooldown_rate_epoch) + && 0 == merge_from_stake.stake( clock.epoch, - Some(&stake_history), + &stake_history, new_warmup_cooldown_rate_epoch, ) { @@ -7428,11 +7380,7 @@ mod tests { initial_stake_state .delegation() .unwrap() - .stake_activating_and_deactivating( - current_epoch, - Some(&stake_history), - None - ) + .stake_activating_and_deactivating(current_epoch, &stake_history, None) ); } @@ -7928,7 +7876,7 @@ mod tests { }, stake.delegation.stake_activating_and_deactivating( current_epoch, - Some(&stake_history), + &stake_history, None ) ); diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index c533728b11f24c..17232d083f06ec 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -14,7 +14,7 @@ use { account::{AccountSharedData, ReadableAccount, WritableAccount}, account_utils::StateMut, clock::{Clock, Epoch}, - feature_set::{self, stake_merge_with_unmatched_credits_observed, FeatureSet}, + feature_set::{self, FeatureSet}, instruction::{checked_add, InstructionError}, pubkey::Pubkey, rent::Rent, @@ -111,7 +111,7 @@ fn get_stake_status( let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; Ok(stake.delegation.stake_activating_and_deactivating( clock.epoch, - Some(&stake_history), + &stake_history, new_warmup_cooldown_rate_epoch(invoke_context), )) } @@ -127,7 +127,7 @@ fn redelegate_stake( ) -> Result<(), StakeError> { let new_rate_activation_epoch = new_warmup_cooldown_rate_epoch(invoke_context); // If stake is currently active: - if stake.stake(clock.epoch, Some(stake_history), new_rate_activation_epoch) != 0 { + if stake.stake(clock.epoch, stake_history, new_rate_activation_epoch) != 0 { let stake_lamports_ok = if invoke_context .feature_set .is_active(&feature_set::stake_redelegate_instruction::id()) @@ -194,7 +194,7 @@ fn redeem_stake_rewards( stake: &mut Stake, point_value: &PointValue, vote_state: &VoteState, - stake_history: Option<&StakeHistory>, + stake_history: &StakeHistory, inflation_point_calc_tracer: Option<impl Fn(&InflationPointCalculationEvent)>, new_rate_activation_epoch: Option<Epoch>, ) -> Option<(u64, u64)> { @@ -232,7 +232,7 @@ fn redeem_stake_rewards( fn calculate_stake_points( stake: &Stake, vote_state: &VoteState, - stake_history: Option<&StakeHistory>, + stake_history: &StakeHistory, inflation_point_calc_tracer: Option<impl Fn(&InflationPointCalculationEvent)>, new_rate_activation_epoch: Option<Epoch>, ) -> u128 { @@ -259,7 +259,7 @@ struct CalculatedStakePoints { fn calculate_stake_points_and_credits( stake: &Stake, new_vote_state: &VoteState, - stake_history: Option<&StakeHistory>, + stake_history: &StakeHistory, inflation_point_calc_tracer: Option<impl Fn(&InflationPointCalculationEvent)>, new_rate_activation_epoch: Option<Epoch>, ) -> CalculatedStakePoints { @@ -378,7 +378,7 @@ fn calculate_stake_rewards( stake: &Stake, point_value: &PointValue, vote_state: &VoteState, - stake_history: Option<&StakeHistory>, + stake_history: &StakeHistory, inflation_point_calc_tracer: Option<impl Fn(&InflationPointCalculationEvent)>, new_rate_activation_epoch: Option<Epoch>, ) -> Option<CalculatedStakeRewards> { @@ -478,18 +478,23 @@ pub fn initialize( authorized: &Authorized, lockup: &Lockup, rent: &Rent, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if stake_account.get_data().len() != StakeStateV2::size_of() { return Err(InstructionError::InvalidAccountData); } + if let StakeStateV2::Uninitialized = stake_account.get_state()? 
{ let rent_exempt_reserve = rent.minimum_balance(stake_account.get_data().len()); if stake_account.get_lamports() >= rent_exempt_reserve { - stake_account.set_state(&StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - authorized: *authorized, - lockup: *lockup, - })) + stake_account.set_state( + &StakeStateV2::Initialized(Meta { + rent_exempt_reserve, + authorized: *authorized, + lockup: *lockup, + }), + feature_set, + ) } else { Err(InstructionError::InsufficientFunds) } @@ -506,9 +511,9 @@ pub fn authorize( signers: &HashSet<Pubkey>, new_authority: &Pubkey, stake_authorize: StakeAuthorize, - require_custodian_for_locked_stake_authorize: bool, clock: &Clock, custodian: Option<&Pubkey>, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { match stake_account.get_state()? { StakeStateV2::Stake(mut meta, stake, stake_flags) => { @@ -516,26 +521,18 @@ pub fn authorize( signers, new_authority, stake_authorize, - if require_custodian_for_locked_stake_authorize { - Some((&meta.lockup, clock, custodian)) - } else { - None - }, + Some((&meta.lockup, clock, custodian)), )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) } StakeStateV2::Initialized(mut meta) => { meta.authorized.authorize( signers, new_authority, stake_authorize, - if require_custodian_for_locked_stake_authorize { - Some((&meta.lockup, clock, custodian)) - } else { - None - }, + Some((&meta.lockup, clock, custodian)), )?; - stake_account.set_state(&StakeStateV2::Initialized(meta)) + stake_account.set_state(&StakeStateV2::Initialized(meta), feature_set) } _ => Err(InstructionError::InvalidAccountData), } @@ -551,9 +548,9 @@ pub fn authorize_with_seed( authority_owner: &Pubkey, new_authority: &Pubkey, stake_authorize: StakeAuthorize, - require_custodian_for_locked_stake_authorize: bool, clock: &Clock, custodian: Option<&Pubkey>, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut signers = HashSet::default(); if instruction_context.is_instruction_account_signer(authority_base_index)? { @@ -572,9 +569,9 @@ pub fn authorize_with_seed( &signers, new_authority, stake_authorize, - require_custodian_for_locked_stake_authorize, clock, custodian, + feature_set, ) } @@ -612,7 +609,10 @@ pub fn delegate( &vote_state?.convert_to_current(), clock.epoch, ); - stake_account.set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) + stake_account.set_state( + &StakeStateV2::Stake(meta, stake, StakeFlags::empty()), + feature_set, + ) } StakeStateV2::Stake(meta, mut stake, stake_flags) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; @@ -627,7 +627,7 @@ pub fn delegate( clock, stake_history, )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) } _ => Err(InstructionError::InvalidAccountData), } @@ -649,7 +649,7 @@ fn deactivate_stake( // deactivation is only permitted when the stake delegation activating amount is zero. let status = stake.delegation.stake_activating_and_deactivating( epoch, - Some(stake_history.as_ref()), + &stake_history, new_warmup_cooldown_rate_epoch(invoke_context), ); if status.activating != 0 { @@ -683,7 +683,10 @@ pub fn deactivate( if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? 
{ meta.authorized.check(signers, StakeAuthorize::Staker)?; deactivate_stake(invoke_context, &mut stake, &mut stake_flags, clock.epoch)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) + stake_account.set_state( + &StakeStateV2::Stake(meta, stake, stake_flags), + &invoke_context.feature_set, + ) } else { Err(InstructionError::InvalidAccountData) } @@ -694,15 +697,16 @@ pub fn set_lockup( lockup: &LockupArgs, signers: &HashSet<Pubkey>, clock: &Clock, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { match stake_account.get_state()? { StakeStateV2::Initialized(mut meta) => { meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Initialized(meta)) + stake_account.set_state(&StakeStateV2::Initialized(meta), feature_set) } StakeStateV2::Stake(mut meta, stake, stake_flags) => { meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) } _ => Err(InstructionError::InvalidAccountData), } @@ -811,11 +815,17 @@ pub fn split( let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))?; + stake_account.set_state( + &StakeStateV2::Stake(meta, stake, stake_flags), + &invoke_context.feature_set, + )?; drop(stake_account); let mut split = instruction_context .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state(&StakeStateV2::Stake(split_meta, split_stake, stake_flags))?; + split.set_state( + &StakeStateV2::Stake(split_meta, split_stake, stake_flags), + &invoke_context.feature_set, + )?; } StakeStateV2::Initialized(meta) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; @@ -834,7 +844,10 @@ pub fn split( split_meta.rent_exempt_reserve = validated_split_info.destination_rent_exempt_reserve; let mut split = instruction_context .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state(&StakeStateV2::Initialized(split_meta))?; + split.set_state( + &StakeStateV2::Initialized(split_meta), + &invoke_context.feature_set, + )?; } StakeStateV2::Uninitialized => { let stake_pubkey = transaction_context.get_key_of_account_at_index( @@ -852,17 +865,17 @@ pub fn split( let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized)?; + stake_account.set_state(&StakeStateV2::Uninitialized, &invoke_context.feature_set)?; } drop(stake_account); let mut split = instruction_context.try_borrow_instruction_account(transaction_context, split_index)?; - split.checked_add_lamports(lamports)?; + split.checked_add_lamports(lamports, &invoke_context.feature_set)?; drop(split); let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.checked_sub_lamports(lamports)?; + stake_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; Ok(()) } @@ -918,16 +931,16 @@ pub fn merge( ic_msg!(invoke_context, "Merging stake accounts"); if let Some(merged_state) = stake_merge_kind.merge(invoke_context, source_merge_kind, clock)? 
{ - stake_account.set_state(&merged_state)?; + stake_account.set_state(&merged_state, &invoke_context.feature_set)?; } // Source is about to be drained, deinitialize its state - source_account.set_state(&StakeStateV2::Uninitialized)?; + source_account.set_state(&StakeStateV2::Uninitialized, &invoke_context.feature_set)?; // Drain the source stake account let lamports = source_account.get_lamports(); - source_account.checked_sub_lamports(lamports)?; - stake_account.checked_add_lamports(lamports)?; + source_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; + stake_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; Ok(()) } @@ -1019,8 +1032,9 @@ pub fn redelegate( deactivate(invoke_context, stake_account, &clock, signers)?; // transfer the effective stake to the uninitialized stake account - stake_account.checked_sub_lamports(effective_stake)?; - uninitialized_stake_account.checked_add_lamports(effective_stake)?; + stake_account.checked_sub_lamports(effective_stake, &invoke_context.feature_set)?; + uninitialized_stake_account + .checked_add_lamports(effective_stake, &invoke_context.feature_set)?; // initialize and schedule `uninitialized_stake_account` for activation let sysvar_cache = invoke_context.get_sysvar_cache(); @@ -1034,16 +1048,19 @@ pub fn redelegate( &uninitialized_stake_meta, &invoke_context.feature_set, )?; - uninitialized_stake_account.set_state(&StakeStateV2::Stake( - uninitialized_stake_meta, - new_stake( - stake_amount, - &vote_pubkey, - &vote_state.convert_to_current(), - clock.epoch, + uninitialized_stake_account.set_state( + &StakeStateV2::Stake( + uninitialized_stake_meta, + new_stake( + stake_amount, + &vote_pubkey, + &vote_state.convert_to_current(), + clock.epoch, + ), + StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, ), - StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, - ))?; + &invoke_context.feature_set, + )?; Ok(()) } @@ -1060,6 +1077,7 @@ pub fn withdraw( withdraw_authority_index: IndexOfAccount, custodian_index: Option<IndexOfAccount>, new_rate_activation_epoch: Option<Epoch>, + feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let withdraw_authority_pubkey = transaction_context.get_key_of_account_at_index( instruction_context .get_index_of_instruction_account_in_transaction(withdraw_authority_index)?, )?; @@ -1081,7 +1099,7 @@ pub fn withdraw( let staked = if clock.epoch >= stake.delegation.deactivation_epoch { stake .delegation - .stake(clock.epoch, Some(stake_history), new_rate_activation_epoch) + .stake(clock.epoch, stake_history, new_rate_activation_epoch) } else { // Assume full stake if the stake account hasn't been // de-activated, because in the future the exposed stake @@ -1144,14 +1162,14 @@ pub fn withdraw( // Deinitialize state upon zero balance if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized)?; + stake_account.set_state(&StakeStateV2::Uninitialized, feature_set)?; } - stake_account.checked_sub_lamports(lamports)?; + stake_account.checked_sub_lamports(lamports, feature_set)?; drop(stake_account); let mut to = instruction_context.try_borrow_instruction_account(transaction_context, to_index)?; - to.checked_add_lamports(lamports)?; + to.checked_add_lamports(lamports, feature_set)?; Ok(()) } @@ -1199,7 +1217,10 @@ pub(crate) fn deactivate_delinquent( // voted in the last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` if eligible_for_deactivate_delinquent(&delinquent_vote_state.epoch_credits, current_epoch) { deactivate_stake(invoke_context, &mut stake, &mut stake_flags, current_epoch)?; - 
stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) + stake_account.set_state( + &StakeStateV2::Stake(meta, stake, stake_flags), + &invoke_context.feature_set, + ) } else { Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()) } @@ -1367,7 +1388,7 @@ impl MergeKind { // activating or deactivating with non-zero effective stake. let status = stake.delegation.stake_activating_and_deactivating( clock.epoch, - Some(stake_history), + stake_history, new_warmup_cooldown_rate_epoch(invoke_context), ); @@ -1428,29 +1449,6 @@ impl MergeKind { } } - // Remove this when the `stake_merge_with_unmatched_credits_observed` feature is removed - fn active_stakes_can_merge( - invoke_context: &InvokeContext, - stake: &Stake, - source: &Stake, - ) -> Result<(), InstructionError> { - Self::active_delegations_can_merge(invoke_context, &stake.delegation, &source.delegation)?; - // `credits_observed` MUST match to prevent earning multiple rewards - // from a stake account by merging it into another stake account that - // is small enough to not be paid out every epoch. This would effectively - // reset the larger stake accounts `credits_observed` to that of the - // smaller account. - if stake.credits_observed == source.credits_observed { - Ok(()) - } else { - ic_msg!( - invoke_context, - "Unable to merge due to credits observed mismatch" - ); - Err(StakeError::MergeMismatch.into()) - } - } - fn merge( self, invoke_context: &InvokeContext, @@ -1461,18 +1459,11 @@ impl MergeKind { self.active_stake() .zip(source.active_stake()) .map(|(stake, source)| { - if invoke_context - .feature_set - .is_active(&stake_merge_with_unmatched_credits_observed::id()) - { - Self::active_delegations_can_merge( - invoke_context, - &stake.delegation, - &source.delegation, - ) - } else { - Self::active_stakes_can_merge(invoke_context, stake, source) - } + Self::active_delegations_can_merge( + invoke_context, + &stake.delegation, + &source.delegation, + ) }) .unwrap_or(Ok(()))?; let merged_state = match (self, source) { @@ -1498,7 +1489,6 @@ impl MergeKind { source_stake.delegation.stake, )?; merge_delegation_stake_and_credits_observed( - invoke_context, &mut stake, source_lamports, source_stake.credits_observed, @@ -1515,7 +1505,6 @@ impl MergeKind { // instead be moved into the destination account as extra, // withdrawable `lamports` merge_delegation_stake_and_credits_observed( - invoke_context, &mut stake, source_stake.delegation.stake, source_stake.credits_observed, @@ -1529,19 +1518,13 @@ impl MergeKind { } fn merge_delegation_stake_and_credits_observed( - invoke_context: &InvokeContext, stake: &mut Stake, absorbed_lamports: u64, absorbed_credits_observed: u64, ) -> Result<(), InstructionError> { - if invoke_context - .feature_set - .is_active(&stake_merge_with_unmatched_credits_observed::id()) - { - stake.credits_observed = - stake_weighted_credits_observed(stake, absorbed_lamports, absorbed_credits_observed) - .ok_or(InstructionError::ArithmeticOverflow)?; - } + stake.credits_observed = + stake_weighted_credits_observed(stake, absorbed_lamports, absorbed_credits_observed) + .ok_or(InstructionError::ArithmeticOverflow)?; stake.delegation.stake = checked_add(stake.delegation.stake, absorbed_lamports)?; Ok(()) } @@ -1603,7 +1586,7 @@ pub fn redeem_rewards( stake_account: &mut AccountSharedData, vote_state: &VoteState, point_value: &PointValue, - stake_history: Option<&StakeHistory>, + stake_history: &StakeHistory, inflation_point_calc_tracer: Option<impl Fn(&InflationPointCalculationEvent)>, new_rate_activation_epoch: Option<Epoch>, ) -> 
Result<(u64, u64), InstructionError> { @@ -1650,7 +1633,7 @@ pub fn redeem_rewards( pub fn calculate_points( stake_state: &StakeStateV2, vote_state: &VoteState, - stake_history: Option<&StakeHistory>, + stake_history: &StakeHistory, new_rate_activation_epoch: Option<Epoch>, ) -> Result<u128, InstructionError> { if let StakeStateV2::Stake(_meta, stake, _stake_flags) = stake_state { @@ -1672,7 +1655,7 @@ pub type RewriteStakeStatus = (&'static str, (u64, u64), (u64, u64)); pub fn new_stake_history_entry<'a, I>( epoch: Epoch, stakes: I, - history: Option<&StakeHistory>, + history: &StakeHistory, new_rate_activation_epoch: Option<Epoch>, ) -> StakeHistoryEntry where @@ -1706,7 +1689,7 @@ pub fn create_stake_history_from_delegations( let entry = new_stake_history_entry( epoch, delegations.iter().chain(bootstrap_delegation.iter()), - Some(&stake_history), + &stake_history, new_rate_activation_epoch, ); stake_history.add(epoch, entry); @@ -1865,13 +1848,6 @@ mod tests { ..Clock::default() }; - // Legacy behaviour when the `require_custodian_for_locked_stake_authorize` feature is - // inactive - assert_eq!( - authorized.authorize(&signers, &staker, StakeAuthorize::Withdrawer, None), - Ok(()) - ); - // No lockup, no custodian assert_eq!( authorized.authorize( @@ -2007,33 +1983,25 @@ mod tests { let mut stake_history = StakeHistory::default(); // assert that this stake follows step function if there's no history assert_eq!( - stake.stake_activating_and_deactivating( - stake.activation_epoch, - Some(&stake_history), - None - ), + stake.stake_activating_and_deactivating(stake.activation_epoch, &stake_history, None), StakeActivationStatus::with_effective_and_activating(0, stake.stake), ); for epoch in stake.activation_epoch + 1..stake.deactivation_epoch { assert_eq!( - stake.stake_activating_and_deactivating(epoch, Some(&stake_history), None), + stake.stake_activating_and_deactivating(epoch, &stake_history, None), StakeActivationStatus::with_effective(stake.stake), ); } // assert that this stake is full deactivating assert_eq!( - stake.stake_activating_and_deactivating( - stake.deactivation_epoch, - Some(&stake_history), - None - ), + stake.stake_activating_and_deactivating(stake.deactivation_epoch, &stake_history, None), StakeActivationStatus::with_deactivating(stake.stake), ); // assert that this stake is fully deactivated if there's no history assert_eq!( stake.stake_activating_and_deactivating( stake.deactivation_epoch + 1, - Some(&stake_history), + &stake_history, None ), StakeActivationStatus::default(), @@ -2048,7 +2016,7 @@ mod tests { ); // assert that this stake is broken, because above setup is broken assert_eq!( - stake.stake_activating_and_deactivating(1, Some(&stake_history), None), + stake.stake_activating_and_deactivating(1, &stake_history, None), StakeActivationStatus::with_effective_and_activating(0, stake.stake), ); @@ -2063,7 +2031,7 @@ mod tests { ); // assert that this stake is broken, because above setup is broken assert_eq!( - stake.stake_activating_and_deactivating(2, Some(&stake_history), None), + stake.stake_activating_and_deactivating(2, &stake_history, None), StakeActivationStatus::with_effective_and_activating( increment, stake.stake - increment @@ -2084,7 +2052,7 @@ mod tests { assert_eq!( stake.stake_activating_and_deactivating( stake.deactivation_epoch + 1, - Some(&stake_history), + &stake_history, None, ), StakeActivationStatus::with_deactivating(stake.stake), @@ -2103,7 +2071,7 @@ mod tests { assert_eq!( stake.stake_activating_and_deactivating( stake.deactivation_epoch + 2, - Some(&stake_history), + 
&stake_history, None, ), // hung, should be lower StakeActivationStatus::with_deactivating(stake.stake), ); } @@ -2173,7 +2141,7 @@ mod tests { (0..expected_stakes.len()) .map(|epoch| stake.stake_activating_and_deactivating( epoch as u64, - Some(&stake_history), + &stake_history, None, )) .collect::<Vec<_>>() @@ -2302,11 +2270,7 @@ mod tests { let calculate_each_staking_status = |stake: &Delegation, epoch_count: usize| -> Vec<_> { (0..epoch_count) .map(|epoch| { - stake.stake_activating_and_deactivating( - epoch as u64, - Some(&stake_history), - None, - ) + stake.stake_activating_and_deactivating(epoch as u64, &stake_history, None) }) .collect::<Vec<_>>() }; @@ -2426,7 +2390,7 @@ mod tests { (0, history.deactivating) }; assert_eq!( - stake.stake_activating_and_deactivating(epoch, Some(&stake_history), None), + stake.stake_activating_and_deactivating(epoch, &stake_history, None), StakeActivationStatus { effective: expected_stake, activating: expected_activating, @@ -2457,7 +2421,7 @@ mod tests { for epoch in 0..epochs { let stake = delegations .iter() - .map(|delegation| delegation.stake(epoch, Some(&stake_history), None)) + .map(|delegation| delegation.stake(epoch, &stake_history, None)) .sum::<u64>(); max_stake = max_stake.max(stake); min_stake = min_stake.min(stake); @@ -2526,7 +2490,7 @@ mod tests { let mut prev_total_effective_stake = delegations .iter() - .map(|delegation| delegation.stake(0, Some(&stake_history), new_rate_activation_epoch)) + .map(|delegation| delegation.stake(0, &stake_history, new_rate_activation_epoch)) .sum::<u64>(); // uncomment and add ! for fun with graphing @@ -2535,7 +2499,7 @@ mod tests { let total_effective_stake = delegations .iter() .map(|delegation| { - delegation.stake(epoch, Some(&stake_history), new_rate_activation_epoch) + delegation.stake(epoch, &stake_history, new_rate_activation_epoch) }) .sum::<u64>(); @@ -2586,7 +2550,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2607,7 +2571,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2644,7 +2608,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2660,7 +2624,13 @@ mod tests { // no overflow on points assert_eq!( u128::from(stake.delegation.stake) * epoch_slots, - calculate_stake_points(&stake, &vote_state, None, null_tracer(), None) + calculate_stake_points( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) ); } @@ -2682,7 +2652,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2707,7 +2677,7 @@ mod tests { points: 2 // all his }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2729,7 +2699,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2754,7 +2724,7 @@ mod tests { points: 2 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2777,7 +2747,7 @@ mod tests { points: 2 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2802,7 +2772,7 @@ mod tests { points: 4 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2821,7 +2791,7 @@ mod tests { points: 4 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2837,7 +2807,7 @@ mod tests { points: 4 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2860,7 +2830,7 @@ mod tests { points: 4 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2883,7 +2853,7 
@@ mod tests { points: 4 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2895,7 +2865,13 @@ mod tests { new_credits_observed: 4, force_credits_update_with_skipped_reward: false, }, - calculate_stake_points_and_credits(&stake, &vote_state, None, null_tracer(), None) + calculate_stake_points_and_credits( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) ); // credits_observed is auto-rewinded when vote_state credits are assumed to have been @@ -2908,7 +2884,13 @@ mod tests { new_credits_observed: 4, force_credits_update_with_skipped_reward: true, }, - calculate_stake_points_and_credits(&stake, &vote_state, None, null_tracer(), None) + calculate_stake_points_and_credits( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) ); // this is new behavior 2; don't hint when credits both from stake and vote are identical stake.credits_observed = 4; @@ -2918,7 +2900,13 @@ mod tests { new_credits_observed: 4, force_credits_update_with_skipped_reward: false, }, - calculate_stake_points_and_credits(&stake, &vote_state, None, null_tracer(), None) + calculate_stake_points_and_credits( + &stake, + &vote_state, + &StakeHistory::default(), + null_tracer(), + None + ) ); // get rewards and credits observed when not the activation epoch @@ -2939,7 +2927,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -2963,7 +2951,7 @@ mod tests { points: 1 }, &vote_state, - None, + &StakeHistory::default(), null_tracer(), None, ) @@ -3051,20 +3039,12 @@ mod tests { }; let identical = good_stake; - assert!( - MergeKind::active_stakes_can_merge(&invoke_context, &good_stake, &identical).is_ok() - ); - - let bad_credits_observed = Stake { - credits_observed: good_stake.credits_observed + 1, - ..good_stake - }; - assert!(MergeKind::active_stakes_can_merge( + assert!(MergeKind::active_delegations_can_merge( &invoke_context, - &good_stake, - &bad_credits_observed + &good_stake.delegation, + &identical.delegation ) - .is_err()); + .is_ok()); let good_delegation = good_stake.delegation; let different_stake_ok = Delegation { diff --git a/programs/system/src/system_instruction.rs b/programs/system/src/system_instruction.rs index 95860379fb17a0..0c9daf22d4b024 100644 --- a/programs/system/src/system_instruction.rs +++ b/programs/system/src/system_instruction.rs @@ -56,7 +56,10 @@ pub fn advance_nonce_account( next_durable_nonce, invoke_context.lamports_per_signature, ); - account.set_state(&Versions::new(State::Initialized(new_data))) + account.set_state( + &Versions::new(State::Initialized(new_data)), + &invoke_context.feature_set, + ) } State::Uninitialized => { ic_msg!( @@ -114,7 +117,10 @@ pub fn withdraw_nonce_account( ); return Err(SystemError::NonceBlockhashNotExpired.into()); } - from.set_state(&Versions::new(State::Uninitialized))?; + from.set_state( + &Versions::new(State::Uninitialized), + &invoke_context.feature_set, + )?; } else { let min_balance = rent.minimum_balance(from.get_data().len()); let amount = checked_add(lamports, min_balance)?; @@ -141,11 +147,11 @@ pub fn withdraw_nonce_account( return Err(InstructionError::MissingRequiredSignature); } - from.checked_sub_lamports(lamports)?; + from.checked_sub_lamports(lamports, &invoke_context.feature_set)?; drop(from); let mut to = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to.checked_add_lamports(lamports)?; + to.checked_add_lamports(lamports, &invoke_context.feature_set)?; 
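
The dominant change across the runtime programs from here on is that every state- or lamport-mutating method on a borrowed account now takes the active feature set as an extra argument (`set_state`, `set_data_length`, `set_owner`, `set_lamports`, `checked_add_lamports`, `checked_sub_lamports`, `set_data_from_slice`), threaded down from `invoke_context.feature_set`. A condensed sketch of the calling convention as it appears in the nonce hunks above (names taken from those hunks, error handling via `?` as in the originals):

    // Lamport moves and state writes are now gated on the active feature set.
    from.checked_sub_lamports(lamports, &invoke_context.feature_set)?;
    to.checked_add_lamports(lamports, &invoke_context.feature_set)?;
    account.set_state(
        &Versions::new(State::Uninitialized),
        &invoke_context.feature_set,
    )?;
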
Ok(()) } @@ -184,7 +190,7 @@ pub fn initialize_nonce_account( invoke_context.lamports_per_signature, ); let state = State::Initialized(data); - account.set_state(&Versions::new(state)) + account.set_state(&Versions::new(state), &invoke_context.feature_set) } State::Initialized(_) => { ic_msg!( @@ -215,7 +221,7 @@ pub fn authorize_nonce_account( .get_state::<Versions>()? .authorize(signers, *nonce_authority) { - Ok(versions) => account.set_state(&versions), + Ok(versions) => account.set_state(&versions, &invoke_context.feature_set), Err(AuthorizeNonceError::Uninitialized) => { ic_msg!( invoke_context, @@ -996,7 +1002,9 @@ mod test { let mut nonce_account = instruction_context .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) .unwrap(); - nonce_account.checked_sub_lamports(42 * 2).unwrap(); + nonce_account + .checked_sub_lamports(42 * 2, &invoke_context.feature_set) + .unwrap(); set_invoke_context_blockhash!(invoke_context, 63); let authorized = *nonce_account.get_key(); let result = diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index b224997dc625a7..2a66b388103f9a 100644 --- a/programs/system/src/system_processor.rs +++ b/programs/system/src/system_processor.rs @@ -9,7 +9,6 @@ use { sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ - feature_set, instruction::InstructionError, nonce, program_utils::limited_deserialize, @@ -105,7 +104,7 @@ fn allocate( return Err(SystemError::InvalidAccountDataLength.into()); } - account.set_data_length(space as usize)?; + account.set_data_length(space as usize, &invoke_context.feature_set)?; Ok(()) } @@ -127,7 +126,7 @@ fn assign( return Err(InstructionError::MissingRequiredSignature); } - account.set_owner(&owner.to_bytes()) + account.set_owner(&owner.to_bytes(), &invoke_context.feature_set) } fn allocate_and_assign( @@ -204,11 +203,11 @@ fn transfer_verified( return Err(SystemError::ResultWithNegativeLamports.into()); } - from.checked_sub_lamports(lamports)?; + from.checked_sub_lamports(lamports, &invoke_context.feature_set)?; drop(from); let mut to = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to.checked_add_lamports(lamports)?; + to.checked_add_lamports(lamports, &invoke_context.feature_set)?; Ok(()) } @@ -220,14 +219,6 @@ fn transfer( transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) -> Result<(), InstructionError> { - if !invoke_context - .feature_set - .is_active(&feature_set::system_transfer_zero_check::id()) - && lamports == 0 - { - return Ok(()); - } - if !instruction_context.is_instruction_account_signer(from_account_index)? { ic_msg!( invoke_context, @@ -261,14 +252,6 @@ fn transfer_with_seed( transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) -> Result<(), InstructionError> { - if !invoke_context - .feature_set - .is_active(&feature_set::system_transfer_zero_check::id()) - && lamports == 0 - { - return Ok(()); - } - if !instruction_context.is_instruction_account_signer(from_base_account_index)? 
{ ic_msg!( invoke_context, @@ -498,7 +481,9 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let nonce_versions: nonce::state::Versions = nonce_account.get_state()?; match nonce_versions.upgrade() { None => Err(InstructionError::InvalidArgument), - Some(nonce_versions) => nonce_account.set_state(&nonce_versions), + Some(nonce_versions) => { + nonce_account.set_state(&nonce_versions, &invoke_context.feature_set) + } } } SystemInstruction::Allocate { space } => { diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index d09309ddc81fb5..443aeb391b8c13 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -1,7 +1,7 @@ //! Vote program processor use { - crate::{vote_error::VoteError, vote_state}, + crate::vote_state, log::*, solana_program::vote::{instruction::VoteInstruction, program::id, state::VoteAuthorize}, solana_program_runtime::{ @@ -9,7 +9,6 @@ use { sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ - feature_set, instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, @@ -140,20 +139,14 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ) } VoteInstruction::UpdateCommission(commission) => { - if invoke_context.feature_set.is_active( - &feature_set::commission_updates_only_allowed_in_first_half_of_epoch::id(), - ) { - let sysvar_cache = invoke_context.get_sysvar_cache(); - let epoch_schedule = sysvar_cache.get_epoch_schedule()?; - let clock = sysvar_cache.get_clock()?; - if !vote_state::is_commission_update_allowed(clock.slot, &epoch_schedule) { - return Err(VoteError::CommissionUpdateTooLate.into()); - } - } + let sysvar_cache = invoke_context.get_sysvar_cache(); + vote_state::update_commission( &mut me, commission, &signers, + sysvar_cache.get_epoch_schedule()?.as_ref(), + sysvar_cache.get_clock()?.as_ref(), &invoke_context.feature_set, ) } @@ -219,30 +212,23 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ) } VoteInstruction::AuthorizeChecked(vote_authorize) => { - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(4)?; - let voter_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); - } - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - vote_state::authorize( - &mut me, - voter_pubkey, - vote_authorize, - &signers, - &clock, - &invoke_context.feature_set, - ) - } else { - Err(InstructionError::InvalidInstructionData) + instruction_context.check_number_of_instruction_accounts(4)?; + let voter_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? 
{ + return Err(InstructionError::MissingRequiredSignature); } + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + vote_state::authorize( + &mut me, + voter_pubkey, + vote_authorize, + &signers, + &clock, + &invoke_context.feature_set, + ) } } }); diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index cdc6780d2bc000..4e77b4adc9d281 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -156,22 +156,24 @@ fn set_vote_account_state( && (!vote_account .is_rent_exempt_at_data_length(VoteStateVersions::vote_state_size_of(true)) || vote_account - .set_data_length(VoteStateVersions::vote_state_size_of(true)) + .set_data_length(VoteStateVersions::vote_state_size_of(true), feature_set) .is_err()) { // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be // resized for other reasons. So store the V1_14_11 version. - return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( - VoteState1_14_11::from(vote_state), - ))); + return vote_account.set_state( + &VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))), + feature_set, + ); } // Vote account is large enough to store the newest version of vote state - vote_account.set_state(&VoteStateVersions::new_current(vote_state)) + vote_account.set_state(&VoteStateVersions::new_current(vote_state), feature_set) // Else when the vote_state_add_vote_latency feature is not enabled, then the V1_14_11 version is stored } else { - vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( - VoteState1_14_11::from(vote_state), - ))) + vote_account.set_state( + &VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))), + feature_set, + ) } } @@ -877,11 +879,41 @@ pub fn update_commission( vote_account: &mut BorrowedAccount, commission: u8, signers: &HashSet<Pubkey>, + epoch_schedule: &EpochSchedule, + clock: &Clock, feature_set: &FeatureSet, ) -> Result<(), InstructionError> { - let mut vote_state: VoteState = vote_account - .get_state::<VoteStateVersions>()? - .convert_to_current(); + // Decode vote state only once, and only if needed + let mut vote_state = None; + + let enforce_commission_update_rule = + if feature_set.is_active(&feature_set::allow_commission_decrease_at_any_time::id()) { + if let Ok(decoded_vote_state) = vote_account.get_state::<VoteStateVersions>() { + vote_state = Some(decoded_vote_state.convert_to_current()); + is_commission_increase(vote_state.as_ref().unwrap(), commission) + } else { + true + } + } else { + true + }; + + #[allow(clippy::collapsible_if)] + if enforce_commission_update_rule + && feature_set + .is_active(&feature_set::commission_updates_only_allowed_in_first_half_of_epoch::id()) + { + if !is_commission_update_allowed(clock.slot, epoch_schedule) { + return Err(VoteError::CommissionUpdateTooLate.into()); + } + } + + let mut vote_state = match vote_state { + Some(vote_state) => vote_state, + None => vote_account + .get_state::<VoteStateVersions>()? 
+ .convert_to_current(), + }; // current authorized withdrawer must say "yay" verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; @@ -891,6 +923,11 @@ pub fn update_commission( set_vote_account_state(vote_account, vote_state, feature_set) } +/// Given a proposed new commission, returns true if this would be a commission increase, false otherwise +pub fn is_commission_increase(vote_state: &VoteState, commission: u8) -> bool { + commission > vote_state.commission +} + /// Given the current slot and epoch schedule, determine if a commission change /// is allowed pub fn is_commission_update_allowed(slot: Slot, epoch_schedule: &EpochSchedule) -> bool { @@ -971,11 +1008,11 @@ pub fn withdraw( } } - vote_account.checked_sub_lamports(lamports)?; + vote_account.checked_sub_lamports(lamports, feature_set)?; drop(vote_account); let mut to_account = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to_account.checked_add_lamports(lamports)?; + to_account.checked_add_lamports(lamports, feature_set)?; Ok(()) } @@ -1237,7 +1274,7 @@ mod tests { let processor_account = AccountSharedData::new(0, 0, &solana_sdk::native_loader::id()); let transaction_context = TransactionContext::new( vec![(id(), processor_account), (node_pubkey, vote_account)], - rent, + rent.clone(), 0, 0, ); @@ -1310,7 +1347,7 @@ mod tests { // Test that when the feature is enabled, if the vote account does have sufficient lamports, the // new vote state is written out assert_eq!( - borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of())), + borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of()), &feature_set), Ok(()) ); assert_eq!( @@ -1365,6 +1402,192 @@ mod tests { assert_eq!(vote_state.votes.len(), 2); } + #[test] + fn test_update_commission() { + let node_pubkey = Pubkey::new_unique(); + let withdrawer_pubkey = Pubkey::new_unique(); + let clock = Clock::default(); + let vote_state = VoteState::new( + &VoteInit { + node_pubkey, + authorized_voter: withdrawer_pubkey, + authorized_withdrawer: withdrawer_pubkey, + commission: 10, + }, + &clock, + ); + + let serialized = + bincode::serialize(&VoteStateVersions::Current(Box::new(vote_state.clone()))).unwrap(); + let serialized_len = serialized.len(); + let rent = Rent::default(); + let lamports = rent.minimum_balance(serialized_len); + let mut vote_account = AccountSharedData::new(lamports, serialized_len, &id()); + vote_account.set_data_from_slice(&serialized); + + // Create a fake TransactionContext with a fake InstructionContext with a single account which is the + // vote account that was just created + let processor_account = AccountSharedData::new(0, 0, &solana_sdk::native_loader::id()); + let transaction_context = TransactionContext::new( + vec![(id(), processor_account), (node_pubkey, vote_account)], + rent, + 0, + 0, + ); + let mut instruction_context = InstructionContext::default(); + instruction_context.configure( + &[0], + &[InstructionAccount { + index_in_transaction: 1, + index_in_caller: 1, + index_in_callee: 0, + is_signer: false, + is_writable: true, + }], + &[], + ); + + // Get the BorrowedAccount from the InstructionContext which is what is used to manipulate and inspect account + // state + let mut borrowed_account = instruction_context + .try_borrow_instruction_account(&transaction_context, 0) + .unwrap(); + + let epoch_schedule = std::sync::Arc::new(EpochSchedule::without_warmup()); + + let first_half_clock = std::sync::Arc::new(Clock { + slot: 
epoch_schedule.slots_per_epoch / 4, + ..Clock::default() + }); + + let second_half_clock = std::sync::Arc::new(Clock { + slot: (epoch_schedule.slots_per_epoch * 3) / 4, + ..Clock::default() + }); + + let mut feature_set = FeatureSet::default(); + feature_set.activate( + &feature_set::commission_updates_only_allowed_in_first_half_of_epoch::id(), + 1, + ); + + let signers: HashSet<Pubkey> = vec![withdrawer_pubkey].into_iter().collect(); + + // Increase commission in first half of epoch -- allowed + assert_eq!( + borrowed_account + .get_state::<VoteStateVersions>() + .unwrap() + .convert_to_current() + .commission, + 10 + ); + assert_matches!( + update_commission( + &mut borrowed_account, + 11, + &signers, + &epoch_schedule, + &first_half_clock, + &feature_set + ), + Ok(()) + ); + assert_eq!( + borrowed_account + .get_state::<VoteStateVersions>() + .unwrap() + .convert_to_current() + .commission, + 11 + ); + + // Increase commission in second half of epoch -- disallowed + assert_matches!( + update_commission( + &mut borrowed_account, + 12, + &signers, + &epoch_schedule, + &second_half_clock, + &feature_set + ), + Err(_) + ); + assert_eq!( + borrowed_account + .get_state::<VoteStateVersions>() + .unwrap() + .convert_to_current() + .commission, + 11 + ); + + // Decrease commission in first half of epoch -- allowed + assert_matches!( + update_commission( + &mut borrowed_account, + 10, + &signers, + &epoch_schedule, + &first_half_clock, + &feature_set + ), + Ok(()) + ); + assert_eq!( + borrowed_account + .get_state::<VoteStateVersions>() + .unwrap() + .convert_to_current() + .commission, + 10 + ); + + // Decrease commission in second half of epoch -- disallowed because feature_set does not allow it + assert_matches!( + update_commission( + &mut borrowed_account, + 9, + &signers, + &epoch_schedule, + &second_half_clock, + &feature_set + ), + Err(_) + ); + assert_eq!( + borrowed_account + .get_state::<VoteStateVersions>() + .unwrap() + .convert_to_current() + .commission, + 10 + ); + + // Decrease commission in second half of epoch -- allowed because feature_set allows it + feature_set.activate(&feature_set::allow_commission_decrease_at_any_time::id(), 1); + assert_matches!( + update_commission( + &mut borrowed_account, + 9, + &signers, + &epoch_schedule, + &second_half_clock, + &feature_set + ), + Ok(()) + ); + assert_eq!( + borrowed_account + .get_state::<VoteStateVersions>() + .unwrap() + .convert_to_current() + .commission, + 9 + ); + } + #[test] fn test_vote_double_lockout_after_expiration() { let voter_pubkey = solana_sdk::pubkey::new_rand(); diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 0aa75c4ef5cff5..cedf42712377dc 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -80,7 +80,8 @@ where return Err(InstructionError::InvalidAccountData); } - proof_context_account.set_data_from_slice(&context_state_data)?; + proof_context_account + .set_data_from_slice(&context_state_data, &invoke_context.feature_set)?; } Ok(()) @@ -122,10 +123,13 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), let mut destination_account = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - destination_account.checked_add_lamports(proof_context_account.get_lamports())?; - proof_context_account.set_lamports(0)?; - proof_context_account.set_data_length(0)?; - proof_context_account.set_owner(system_program::id().as_ref())?; + destination_account.checked_add_lamports( + proof_context_account.get_lamports(), + &invoke_context.feature_set, + )?; + proof_context_account.set_lamports(0, 
&invoke_context.feature_set)?; + proof_context_account.set_data_length(0, &invoke_context.feature_set)?; + proof_context_account.set_owner(system_program::id().as_ref(), &invoke_context.feature_set)?; Ok(()) } @@ -135,6 +139,11 @@ declare_process_instruction!(Entrypoint, 0, |invoke_context| { let native_programs_consume_cu = invoke_context .feature_set .is_active(&feature_set::native_programs_consume_cu::id()); + + let enable_zk_transfer_with_fee = invoke_context + .feature_set + .is_active(&feature_set::enable_zk_transfer_with_fee::id()); + let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); @@ -198,6 +207,11 @@ declare_process_instruction!(Entrypoint, 0, |invoke_context| { process_verify_proof::(invoke_context) } ProofInstruction::VerifyTransferWithFee => { + // transfer with fee related proofs are not enabled + if !enable_zk_transfer_with_fee { + return Err(InstructionError::InvalidInstructionData); + } + if native_programs_consume_cu { invoke_context .consume_checked(VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS) @@ -291,6 +305,11 @@ declare_process_instruction!(Entrypoint, 0, |invoke_context| { >(invoke_context) } ProofInstruction::VerifyFeeSigma => { + // transfer with fee related proofs are not enabled + if !enable_zk_transfer_with_fee { + return Err(InstructionError::InvalidInstructionData); + } + invoke_context .consume_checked(VERIFY_FEE_SIGMA_COMPUTE_UNITS) .map_err(|_| InstructionError::ComputationalBudgetExceeded)?; diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index fe0540ebed4227..408df60454e4e1 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -3,7 +3,7 @@ //! The [`PubsubClient`] implements [Solana WebSocket event //! subscriptions][spec]. //! -//! [spec]: https://docs.solana.com/developing/clients/jsonrpc-api#subscription-websocket +//! [spec]: https://solana.com/docs/rpc/websocket //! //! This is a nonblocking (async) API. For a blocking API use the synchronous //! client in [`crate::pubsub_client`]. @@ -366,7 +366,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`accountSubscribe`] RPC method. /// - /// [`accountSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#accountsubscribe + /// [`accountSubscribe`]: https://solana.com/docs/rpc/websocket#accountsubscribe pub async fn account_subscribe( &self, pubkey: &Pubkey, @@ -387,7 +387,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`blockSubscribe`] RPC method. /// - /// [`blockSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#blocksubscribe---unstable-disabled-by-default + /// [`blockSubscribe`]: https://solana.com/docs/rpc/websocket#blocksubscribe pub async fn block_subscribe( &self, filter: RpcBlockSubscribeFilter, @@ -404,7 +404,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`logsSubscribe`] RPC method. /// - /// [`logsSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#logssubscribe + /// [`logsSubscribe`]: https://solana.com/docs/rpc/websocket#logssubscribe pub async fn logs_subscribe( &self, filter: RpcTransactionLogsFilter, @@ -422,7 +422,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`programSubscribe`] RPC method. 
/// - /// [`programSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#programsubscribe + /// [`programSubscribe`]: https://solana.com/docs/rpc/websocket#programsubscribe pub async fn program_subscribe( &self, pubkey: &Pubkey, @@ -458,7 +458,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`voteSubscribe`] RPC method. /// - /// [`voteSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#votesubscribe---unstable-disabled-by-default + /// [`voteSubscribe`]: https://solana.com/docs/rpc/websocket#votesubscribe pub async fn vote_subscribe(&self) -> SubscribeResult<'_, RpcVote> { self.subscribe("vote", json!([])).await } @@ -468,13 +468,13 @@ impl PubsubClient { /// Receives messages of type [`Slot`] when a new [root] is set by the /// validator. /// - /// [root]: https://docs.solana.com/terminology#root + /// [root]: https://solana.com/docs/terminology#root /// /// # RPC Reference /// /// This method corresponds directly to the [`rootSubscribe`] RPC method. /// - /// [`rootSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#rootsubscribe + /// [`rootSubscribe`]: https://solana.com/docs/rpc/websocket#rootsubscribe pub async fn root_subscribe(&self) -> SubscribeResult<'_, Slot> { self.subscribe("root", json!([])).await } @@ -491,7 +491,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`signatureSubscribe`] RPC method. /// - /// [`signatureSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#signaturesubscribe + /// [`signatureSubscribe`]: https://solana.com/docs/rpc/websocket#signaturesubscribe pub async fn signature_subscribe( &self, signature: &Signature, @@ -509,7 +509,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`slotSubscribe`] RPC method. /// - /// [`slotSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#slotsubscribe + /// [`slotSubscribe`]: https://solana.com/docs/rpc/websocket#slotsubscribe pub async fn slot_subscribe(&self) -> SubscribeResult<'_, SlotInfo> { self.subscribe("slot", json!([])).await } @@ -519,7 +519,7 @@ impl PubsubClient { /// Receives messages of type [`SlotUpdate`] when various updates to a slot occur. /// /// Note that this method operates differently than other subscriptions: - /// instead of sending the message to a reciever on a channel, it accepts a + /// instead of sending the message to a receiver on a channel, it accepts a /// `handler` callback that processes the message directly. This processing /// occurs on another thread. /// @@ -527,7 +527,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`slotUpdatesSubscribe`] RPC method. /// - /// [`slotUpdatesSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#slotsupdatessubscribe---unstable + /// [`slotUpdatesSubscribe`]: https://solana.com/docs/rpc/websocket#slotsupdatessubscribe pub async fn slot_updates_subscribe(&self) -> SubscribeResult<'_, SlotUpdate> { self.subscribe("slotsUpdates", json!([])).await } diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index 612df285d8e5e3..e1a2dd34546528 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -3,7 +3,7 @@ //! The [`PubsubClient`] implements [Solana WebSocket event //! subscriptions][spec]. //! -//! [spec]: https://docs.solana.com/developing/clients/jsonrpc-api#subscription-websocket +//! [spec]: https://solana.com/docs/rpc/websocket //! //! This is a blocking API. 
For a non-blocking API use the asynchronous client //! in [`crate::nonblocking::pubsub_client`]. @@ -369,7 +369,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`accountSubscribe`] RPC method. /// - /// [`accountSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#accountsubscribe + /// [`accountSubscribe`]: https://solana.com/docs/rpc/websocket/accountsubscribe pub fn account_subscribe( url: &str, pubkey: &Pubkey, @@ -422,7 +422,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`blockSubscribe`] RPC method. /// - /// [`blockSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#blocksubscribe---unstable-disabled-by-default + /// [`blockSubscribe`]: https://solana.com/docs/rpc/websocket/blocksubscribe pub fn block_subscribe( url: &str, filter: RpcBlockSubscribeFilter, @@ -470,7 +470,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`logsSubscribe`] RPC method. /// - /// [`logsSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#logssubscribe + /// [`logsSubscribe`]: https://solana.com/docs/rpc/websocket/logssubscribe pub fn logs_subscribe( url: &str, filter: RpcTransactionLogsFilter, @@ -519,7 +519,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`programSubscribe`] RPC method. /// - /// [`programSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#programsubscribe + /// [`programSubscribe`]: https://solana.com/docs/rpc/websocket/programsubscribe pub fn program_subscribe( url: &str, pubkey: &Pubkey, @@ -584,7 +584,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`voteSubscribe`] RPC method. /// - /// [`voteSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#votesubscribe---unstable-disabled-by-default + /// [`voteSubscribe`]: https://solana.com/docs/rpc/websocket/votesubscribe pub fn vote_subscribe(url: &str) -> Result { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; @@ -623,13 +623,13 @@ impl PubsubClient { /// Receives messages of type [`Slot`] when a new [root] is set by the /// validator. /// - /// [root]: https://docs.solana.com/terminology#root + /// [root]: https://solana.com/docs/terminology#root /// /// # RPC Reference /// /// This method corresponds directly to the [`rootSubscribe`] RPC method. /// - /// [`rootSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#rootsubscribe + /// [`rootSubscribe`]: https://solana.com/docs/rpc/websocket/rootsubscribe pub fn root_subscribe(url: &str) -> Result { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; @@ -675,7 +675,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`signatureSubscribe`] RPC method. /// - /// [`signatureSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#signaturesubscribe + /// [`signatureSubscribe`]: https://solana.com/docs/rpc/websocket/signaturesubscribe pub fn signature_subscribe( url: &str, signature: &Signature, @@ -726,7 +726,7 @@ impl PubsubClient { /// /// This method corresponds directly to the [`slotSubscribe`] RPC method. /// - /// [`slotSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#slotsubscribe + /// [`slotSubscribe`]: https://solana.com/docs/rpc/websocket/slotsubscribe pub fn slot_subscribe(url: &str) -> Result { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; @@ -766,7 +766,7 @@ impl PubsubClient { /// Receives messages of type [`SlotUpdate`] when various updates to a slot occur. 
     ///
     /// Note that this method operates differently than other subscriptions:
-    /// instead of sending the message to a reciever on a channel, it accepts a
+    /// instead of sending the message to a receiver on a channel, it accepts a
     /// `handler` callback that processes the message directly. This processing
     /// occurs on another thread.
     ///
@@ -774,7 +774,7 @@ impl PubsubClient {
     ///
     /// This method corresponds directly to the [`slotUpdatesSubscribe`] RPC method.
     ///
-    /// [`slotUpdatesSubscribe`]: https://docs.solana.com/developing/clients/jsonrpc-api#slotsupdatessubscribe---unstable
+    /// [`slotUpdatesSubscribe`]: https://solana.com/docs/rpc/websocket/slotsupdatessubscribe
     pub fn slot_updates_subscribe(
         url: &str,
         handler: impl Fn(SlotUpdate) + Send + 'static,
diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs
index 90a55deaa691ed..6339c5080d9b17 100644
--- a/quic-client/src/lib.rs
+++ b/quic-client/src/lib.rs
@@ -84,39 +84,52 @@ impl ConnectionPool for QuicPool {
     }
 }
 
-#[derive(Clone)]
 pub struct QuicConfig {
-    client_certificate: Arc<QuicClientCertificate>,
+    // Arc to prevent having to copy the struct
+    client_certificate: RwLock<Arc<QuicClientCertificate>>,
     maybe_staked_nodes: Option<Arc<RwLock<StakedNodes>>>,
     maybe_client_pubkey: Option<Pubkey>,
 
     // The optional specified endpoint for the quic based client connections
     // If not specified, the connection cache will create as needed.
     client_endpoint: Option<Endpoint>,
+    addr: IpAddr,
+}
+
+impl Clone for QuicConfig {
+    fn clone(&self) -> Self {
+        let cert_guard = self.client_certificate.read().unwrap();
+        QuicConfig {
+            client_certificate: RwLock::new(cert_guard.clone()),
+            maybe_staked_nodes: self.maybe_staked_nodes.clone(),
+            maybe_client_pubkey: self.maybe_client_pubkey,
+            client_endpoint: self.client_endpoint.clone(),
+            addr: self.addr,
+        }
+    }
 }
 
 impl NewConnectionConfig for QuicConfig {
     fn new() -> Result {
-        let (cert, priv_key) =
-            new_self_signed_tls_certificate(&Keypair::new(), IpAddr::V4(Ipv4Addr::UNSPECIFIED))?;
+        let addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED);
+        let (cert, priv_key) = new_self_signed_tls_certificate(&Keypair::new(), addr)?;
         Ok(Self {
-            client_certificate: Arc::new(QuicClientCertificate {
+            client_certificate: RwLock::new(Arc::new(QuicClientCertificate {
                 certificate: cert,
                 key: priv_key,
-            }),
+            })),
             maybe_staked_nodes: None,
             maybe_client_pubkey: None,
             client_endpoint: None,
+            addr,
         })
     }
 }
 
 impl QuicConfig {
     fn create_endpoint(&self) -> QuicLazyInitializedEndpoint {
-        QuicLazyInitializedEndpoint::new(
-            self.client_certificate.clone(),
-            self.client_endpoint.as_ref().cloned(),
-        )
+        let cert_guard = self.client_certificate.read().unwrap();
+        QuicLazyInitializedEndpoint::new(cert_guard.clone(), self.client_endpoint.as_ref().cloned())
     }
 
     fn compute_max_parallel_streams(&self) -> usize {
@@ -143,7 +156,23 @@ impl QuicConfig {
         ipaddr: IpAddr,
     ) -> Result<(), RcgenError> {
         let (cert, priv_key) = new_self_signed_tls_certificate(keypair, ipaddr)?;
-        self.client_certificate = Arc::new(QuicClientCertificate {
+        self.addr = ipaddr;
+
+        let mut cert_guard = self.client_certificate.write().unwrap();
+
+        *cert_guard = Arc::new(QuicClientCertificate {
+            certificate: cert,
+            key: priv_key,
+        });
+        Ok(())
+    }
+
+    pub fn update_keypair(&self, keypair: &Keypair) -> Result<(), RcgenError> {
+        let (cert, priv_key) = new_self_signed_tls_certificate(keypair, self.addr)?;
+
+        let mut cert_guard = self.client_certificate.write().unwrap();
+
+        *cert_guard = Arc::new(QuicClientCertificate {
             certificate: cert,
             key: priv_key,
         });
@@ -212,6 +241,11 @@ impl ConnectionManager for QuicConnectionManager {
     fn new_connection_config(&self) -> QuicConfig {
         self.connection_config.clone()
     }
+
+    fn update_key(&self, key: &Keypair) -> Result<(), Box<dyn std::error::Error>> {
+        self.connection_config.update_keypair(key)?;
+        Ok(())
+    }
 }
 
 impl QuicConnectionManager {
diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs
index c2a8e862b69138..f057980c79fe06 100644
--- a/quic-client/src/quic_client.rs
+++ b/quic-client/src/quic_client.rs
@@ -26,7 +26,7 @@ const SEND_DATA_TIMEOUT: Duration = Duration::from_secs(10);
 
 /// A semaphore used for limiting the number of asynchronous tasks spawn to the
 /// runtime. Before spawnning a task, use acquire. After the task is done (be it
-/// succsess or failure), call release.
+/// success or failure), call release.
 struct AsyncTaskSemaphore {
     /// Keep the counter info about the usage
     counter: Mutex<u64>,
diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs
index 7608e2b7b265c7..9f18acd5c75772 100644
--- a/quic-client/tests/quic_client.rs
+++ b/quic-client/tests/quic_client.rs
@@ -10,8 +10,8 @@ mod tests {
         },
         solana_sdk::{net::DEFAULT_TPU_COALESCE, packet::PACKET_DATA_SIZE, signature::Keypair},
         solana_streamer::{
-            nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, streamer::StakedNodes,
-            tls_certificates::new_self_signed_tls_certificate,
+            nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, quic::SpawnServerResult,
+            streamer::StakedNodes, tls_certificates::new_self_signed_tls_certificate,
         },
         std::{
             net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
@@ -46,7 +46,7 @@ mod tests {
                 assert_eq!(p.meta().size, num_bytes);
             }
         }
-        assert_eq!(total_packets, num_expected_packets);
+        assert!(total_packets > 0);
     }
 
     fn server_args() -> (UdpSocket, Arc<AtomicBool>, Keypair, IpAddr) {
@@ -68,7 +68,11 @@ mod tests {
         let (sender, receiver) = unbounded();
         let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
         let (s, exit, keypair, ip) = server_args();
-        let (_, t) = solana_streamer::quic::spawn_server(
+        let SpawnServerResult {
+            endpoint: _,
+            thread: t,
+            key_updater: _,
+        } = solana_streamer::quic::spawn_server(
             "quic_streamer_test",
             s.try_clone().unwrap(),
             &keypair,
@@ -135,7 +139,7 @@ mod tests {
                 assert_eq!(p.meta().size, num_bytes);
             }
         }
-        assert_eq!(total_packets, num_expected_packets);
+        assert!(total_packets > 0);
     }
 
     #[tokio::test]
@@ -178,7 +182,9 @@ mod tests {
         let num_bytes = PACKET_DATA_SIZE;
         let num_expected_packets: usize = 3000;
         let packets = vec![vec![0u8; PACKET_DATA_SIZE]; num_expected_packets];
-        assert!(client.send_data_batch(&packets).await.is_ok());
+        for packet in packets {
+            let _ = client.send_data(&packet).await;
+        }
 
         nonblocking_check_packets(receiver, num_bytes, num_expected_packets).await;
         exit.store(true, Ordering::Relaxed);
@@ -189,7 +195,7 @@
     fn test_quic_bi_direction() {
         /// This tests bi-directional quic communication. There are the following components
         /// The request receiver -- responsible for receiving requests
-        /// The request sender -- responsible sending requests to the request reciever using quic
+        /// The request sender -- responsible sending requests to the request receiver using quic
         /// The response receiver -- responsible for receiving the responses to the requests
         /// The response sender -- responsible for sending responses to the response receiver.
        /// In this we demonstrate that the request sender and the response receiver use the
@@ -204,7 +210,11 @@
         let (sender, receiver) = unbounded();
         let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
         let (request_recv_socket, request_recv_exit, keypair, request_recv_ip) = server_args();
-        let (request_recv_endpoint, request_recv_thread) = solana_streamer::quic::spawn_server(
+        let SpawnServerResult {
+            endpoint: request_recv_endpoint,
+            thread: request_recv_thread,
+            key_updater: _,
+        } = solana_streamer::quic::spawn_server(
             "quic_streamer_test",
             request_recv_socket.try_clone().unwrap(),
             &keypair,
@@ -228,7 +238,11 @@
         let addr = response_recv_socket.local_addr().unwrap().ip();
         let port = response_recv_socket.local_addr().unwrap().port();
         let server_addr = SocketAddr::new(addr, port);
-        let (response_recv_endpoint, response_recv_thread) = solana_streamer::quic::spawn_server(
+        let SpawnServerResult {
+            endpoint: response_recv_endpoint,
+            thread: response_recv_thread,
+            key_updater: _,
+        } = solana_streamer::quic::spawn_server(
             "quic_streamer_test",
             response_recv_socket,
             &keypair2,
diff --git a/remote-wallet/src/locator.rs b/remote-wallet/src/locator.rs
index ca8e7d696cffe2..581b87f609dda4 100644
--- a/remote-wallet/src/locator.rs
+++ b/remote-wallet/src/locator.rs
@@ -124,7 +124,7 @@ impl Locator {
         let host = uri.host().map(|h| h.to_string());
         match (scheme, host) {
             (Some(scheme), Some(host)) if scheme == "usb" => {
-                let path = uri.path().segments().get(0).and_then(|s| {
+                let path = uri.path().segments().first().and_then(|s| {
                     if !s.is_empty() {
                         Some(s.as_str())
                     } else {
diff --git a/rpc-client-api/src/config.rs b/rpc-client-api/src/config.rs
index 9ecff334ca720c..cecc0b64bdf7b2 100644
--- a/rpc-client-api/src/config.rs
+++ b/rpc-client-api/src/config.rs
@@ -44,6 +44,8 @@ pub struct RpcSimulateTransactionConfig {
     pub encoding: Option<UiTransactionEncoding>,
     pub accounts: Option<RpcSimulateTransactionAccountsConfig>,
     pub min_context_slot: Option<Slot>,
+    #[serde(default)]
+    pub inner_instructions: bool,
 }
 
 #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
diff --git a/rpc-client-api/src/response.rs b/rpc-client-api/src/response.rs
index 7591c58c036d32..fa70e89b6b88ee 100644
--- a/rpc-client-api/src/response.rs
+++ b/rpc-client-api/src/response.rs
@@ -11,7 +11,7 @@ use {
     },
     solana_transaction_status::{
         ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
-        UiTransactionReturnData,
+        UiInnerInstructions, UiTransactionReturnData,
     },
     std::{collections::HashMap, fmt, net::SocketAddr, str::FromStr},
     thiserror::Error,
@@ -423,6 +423,7 @@ pub struct RpcSimulateTransactionResult {
     pub accounts: Option<Vec<Option<UiAccount>>>,
     pub units_consumed: Option<u64>,
     pub return_data: Option<UiTransactionReturnData>,
+    pub inner_instructions: Option<Vec<UiInnerInstructions>>,
 }
 
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs
index 654f45d0296477..44ab26359c3f32 100644
--- a/rpc-client/src/mock_sender.rs
+++ b/rpc-client/src/mock_sender.rs
@@ -66,7 +66,7 @@ pub struct MockSender {
 /// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
 ///
 /// It is customary to set the `url` to "succeeds" for mocks that should
-/// return sucessfully, though this value is not actually interpreted.
+/// return successfully, though this value is not actually interpreted.
 ///
 /// Other possible values of `url` are specific to different `RpcRequest`
 /// values. Read the implementation for specifics.
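
The quic-client hunk earlier in this patch is the heart of the key-rotation change: `QuicConfig` stops deriving `Clone` and moves its TLS certificate behind `RwLock<Arc<QuicClientCertificate>>`, so `update_keypair` can rotate the client identity through `&self` while endpoints that already cloned the `Arc` keep working. A minimal sketch of that swap-the-Arc pattern in isolation; `Rotatable` and the string values are illustrative names, not types from this patch:

```rust
use std::sync::{Arc, RwLock};

/// Hypothetical stand-in for the certificate hot-swap used by `QuicConfig`:
/// readers briefly take the lock to clone the current `Arc`, writers replace
/// the `Arc` wholesale, and in-flight users keep whatever value they grabbed.
struct Rotatable<T> {
    current: RwLock<Arc<T>>,
}

impl<T> Rotatable<T> {
    fn new(value: T) -> Self {
        Self {
            current: RwLock::new(Arc::new(value)),
        }
    }

    /// Read path, analogous to `create_endpoint` cloning the cert guard.
    fn get(&self) -> Arc<T> {
        self.current.read().unwrap().clone()
    }

    /// Write path through `&self`, analogous to `update_keypair`.
    fn rotate(&self, value: T) {
        *self.current.write().unwrap() = Arc::new(value);
    }
}

fn main() {
    let cert = Rotatable::new("cert-for-old-keypair");
    let in_flight = cert.get(); // endpoint built before the rotation
    cert.rotate("cert-for-new-keypair");
    assert_eq!(*in_flight, "cert-for-old-keypair"); // old handle unaffected
    assert_eq!(*cert.get(), "cert-for-new-keypair"); // new reads see the swap
}
```

Taking `&self` rather than `&mut self` is what allows the new `ConnectionManager::update_key` to rotate the key on a connection cache that is already shared across threads.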
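
On the RPC side, `RpcSimulateTransactionConfig` gains an opt-in `inner_instructions` flag (kept `false` by `#[serde(default)]`, so existing JSON payloads still deserialize) and `RpcSimulateTransactionResult` grows the matching response field. A sketch of how a caller might opt in, assuming the crate paths this repository already uses; the transaction construction itself is elided:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_rpc_client_api::config::RpcSimulateTransactionConfig;
use solana_sdk::transaction::Transaction;

fn simulate_with_inner_instructions(rpc: &RpcClient, tx: &Transaction) {
    let config = RpcSimulateTransactionConfig {
        // Opt in explicitly; the serde default keeps this `false` otherwise.
        inner_instructions: true,
        ..RpcSimulateTransactionConfig::default()
    };
    let response = rpc
        .simulate_transaction_with_config(tx, config)
        .expect("simulation request failed");
    // Mirrors the new `Option<Vec<UiInnerInstructions>>` field: `None` when
    // the node did not record inner instructions, `Some` otherwise.
    if let Some(inner) = response.value.inner_instructions {
        println!("captured {} inner instruction sets", inner.len());
    }
}
```

This is also why the mock sender hunk below only needs to add `inner_instructions: None` to its canned `simulateTransaction` response.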
@@ -350,6 +350,7 @@ impl RpcSender for MockSender { accounts: None, units_consumed: None, return_data: None, + inner_instructions: None, }, })?, "getMinimumBalanceForRentExemption" => json![20], diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index 21350938a75750..01f9510b68c9f2 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -104,10 +104,10 @@ use { /// /// [`Finalized`]: CommitmentLevel::Finalized /// [`Processed`]: CommitmentLevel::Processed -/// [jsonprot]: https://docs.solana.com/developing/clients/jsonrpc-api +/// [jsonprot]: https://solana.com/docs/rpc /// [JSON-RPC]: https://www.jsonrpc.org/specification -/// [slots]: https://docs.solana.com/terminology#slot -/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment +/// [slots]: https://solana.com/docs/terminology#slot +/// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Errors /// @@ -176,7 +176,7 @@ impl RpcClient { /// The client has a default timeout of 30 seconds, and a default [commitment /// level][cl] of [`Finalized`](CommitmentLevel::Finalized). /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Examples /// @@ -191,7 +191,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` with specified [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The URL is an HTTP URL, usually for port 8899, as in /// "http://localhost:8899". @@ -223,7 +223,7 @@ impl RpcClient { /// The client has and a default [commitment level][cl] of /// [`Finalized`](CommitmentLevel::Finalized). /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Examples /// @@ -243,7 +243,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` with specified timeout and [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The URL is an HTTP URL, usually for port 8899, as in /// "http://localhost:8899". @@ -276,7 +276,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` with specified timeout and [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The URL is an HTTP URL, usually for port 8899, as in /// "http://localhost:8899". @@ -341,7 +341,7 @@ impl RpcClient { /// behavior in specific scenarios: /// /// - It is customary to set the `url` to "succeeds" for mocks that should - /// return sucessfully, though this value is not actually interpreted. + /// return successfully, though this value is not actually interpreted. /// /// - If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`. /// @@ -396,7 +396,7 @@ impl RpcClient { /// scenarios. /// /// It is customary to set the `url` to "succeeds" for mocks that should - /// return sucessfully, though this value is not actually interpreted. + /// return successfully, though this value is not actually interpreted. 
/// /// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`. /// @@ -424,7 +424,7 @@ impl RpcClient { /// # use solana_rpc_client::nonblocking::rpc_client::RpcClient; /// # use std::collections::HashMap; /// # use serde_json::json; - /// // Create a mock with a custom repsonse to the `GetBalance` request + /// // Create a mock with a custom response to the `GetBalance` request /// let account_balance = 50; /// let account_balance_response = json!(Response { /// context: RpcResponseContext { slot: 1, api_version: None }, @@ -448,7 +448,7 @@ impl RpcClient { /// The client has a default timeout of 30 seconds, and a default [commitment /// level][cl] of [`Finalized`](CommitmentLevel::Finalized). /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Examples /// @@ -464,7 +464,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The client has a default timeout of 30 seconds, and a user-specified /// [`CommitmentLevel`] via [`CommitmentConfig`]. @@ -493,7 +493,7 @@ impl RpcClient { /// /// The client has a default [commitment level][cl] of [`Finalized`](CommitmentLevel::Finalized). /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Examples /// @@ -541,7 +541,7 @@ impl RpcClient { /// Get the configured default [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The commitment config may be specified during construction, and /// determines how thoroughly committed a transaction must be when waiting @@ -609,7 +609,7 @@ impl RpcClient { /// Once this function returns successfully, the given transaction is /// guaranteed to be processed with the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// After sending the transaction, this method polls in a loop for the /// status of the transaction until it has ben confirmed. @@ -639,8 +639,8 @@ impl RpcClient { /// This method is built on the [`sendTransaction`] RPC method, and the /// [`getLatestBlockhash`] RPC method. /// - /// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction - /// [`getLatestBlockhash`]: https://docs.solana.com/developing/clients/jsonrpc-api#getlatestblockhash + /// [`sendTransaction`]: https://solana.com/docs/rpc/http/sendtransaction + /// [`getLatestBlockhash`]: https://solana.com/docs/rpc/http/getlatestblockhash /// /// # Examples /// @@ -811,7 +811,7 @@ impl RpcClient { /// /// This method is built on the [`sendTransaction`] RPC method. /// - /// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// [`sendTransaction`]: https://solana.com/docs/rpc/http/sendtransaction /// /// # Examples /// @@ -900,7 +900,7 @@ impl RpcClient { /// /// This method is built on the [`sendTransaction`] RPC method. 
/// - /// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// [`sendTransaction`]: https://solana.com/docs/rpc/http/sendtransaction /// /// # Examples /// @@ -1014,7 +1014,7 @@ impl RpcClient { /// with the configured [commitment level][cl], which can be retrieved with /// the [`commitment`](RpcClient::commitment) method. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// Note that this method does not wait for a transaction to be confirmed /// — it only checks whether a transaction has been confirmed. To @@ -1028,7 +1028,7 @@ impl RpcClient { /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1073,7 +1073,7 @@ impl RpcClient { /// Returns an [`RpcResult`] with value `true` if the given transaction /// succeeded and has been committed with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// Note that this method does not wait for a transaction to be confirmed /// — it only checks whether a transaction has been confirmed. To @@ -1087,7 +1087,7 @@ impl RpcClient { /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1255,7 +1255,7 @@ impl RpcClient { /// Simulating a transaction is similar to the ["preflight check"] that is /// run by default when sending a transaction. /// - /// ["preflight check"]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// ["preflight check"]: https://solana.com/docs/rpc/http/sendtransaction /// /// By default, signatures are not verified during simulation. To verify /// signatures, call the [`simulate_transaction_with_config`] method, with @@ -1269,7 +1269,7 @@ impl RpcClient { /// /// This method is built on the [`simulateTransaction`] RPC method. /// - /// [`simulateTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#simulatetransaction + /// [`simulateTransaction`]: https://solana.com/docs/rpc/http/simulatetransaction /// /// # Examples /// @@ -1326,7 +1326,7 @@ impl RpcClient { /// Simulating a transaction is similar to the ["preflight check"] that is /// run by default when sending a transaction. /// - /// ["preflight check"]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// ["preflight check"]: https://solana.com/docs/rpc/http/sendtransaction /// /// By default, signatures are not verified during simulation. To verify /// signatures, call the [`simulate_transaction_with_config`] method, with @@ -1349,7 +1349,7 @@ impl RpcClient { /// /// This method is built on the [`simulateTransaction`] RPC method. 
/// - /// [`simulateTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#simulatetransaction + /// [`simulateTransaction`]: https://solana.com/docs/rpc/http/simulatetransaction /// /// # Examples /// @@ -1421,7 +1421,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getHighestSnapshotSlot`] RPC method. /// - /// [`getHighestSnapshotSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#gethighestsnapshotslot + /// [`getHighestSnapshotSlot`]: https://solana.com/docs/rpc/http/gethighestsnapshotslot /// /// # Examples /// @@ -1461,7 +1461,7 @@ impl RpcClient { /// Check if a transaction has been processed with the default [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// If the transaction has been processed with the default commitment level, /// then this method returns `Ok` of `Some`. If the transaction has not yet @@ -1480,14 +1480,14 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#gitsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1556,7 +1556,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1636,7 +1636,7 @@ impl RpcClient { /// method, with the `searchTransactionHistory` configuration option set to /// `true`. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1681,7 +1681,7 @@ impl RpcClient { /// Check if a transaction has been processed with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// If the transaction has been processed with the given commitment level, /// then this method returns `Ok` of `Some`. If the transaction has not yet @@ -1700,14 +1700,14 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference /// /// This method is built on the [`getSignatureStatuses`] RPC method. 
/// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1757,7 +1757,7 @@ impl RpcClient { /// Check if a transaction has been processed with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// If the transaction has been processed with the given commitment level, /// then this method returns `Ok` of `Some`. If the transaction has not yet @@ -1779,7 +1779,7 @@ impl RpcClient { /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1834,13 +1834,13 @@ impl RpcClient { /// Returns the slot that has reached the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSlot`] RPC method. /// - /// [`getSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getslot + /// [`getSlot`]: https://solana.com/docs/rpc/http/getslot /// /// # Examples /// @@ -1860,13 +1860,13 @@ impl RpcClient { /// Returns the slot that has reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSlot`] RPC method. /// - /// [`getSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getslot + /// [`getSlot`]: https://solana.com/docs/rpc/http/getslot /// /// # Examples /// @@ -1895,13 +1895,13 @@ impl RpcClient { /// Returns the block height that has reached the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method is corresponds directly to the [`getBlockHeight`] RPC method. /// - /// [`getBlockHeight`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockheight + /// [`getBlockHeight`]: https://solana.com/docs/rpc/http/getblockheight /// /// # Examples /// @@ -1922,13 +1922,13 @@ impl RpcClient { /// Returns the block height that has reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method is corresponds directly to the [`getBlockHeight`] RPC method. /// - /// [`getBlockHeight`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockheight + /// [`getBlockHeight`]: https://solana.com/docs/rpc/http/getblockheight /// /// # Examples /// @@ -1963,7 +1963,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getSlotLeaders`] RPC method. 
/// - /// [`getSlotLeaders`]: https://docs.solana.com/developing/clients/jsonrpc-api#getslotleaders + /// [`getSlotLeaders`]: https://solana.com/docs/rpc/http/getslotleaders /// /// # Examples /// @@ -2006,7 +2006,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlockProduction`] RPC method. /// - /// [`getBlockProduction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockproduction + /// [`getBlockProduction`]: https://solana.com/docs/rpc/http/getblockproduction /// /// # Examples /// @@ -2030,7 +2030,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlockProduction`] RPC method. /// - /// [`getBlockProduction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockproduction + /// [`getBlockProduction`]: https://solana.com/docs/rpc/http/getblockproduction /// /// # Examples /// @@ -2080,13 +2080,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getStakeActivation`] RPC method. /// - /// [`getStakeActivation`]: https://docs.solana.com/developing/clients/jsonrpc-api#getstakeactivation + /// [`getStakeActivation`]: https://solana.com/docs/rpc/http/getstakeactivation /// /// # Examples /// @@ -2173,13 +2173,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSupply`] RPC method. /// - /// [`getSupply`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsupply + /// [`getSupply`]: https://solana.com/docs/rpc/http/getsupply /// /// # Examples /// @@ -2203,7 +2203,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getSupply`] RPC method. /// - /// [`getSupply`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsupply + /// [`getSupply`]: https://solana.com/docs/rpc/http/getsupply /// /// # Examples /// @@ -2239,7 +2239,7 @@ impl RpcClient { /// This method corresponds directly to the [`getLargestAccounts`] RPC /// method. /// - /// [`getLargestAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getlargestaccounts + /// [`getLargestAccounts`]: https://solana.com/docs/rpc/http/getlargestaccounts /// /// # Examples /// @@ -2282,14 +2282,14 @@ impl RpcClient { /// Returns the account info and associated stake for all the voting accounts /// that have reached the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getVoteAccounts`] /// RPC method. /// - /// [`getVoteAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts + /// [`getVoteAccounts`]: https://solana.com/docs/rpc/http/getvoteaccounts /// /// # Examples /// @@ -2311,13 +2311,13 @@ impl RpcClient { /// Returns the account info and associated stake for all the voting accounts /// that have reached the given [commitment level][cl]. 
/// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getVoteAccounts`] RPC method. /// - /// [`getVoteAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts + /// [`getVoteAccounts`]: https://solana.com/docs/rpc/http/getvoteaccounts /// /// # Examples /// @@ -2349,13 +2349,13 @@ impl RpcClient { /// Returns the account info and associated stake for all the voting accounts /// that have reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getVoteAccounts`] RPC method. /// - /// [`getVoteAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts + /// [`getVoteAccounts`]: https://solana.com/docs/rpc/http/getvoteaccounts /// /// # Examples /// @@ -2435,7 +2435,7 @@ impl RpcClient { /// This method corresponds directly to the [`getClusterNodes`] /// RPC method. /// - /// [`getClusterNodes`]: https://docs.solana.com/developing/clients/jsonrpc-api#getclusternodes + /// [`getClusterNodes`]: https://solana.com/docs/rpc/http/getclusternodes /// /// # Examples /// @@ -2467,7 +2467,7 @@ impl RpcClient { /// This method corresponds directly to the [`getBlock`] RPC /// method. /// - /// [`getBlock`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblock + /// [`getBlock`]: https://solana.com/docs/rpc/http/getblock /// /// # Examples /// @@ -2493,7 +2493,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlock`] RPC method. /// - /// [`getBlock`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblock + /// [`getBlock`]: https://solana.com/docs/rpc/http/getblock /// /// # Examples /// @@ -2531,7 +2531,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlock`] RPC method. /// - /// [`getBlock`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblock + /// [`getBlock`]: https://solana.com/docs/rpc/http/getblock /// /// # Examples /// @@ -2626,7 +2626,7 @@ impl RpcClient { /// /// [`Finalized`]: CommitmentLevel::Finalized /// [`get_blocks_with_limit`]: RpcClient::get_blocks_with_limit. - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Errors /// @@ -2638,8 +2638,8 @@ impl RpcClient { /// the remote node version is less than 1.7, in which case it maps to the /// [`getConfirmedBlocks`] RPC method. /// - /// [`getBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblocks - /// [`getConfirmedBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getConfirmedblocks + /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks + /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2676,7 +2676,7 @@ impl RpcClient { /// If `end_slot` is not provided, then the end slot is for the latest /// block with the given [commitment level][cl]. 
/// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// This method may not return blocks for the full range of slots if some /// slots do not have corresponding blocks. To simply get a specific number @@ -2700,8 +2700,8 @@ impl RpcClient { /// the remote node version is less than 1.7, in which case it maps to the /// [`getConfirmedBlocks`] RPC method. /// - /// [`getBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblocks - /// [`getConfirmedBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getConfirmedblocks + /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks + /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2752,7 +2752,7 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: CommitmentLevel::Finalized. - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Errors /// @@ -2764,8 +2764,8 @@ impl RpcClient { /// method, unless the remote node version is less than 1.7, in which case /// it maps to the [`getConfirmedBlocksWithLimit`] RPC method. /// - /// [`getBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedblockswithlimit + /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit + /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2804,7 +2804,7 @@ impl RpcClient { /// This method returns an error if the given [commitment level][cl] is below /// [`Confirmed`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Confirmed`]: CommitmentLevel::Confirmed /// /// # RPC Reference @@ -2813,8 +2813,8 @@ impl RpcClient { /// method, unless the remote node version is less than 1.7, in which case /// it maps to the `getConfirmedBlocksWithLimit` RPC method. /// - /// [`getBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedblockswithlimit + /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit + /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2941,16 +2941,14 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: CommitmentLevel::Finalized. - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSignaturesForAddress`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the [`getSignaturesForAddress2`] RPC method. + /// method. 
/// - /// [`getSignaturesForAddress`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress - /// [`getSignaturesForAddress2`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress2 + /// [`getSignaturesForAddress`]: https://solana.com/docs/rpc/http/getsignaturesforaddress /// /// # Examples /// @@ -2990,17 +2988,15 @@ impl RpcClient { /// This method returns an error if the given [commitment level][cl] is below /// [`Confirmed`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Confirmed`]: CommitmentLevel::Confirmed /// /// # RPC Reference /// /// This method corresponds directly to the [`getSignaturesForAddress`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the [`getSignaturesForAddress2`] RPC method. + /// method. /// - /// [`getSignaturesForAddress`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress - /// [`getSignaturesForAddress2`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress2 + /// [`getSignaturesForAddress`]: https://solana.com/docs/rpc/http/getsignaturesforaddress /// /// # Examples /// @@ -3110,7 +3106,7 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: CommitmentLevel::Finalized - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// @@ -3118,8 +3114,8 @@ impl RpcClient { /// unless the remote node version is less than 1.7, in which case it maps /// to the [`getConfirmedTransaction`] RPC method. /// - /// [`getTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#gettransaction - /// [`getConfirmedTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedtransaction + /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction + /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getConfirmedTransaction /// /// # Examples /// @@ -3168,7 +3164,7 @@ impl RpcClient { /// This method returns an error if the given [commitment level][cl] is below /// [`Confirmed`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Confirmed`]: CommitmentLevel::Confirmed /// /// # RPC Reference @@ -3177,8 +3173,8 @@ impl RpcClient { /// unless the remote node version is less than 1.7, in which case it maps /// to the [`getConfirmedTransaction`] RPC method. /// - /// [`getTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#gettransaction - /// [`getConfirmedTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedtransaction + /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction + /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getConfirmedTransaction /// /// # Examples /// @@ -3269,7 +3265,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlockTime`] RPC method. 
/// - /// [`getBlockTime`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblocktime + /// [`getBlockTime`]: https://solana.com/docs/rpc/http/getblocktime /// /// # Examples /// @@ -3306,13 +3302,13 @@ impl RpcClient { /// /// This method uses the configured default [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getEpochInfo`] RPC method. /// - /// [`getEpochInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo + /// [`getEpochInfo`]: https://solana.com/docs/rpc/http/getepochinfo /// /// # Examples /// @@ -3336,7 +3332,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getEpochInfo`] RPC method. /// - /// [`getEpochInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo + /// [`getEpochInfo`]: https://solana.com/docs/rpc/http/getepochinfo /// /// # Examples /// @@ -3369,13 +3365,13 @@ impl RpcClient { /// /// This method uses the configured default [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getLeaderSchedule`] RPC method. /// - /// [`getLeaderSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule + /// [`getLeaderSchedule`]: https://solana.com/docs/rpc/http/getleaderschedule /// /// # Examples /// @@ -3407,7 +3403,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getLeaderSchedule`] RPC method. /// - /// [`getLeaderSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule + /// [`getLeaderSchedule`]: https://solana.com/docs/rpc/http/getleaderschedule /// /// # Examples /// @@ -3448,7 +3444,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getLeaderSchedule`] RPC method. /// - /// [`getLeaderSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule + /// [`getLeaderSchedule`]: https://solana.com/docs/rpc/http/getleaderschedule /// /// # Examples /// @@ -3490,7 +3486,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getEpochSchedule`] RPC method. /// - /// [`getEpochSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochschedule + /// [`getEpochSchedule`]: https://solana.com/docs/rpc/http/getepochschedule /// /// # Examples /// @@ -3517,7 +3513,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getRecentPerformanceSamples`] RPC method. /// - /// [`getRecentPerformanceSamples`]: https://docs.solana.com/developing/clients/jsonrpc-api#getrecentperformancesamples + /// [`getRecentPerformanceSamples`]: https://solana.com/docs/rpc/http/getrecentperformancesamples /// /// # Examples /// @@ -3553,7 +3549,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getRecentPrioritizationFees`] RPC method. /// - /// [`getRecentPrioritizationFees`]: https://docs.solana.com/developing/clients/jsonrpc-api#getrecentprioritizationfees + /// [`getRecentPrioritizationFees`]: https://solana.com/docs/rpc/http/getrecentprioritizationfees /// /// # Examples /// @@ -3591,7 +3587,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getIdentity`] RPC method. 
/// - /// [`getIdentity`]: https://docs.solana.com/developing/clients/jsonrpc-api#getidentity + /// [`getIdentity`]: https://solana.com/docs/rpc/http/getidentity /// /// # Examples /// @@ -3621,14 +3617,14 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: CommitmentLevel::Finalized - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getInflationGovernor`] RPC /// method. /// - /// [`getInflationGovernor`]: https://docs.solana.com/developing/clients/jsonrpc-api#getinflationgovernor + /// [`getInflationGovernor`]: https://solana.com/docs/rpc/http/getinflationgovernor /// /// # Examples /// @@ -3653,7 +3649,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getInflationRate`] RPC method. /// - /// [`getInflationRate`]: https://docs.solana.com/developing/clients/jsonrpc-api#getinflationrate + /// [`getInflationRate`]: https://solana.com/docs/rpc/http/getinflationrate /// /// # Examples /// @@ -3675,13 +3671,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getInflationReward`] RPC method. /// - /// [`getInflationReward`]: https://docs.solana.com/developing/clients/jsonrpc-api#getinflationreward + /// [`getInflationReward`]: https://solana.com/docs/rpc/http/getinflationreward /// /// # Examples /// @@ -3733,7 +3729,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getVersion`] RPC method. /// - /// [`getVersion`]: https://docs.solana.com/developing/clients/jsonrpc-api#getversion + /// [`getVersion`]: https://solana.com/docs/rpc/http/getversion /// /// # Examples /// @@ -3765,7 +3761,7 @@ impl RpcClient { /// This method corresponds directly to the [`minimumLedgerSlot`] RPC /// method. /// - /// [`minimumLedgerSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#minimumledgerslot + /// [`minimumLedgerSlot`]: https://solana.com/docs/rpc/http/minimumledgerslot /// /// # Examples /// @@ -3787,7 +3783,7 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// To get multiple accounts at once, use the [`get_multiple_accounts`] method. /// @@ -3805,7 +3801,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3846,7 +3842,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3901,7 +3897,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. 
/// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3983,7 +3979,7 @@ impl RpcClient { /// This method corresponds directly to the [`getMaxRetransmitSlot`] RPC /// method. /// - /// [`getMaxRetransmitSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmaxretransmitslot + /// [`getMaxRetransmitSlot`]: https://solana.com/docs/rpc/http/getmaxretransmitslot /// /// # Examples /// @@ -4001,14 +3997,14 @@ impl RpcClient { .await } - /// Get the max slot seen from after [shred](https://docs.solana.com/terminology#shred) insert. + /// Get the max slot seen from after [shred](https://solana.com/docs/terminology#shred) insert. /// /// # RPC Reference /// /// This method corresponds directly to the /// [`getMaxShredInsertSlot`] RPC method. /// - /// [`getMaxShredInsertSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmaxshredinsertslot + /// [`getMaxShredInsertSlot`]: https://solana.com/docs/rpc/http/getmaxshredinsertslot /// /// # Examples /// @@ -4030,13 +4026,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method is built on the [`getMultipleAccounts`] RPC method. /// - /// [`getMultipleAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmultipleaccounts + /// [`getMultipleAccounts`]: https://solana.com/docs/rpc/http/getmultipleaccounts /// /// # Examples /// @@ -4073,7 +4069,7 @@ impl RpcClient { /// /// This method is built on the [`getMultipleAccounts`] RPC method. /// - /// [`getMultipleAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmultipleaccounts + /// [`getMultipleAccounts`]: https://solana.com/docs/rpc/http/getmultipleaccounts /// /// # Examples /// @@ -4122,7 +4118,7 @@ impl RpcClient { /// /// This method is built on the [`getMultipleAccounts`] RPC method. /// - /// [`getMultipleAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmultipleaccounts + /// [`getMultipleAccounts`]: https://solana.com/docs/rpc/http/getmultipleaccounts /// /// # Examples /// @@ -4196,7 +4192,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -4229,7 +4225,7 @@ impl RpcClient { /// This method corresponds directly to the /// [`getMinimumBalanceForRentExemption`] RPC method. /// - /// [`getMinimumBalanceForRentExemption`]: https://docs.solana.com/developing/clients/jsonrpc-api#getminimumbalanceforrentexemption + /// [`getMinimumBalanceForRentExemption`]: https://solana.com/docs/rpc/http/getminimumbalanceforrentexemption /// /// # Examples /// @@ -4268,13 +4264,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getBalance`] RPC method. 
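For the balance queries above, a sketch showing the default-commitment call next to the explicit-commitment one; the keypair is freshly generated, so a zero balance is expected:

```rust
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::{
    commitment_config::CommitmentConfig,
    signature::{Keypair, Signer},
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let alice = Keypair::new();

    // Uses the client's configured default commitment level.
    let balance = rpc_client.get_balance(&alice.pubkey()).await?;

    // Same query, but pinned to `confirmed`.
    let confirmed = rpc_client
        .get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::confirmed())
        .await?;
    println!("default: {balance}, confirmed: {}", confirmed.value);
    Ok(())
}
```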
/// - /// [`getBalance`]: https://docs.solana.com/developing/clients/jsonrpc-api#getbalance + /// [`getBalance`]: https://solana.com/docs/rpc/http/getbalance /// /// # Examples /// @@ -4306,7 +4302,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBalance`] RPC method. /// - /// [`getBalance`]: https://docs.solana.com/developing/clients/jsonrpc-api#getbalance + /// [`getBalance`]: https://solana.com/docs/rpc/http/getbalance /// /// # Examples /// @@ -4349,14 +4345,14 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getProgramAccounts`] RPC /// method. /// - /// [`getProgramAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getprogramaccounts + /// [`getProgramAccounts`]: https://solana.com/docs/rpc/http/getprogramaccounts /// /// # Examples /// @@ -4398,7 +4394,7 @@ impl RpcClient { /// /// This method is built on the [`getProgramAccounts`] RPC method. /// - /// [`getProgramAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getprogramaccounts + /// [`getProgramAccounts`]: https://solana.com/docs/rpc/http/getprogramaccounts /// /// # Examples /// @@ -4481,7 +4477,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getStakeMinimumDelegation`] RPC method. /// - /// [`getStakeMinimumDelegation`]: https://docs.solana.com/developing/clients/jsonrpc-api#getstakeminimumdelegation + /// [`getStakeMinimumDelegation`]: https://solana.com/docs/rpc/http/getstakeminimumdelegation /// /// # Examples /// @@ -4506,7 +4502,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getStakeMinimumDelegation`] RPC method. /// - /// [`getStakeMinimumDelegation`]: https://docs.solana.com/developing/clients/jsonrpc-api#getstakeminimumdelegation + /// [`getStakeMinimumDelegation`]: https://solana.com/docs/rpc/http/getstakeminimumdelegation /// /// # Examples /// diff --git a/rpc-client/src/rpc_client.rs b/rpc-client/src/rpc_client.rs index afccd7af003290..0c5f1fdc4ac549 100644 --- a/rpc-client/src/rpc_client.rs +++ b/rpc-client/src/rpc_client.rs @@ -151,10 +151,10 @@ pub struct GetConfirmedSignaturesForAddress2Config { /// /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized /// [`Processed`]: solana_sdk::commitment_config::CommitmentLevel::Processed -/// [jsonprot]: https://docs.solana.com/developing/clients/jsonrpc-api +/// [jsonprot]: https://solana.com/docs/rpc /// [JSON-RPC]: https://www.jsonrpc.org/specification -/// [slots]: https://docs.solana.com/terminology#slot -/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment +/// [slots]: https://solana.com/docs/terminology#slot +/// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Errors /// @@ -240,7 +240,7 @@ impl RpcClient { /// The client has a default timeout of 30 seconds, and a default [commitment /// level][cl] of [`Finalized`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized /// /// # Examples @@ -256,7 +256,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` with specified [commitment level][cl]. 
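The constructors described here compose as below; a sketch of the blocking client, with the URL, timeout, and commitment values chosen arbitrarily:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
use std::time::Duration;

fn main() {
    let url = "http://localhost:8899".to_string();

    // Default: 30-second timeout, `finalized` commitment.
    let _client = RpcClient::new(url.clone());

    // Override only the commitment level.
    let _confirmed = RpcClient::new_with_commitment(url.clone(), CommitmentConfig::confirmed());

    // Override both the timeout and the commitment level.
    let _tuned = RpcClient::new_with_timeout_and_commitment(
        url,
        Duration::from_secs(10),
        CommitmentConfig::processed(),
    );
}
```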
/// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The URL is an HTTP URL, usually for port 8899, as in /// "http://localhost:8899". @@ -290,7 +290,7 @@ impl RpcClient { /// The client has and a default [commitment level][cl] of /// [`Finalized`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized /// /// # Examples @@ -311,7 +311,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` with specified timeout and [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The URL is an HTTP URL, usually for port 8899, as in /// "http://localhost:8899". @@ -344,7 +344,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` with specified timeout and [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The URL is an HTTP URL, usually for port 8899, as in /// "http://localhost:8899". @@ -492,7 +492,7 @@ impl RpcClient { /// # use solana_rpc_client::rpc_client::RpcClient; /// # use std::collections::HashMap; /// # use serde_json::json; - /// // Create a mock with a custom repsonse to the `GetBalance` request + /// // Create a mock with a custom response to the `GetBalance` request /// let account_balance = 50; /// let account_balance_response = json!(Response { /// context: RpcResponseContext { slot: 1, api_version: None }, @@ -516,7 +516,7 @@ impl RpcClient { /// The client has a default timeout of 30 seconds, and a default [commitment /// level][cl] of [`Finalized`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized /// /// # Examples @@ -533,7 +533,7 @@ impl RpcClient { /// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The client has a default timeout of 30 seconds, and a user-specified /// [`CommitmentLevel`] via [`CommitmentConfig`]. @@ -564,7 +564,7 @@ impl RpcClient { /// /// The client has a default [commitment level][cl] of [`Finalized`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized /// /// # Examples @@ -589,7 +589,7 @@ impl RpcClient { /// Get the configured default [commitment level][cl]. 
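Building on the mocking constructors documented above, a sketch that routes `GetBalance` to a canned value, mirroring the doc example; the balance figure is arbitrary:

```rust
use serde_json::json;
use solana_rpc_client::rpc_client::RpcClient;
use solana_rpc_client_api::{
    request::RpcRequest,
    response::{Response, RpcResponseContext},
};
use std::collections::HashMap;

fn main() {
    // Canned response for `GetBalance`; requests without a mock entry fall
    // back to the mock sender's default behavior.
    let account_balance = 50;
    let balance_response = json!(Response {
        context: RpcResponseContext { slot: 1, api_version: None },
        value: json!(account_balance),
    });
    let mut mocks = HashMap::new();
    mocks.insert(RpcRequest::GetBalance, balance_response);
    let _rpc_client = RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks);
}
```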
/// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// The commitment config may be specified during construction, and /// determines how thoroughly committed a transaction must be when waiting @@ -611,7 +611,7 @@ impl RpcClient { /// Once this function returns successfully, the given transaction is /// guaranteed to be processed with the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// After sending the transaction, this method polls in a loop for the /// status of the transaction until it has ben confirmed. @@ -642,8 +642,8 @@ impl RpcClient { /// This method is built on the [`sendTransaction`] RPC method, and the /// [`getLatestBlockhash`] RPC method. /// - /// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction - /// [`getLatestBlockhash`]: https://docs.solana.com/developing/clients/jsonrpc-api#getlatestblockhash + /// [`sendTransaction`]: https://solana.com/docs/rpc/http/sendtransaction + /// [`getLatestBlockhash`]: https://solana.com/docs/rpc/http/getlatestblockhash /// /// # Examples /// @@ -754,7 +754,7 @@ impl RpcClient { /// /// This method is built on the [`sendTransaction`] RPC method. /// - /// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// [`sendTransaction`]: https://solana.com/docs/rpc/http/sendtransaction /// /// # Examples /// @@ -830,7 +830,7 @@ impl RpcClient { /// /// This method is built on the [`sendTransaction`] RPC method. /// - /// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// [`sendTransaction`]: https://solana.com/docs/rpc/http/sendtransaction /// /// # Examples /// @@ -885,7 +885,7 @@ impl RpcClient { /// with the configured [commitment level][cl], which can be retrieved with /// the [`commitment`](RpcClient::commitment) method. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// Note that this method does not wait for a transaction to be confirmed /// — it only checks whether a transaction has been confirmed. To @@ -899,7 +899,7 @@ impl RpcClient { /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -938,7 +938,7 @@ impl RpcClient { /// Returns an [`RpcResult`] with value `true` if the given transaction /// succeeded and has been committed with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// Note that this method does not wait for a transaction to be confirmed /// — it only checks whether a transaction has been confirmed. To @@ -952,7 +952,7 @@ impl RpcClient { /// /// This method is built on the [`getSignatureStatuses`] RPC method. 
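A sketch of the send-and-confirm flow described above, assuming `alice` already holds enough lamports to cover the transfer and the fee:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let alice = Keypair::new(); // assumed funded
    let bob = Keypair::new();

    let latest_blockhash = rpc_client.get_latest_blockhash()?;
    let tx = system_transaction::transfer(&alice, &bob.pubkey(), 50, latest_blockhash);

    // Sends via `sendTransaction`, then polls signature status until the
    // client's default commitment level is reached.
    let signature = rpc_client.send_and_confirm_transaction(&tx)?;
    println!("confirmed: {signature}");
    Ok(())
}
```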
/// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1022,7 +1022,7 @@ impl RpcClient { /// Simulating a transaction is similar to the ["preflight check"] that is /// run by default when sending a transaction. /// - /// ["preflight check"]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// ["preflight check"]: https://solana.com/docs/rpc/http/sendtransaction /// /// By default, signatures are not verified during simulation. To verify /// signatures, call the [`simulate_transaction_with_config`] method, with @@ -1036,7 +1036,7 @@ impl RpcClient { /// /// This method is built on the [`simulateTransaction`] RPC method. /// - /// [`simulateTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#simulatetransaction + /// [`simulateTransaction`]: https://solana.com/docs/rpc/http/simulatetransaction /// /// # Examples /// @@ -1083,7 +1083,7 @@ impl RpcClient { /// Simulating a transaction is similar to the ["preflight check"] that is /// run by default when sending a transaction. /// - /// ["preflight check"]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction + /// ["preflight check"]: https://solana.com/docs/rpc/http/sendtransaction /// /// By default, signatures are not verified during simulation. To verify /// signatures, call the [`simulate_transaction_with_config`] method, with @@ -1106,7 +1106,7 @@ impl RpcClient { /// /// This method is built on the [`simulateTransaction`] RPC method. /// - /// [`simulateTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#simulatetransaction + /// [`simulateTransaction`]: https://solana.com/docs/rpc/http/simulatetransaction /// /// # Examples /// @@ -1160,7 +1160,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getHighestSnapshotSlot`] RPC method. /// - /// [`getHighestSnapshotSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#gethighestsnapshotslot + /// [`getHighestSnapshotSlot`]: https://solana.com/docs/rpc/http/gethighestsnapshotslot /// /// # Examples /// @@ -1186,7 +1186,7 @@ impl RpcClient { /// Check if a transaction has been processed with the default [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// If the transaction has been processed with the default commitment level, /// then this method returns `Ok` of `Some`. If the transaction has not yet @@ -1205,14 +1205,14 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference /// /// This method is built on the [`getSignatureStatuses`] RPC method. 
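The preflight-style simulation described above can be exercised directly; a sketch that inspects the simulation outcome without submitting anything:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let (alice, bob) = (Keypair::new(), Keypair::new());

    let latest_blockhash = rpc_client.get_latest_blockhash()?;
    let tx = system_transaction::transfer(&alice, &bob.pubkey(), 50, latest_blockhash);

    // Executes against current bank state; nothing lands on chain, and by
    // default signatures are not verified.
    let simulation = rpc_client.simulate_transaction(&tx)?;
    println!("err: {:?}", simulation.value.err);
    println!("logs: {:?}", simulation.value.logs);
    Ok(())
}
```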
/// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#gitsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1277,7 +1277,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1352,7 +1352,7 @@ impl RpcClient { /// method, with the `searchTransactionHistory` configuration option set to /// `true`. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1387,7 +1387,7 @@ impl RpcClient { /// Check if a transaction has been processed with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// If the transaction has been processed with the given commitment level, /// then this method returns `Ok` of `Some`. If the transaction has not yet @@ -1406,14 +1406,14 @@ impl RpcClient { /// recent slots, plus up to /// [`MAX_RECENT_BLOCKHASHES`][solana_sdk::clock::MAX_RECENT_BLOCKHASHES] /// rooted slots. To search the full transaction history use the - /// [`get_signature_statuse_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] + /// [`get_signature_status_with_commitment_and_history`][RpcClient::get_signature_status_with_commitment_and_history] /// method. /// /// # RPC Reference /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1454,7 +1454,7 @@ impl RpcClient { /// Check if a transaction has been processed with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// If the transaction has been processed with the given commitment level, /// then this method returns `Ok` of `Some`. If the transaction has not yet @@ -1476,7 +1476,7 @@ impl RpcClient { /// /// This method is built on the [`getSignatureStatuses`] RPC method. /// - /// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses + /// [`getSignatureStatuses`]: https://solana.com/docs/rpc/http/getsignaturestatuses /// /// # Examples /// @@ -1523,13 +1523,13 @@ impl RpcClient { /// Returns the slot that has reached the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSlot`] RPC method. 
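For the slot queries above, a sketch contrasting the default commitment with an explicit `processed` query, which may report a slightly higher slot:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());

    let slot = rpc_client.get_slot()?;
    let processed = rpc_client.get_slot_with_commitment(CommitmentConfig::processed())?;
    println!("default: {slot}, processed: {processed}");
    Ok(())
}
```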
/// - /// [`getSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getslot + /// [`getSlot`]: https://solana.com/docs/rpc/http/getslot /// /// # Examples /// @@ -1546,13 +1546,13 @@ impl RpcClient { /// Returns the slot that has reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSlot`] RPC method. /// - /// [`getSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getslot + /// [`getSlot`]: https://solana.com/docs/rpc/http/getslot /// /// # Examples /// @@ -1574,13 +1574,13 @@ impl RpcClient { /// Returns the block height that has reached the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method is corresponds directly to the [`getBlockHeight`] RPC method. /// - /// [`getBlockHeight`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockheight + /// [`getBlockHeight`]: https://solana.com/docs/rpc/http/getblockheight /// /// # Examples /// @@ -1597,13 +1597,13 @@ impl RpcClient { /// Returns the block height that has reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method is corresponds directly to the [`getBlockHeight`] RPC method. /// - /// [`getBlockHeight`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockheight + /// [`getBlockHeight`]: https://solana.com/docs/rpc/http/getblockheight /// /// # Examples /// @@ -1631,7 +1631,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getSlotLeaders`] RPC method. /// - /// [`getSlotLeaders`]: https://docs.solana.com/developing/clients/jsonrpc-api#getslotleaders + /// [`getSlotLeaders`]: https://solana.com/docs/rpc/http/getslotleaders /// /// # Examples /// @@ -1655,7 +1655,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlockProduction`] RPC method. /// - /// [`getBlockProduction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockproduction + /// [`getBlockProduction`]: https://solana.com/docs/rpc/http/getblockproduction /// /// # Examples /// @@ -1676,7 +1676,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlockProduction`] RPC method. /// - /// [`getBlockProduction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockproduction + /// [`getBlockProduction`]: https://solana.com/docs/rpc/http/getblockproduction /// /// # Examples /// @@ -1721,13 +1721,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getStakeActivation`] RPC method. 
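A sketch of the slot-leader and block-production queries documented above; the ten-slot window is arbitrary:

```rust
use solana_rpc_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());

    // Leaders for the next ten slots, starting at the current slot.
    let start_slot = rpc_client.get_slot()?;
    let leaders = rpc_client.get_slot_leaders(start_slot, 10)?;
    println!("upcoming leaders: {leaders:?}");

    // Per-identity (leader slots, blocks produced) counts for this epoch.
    let production = rpc_client.get_block_production()?;
    println!("producing identities: {}", production.value.by_identity.len());
    Ok(())
}
```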
/// - /// [`getStakeActivation`]: https://docs.solana.com/developing/clients/jsonrpc-api#getstakeactivation + /// [`getStakeActivation`]: https://solana.com/docs/rpc/http/getstakeactivation /// /// # Examples /// @@ -1800,13 +1800,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSupply`] RPC method. /// - /// [`getSupply`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsupply + /// [`getSupply`]: https://solana.com/docs/rpc/http/getsupply /// /// # Examples /// @@ -1827,7 +1827,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getSupply`] RPC method. /// - /// [`getSupply`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsupply + /// [`getSupply`]: https://solana.com/docs/rpc/http/getsupply /// /// # Examples /// @@ -1856,7 +1856,7 @@ impl RpcClient { /// This method corresponds directly to the [`getLargestAccounts`] RPC /// method. /// - /// [`getLargestAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getlargestaccounts + /// [`getLargestAccounts`]: https://solana.com/docs/rpc/http/getlargestaccounts /// /// # Examples /// @@ -1888,14 +1888,14 @@ impl RpcClient { /// Returns the account info and associated stake for all the voting accounts /// that have reached the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getVoteAccounts`] /// RPC method. /// - /// [`getVoteAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts + /// [`getVoteAccounts`]: https://solana.com/docs/rpc/http/getvoteaccounts /// /// # Examples /// @@ -1913,13 +1913,13 @@ impl RpcClient { /// Returns the account info and associated stake for all the voting accounts /// that have reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getVoteAccounts`] RPC method. /// - /// [`getVoteAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts + /// [`getVoteAccounts`]: https://solana.com/docs/rpc/http/getvoteaccounts /// /// # Examples /// @@ -1944,13 +1944,13 @@ impl RpcClient { /// Returns the account info and associated stake for all the voting accounts /// that have reached the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getVoteAccounts`] RPC method. /// - /// [`getVoteAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts + /// [`getVoteAccounts`]: https://solana.com/docs/rpc/http/getvoteaccounts /// /// # Examples /// @@ -2002,7 +2002,7 @@ impl RpcClient { /// This method corresponds directly to the [`getClusterNodes`] /// RPC method. 
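A sketch of the vote-account query documented above; field names follow the `RpcVoteAccountStatus` response type:

```rust
use solana_rpc_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());

    let vote_accounts = rpc_client.get_vote_accounts()?;
    // `current` holds accounts voting near the tip; `delinquent` holds
    // accounts that have fallen behind.
    for account in &vote_accounts.current {
        println!("{}: {} activated stake", account.vote_pubkey, account.activated_stake);
    }
    println!("delinquent: {}", vote_accounts.delinquent.len());
    Ok(())
}
```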
/// - /// [`getClusterNodes`]: https://docs.solana.com/developing/clients/jsonrpc-api#getclusternodes + /// [`getClusterNodes`]: https://solana.com/docs/rpc/http/getclusternodes /// /// # Examples /// @@ -2031,7 +2031,7 @@ impl RpcClient { /// This method corresponds directly to the [`getBlock`] RPC /// method. /// - /// [`getBlock`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblock + /// [`getBlock`]: https://solana.com/docs/rpc/http/getblock /// /// # Examples /// @@ -2053,7 +2053,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlock`] RPC method. /// - /// [`getBlock`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblock + /// [`getBlock`]: https://solana.com/docs/rpc/http/getblock /// /// # Examples /// @@ -2084,7 +2084,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBlock`] RPC method. /// - /// [`getBlock`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblock + /// [`getBlock`]: https://solana.com/docs/rpc/http/getblock /// /// # Examples /// @@ -2169,7 +2169,7 @@ impl RpcClient { /// /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized /// [`get_blocks_with_limit`]: RpcClient::get_blocks_with_limit. - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Errors /// @@ -2181,8 +2181,8 @@ impl RpcClient { /// the remote node version is less than 1.7, in which case it maps to the /// [`getConfirmedBlocks`] RPC method. /// - /// [`getBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblocks - /// [`getConfirmedBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getConfirmedblocks + /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks + /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2208,7 +2208,7 @@ impl RpcClient { /// If `end_slot` is not provided, then the end slot is for the latest /// block with the given [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// This method may not return blocks for the full range of slots if some /// slots do not have corresponding blocks. To simply get a specific number @@ -2232,8 +2232,8 @@ impl RpcClient { /// the remote node version is less than 1.7, in which case it maps to the /// [`getConfirmedBlocks`] RPC method. /// - /// [`getBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblocks - /// [`getConfirmedBlocks`]: https://docs.solana.com/developing/clients/jsonrpc-api#getConfirmedblocks + /// [`getBlocks`]: https://solana.com/docs/rpc/http/getblocks + /// [`getConfirmedBlocks`]: https://solana.com/docs/rpc/deprecated/getconfirmedblocks /// /// # Examples /// @@ -2272,7 +2272,7 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: CommitmentLevel::Finalized. - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # Errors /// @@ -2284,8 +2284,8 @@ impl RpcClient { /// method, unless the remote node version is less than 1.7, in which case /// it maps to the [`getConfirmedBlocksWithLimit`] RPC method. 
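A sketch tying together the block queries above: list confirmed slots in a bounded range, then fetch the first block; the range bounds are arbitrary:

```rust
use solana_rpc_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());

    // Slots without corresponding blocks are simply absent from the result.
    let slots = rpc_client.get_blocks(0, Some(100))?;
    if let Some(&slot) = slots.first() {
        let block = rpc_client.get_block(slot)?;
        println!("block {slot}: {} transactions", block.transactions.len());
    }
    Ok(())
}
```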
/// - /// [`getBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedblockswithlimit + /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit + /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2312,7 +2312,7 @@ impl RpcClient { /// This method returns an error if the given [commitment level][cl] is below /// [`Confirmed`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed /// /// # RPC Reference @@ -2321,8 +2321,8 @@ impl RpcClient { /// method, unless the remote node version is less than 1.7, in which case /// it maps to the `getConfirmedBlocksWithLimit` RPC method. /// - /// [`getBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblockswithlimit - /// [`getConfirmedBlocksWithLimit`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedblockswithlimit + /// [`getBlocksWithLimit`]: https://solana.com/docs/rpc/http/getblockswithlimit + /// [`getConfirmedBlocksWithLimit`]: https://solana.com/docs/rpc/deprecated/getconfirmedblockswithlimit /// /// # Examples /// @@ -2427,16 +2427,14 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: CommitmentLevel::Finalized. - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getSignaturesForAddress`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the [`getSignaturesForAddress2`] RPC method. + /// method. /// - /// [`getSignaturesForAddress`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress - /// [`getSignaturesForAddress2`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress2 + /// [`getSignaturesForAddress`]: https://solana.com/docs/rpc/http/getsignaturesforaddress /// /// # Examples /// @@ -2469,17 +2467,15 @@ impl RpcClient { /// This method returns an error if the given [commitment level][cl] is below /// [`Confirmed`]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed /// /// # RPC Reference /// /// This method corresponds directly to the [`getSignaturesForAddress`] RPC - /// method, unless the remote node version is less than 1.7, in which case - /// it maps to the [`getSignaturesForAddress2`] RPC method. + /// method. /// - /// [`getSignaturesForAddress`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress - /// [`getSignaturesForAddress2`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturesforaddress2 + /// [`getSignaturesForAddress`]: https://solana.com/docs/rpc/http/getsignaturesforaddress /// /// # Examples /// @@ -2554,7 +2550,7 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. 
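A sketch of the address-history query documented above; a freshly generated address is used here, so an empty history is expected:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::signature::{Keypair, Signer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let alice = Keypair::new();

    // Newest-first confirmed signatures mentioning the address, up to the
    // server-side page limit.
    let statuses = rpc_client.get_signatures_for_address(&alice.pubkey())?;
    for status in statuses {
        println!("{} in slot {} (err: {:?})", status.signature, status.slot, status.err);
    }
    Ok(())
}
```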
///
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
- /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
+ /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
///
/// # RPC Reference
///
@@ -2562,8 +2558,8 @@ impl RpcClient {
/// unless the remote node version is less than 1.7, in which case it maps
/// to the [`getConfirmedTransaction`] RPC method.
///
- /// [`getTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#gettransaction
- /// [`getConfirmedTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedtransaction
+ /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction
+ /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getconfirmedtransaction
///
/// # Examples
///
@@ -2605,7 +2601,7 @@ impl RpcClient {
/// This method returns an error if the given [commitment level][cl] is below
/// [`Confirmed`].
///
- /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
+ /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
/// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed
///
/// # RPC Reference
///
@@ -2614,8 +2610,8 @@ impl RpcClient {
/// unless the remote node version is less than 1.7, in which case it maps
/// to the [`getConfirmedTransaction`] RPC method.
///
- /// [`getTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#gettransaction
- /// [`getConfirmedTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedtransaction
+ /// [`getTransaction`]: https://solana.com/docs/rpc/http/gettransaction
+ /// [`getConfirmedTransaction`]: https://solana.com/docs/rpc/deprecated/getconfirmedtransaction
///
/// # Examples
///
@@ -2693,7 +2689,7 @@ impl RpcClient {
///
/// This method corresponds directly to the [`getBlockTime`] RPC method.
///
- /// [`getBlockTime`]: https://docs.solana.com/developing/clients/jsonrpc-api#getblocktime
+ /// [`getBlockTime`]: https://solana.com/docs/rpc/http/getblocktime
///
/// # Examples
///
@@ -2714,13 +2710,13 @@ impl RpcClient {
///
/// This method uses the configured default [commitment level][cl].
///
- /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
+ /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
///
/// # RPC Reference
///
/// This method corresponds directly to the [`getEpochInfo`] RPC method.
///
- /// [`getEpochInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo
+ /// [`getEpochInfo`]: https://solana.com/docs/rpc/http/getepochinfo
///
/// # Examples
///
@@ -2741,7 +2737,7 @@ impl RpcClient {
///
/// This method corresponds directly to the [`getEpochInfo`] RPC method.
///
- /// [`getEpochInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo
+ /// [`getEpochInfo`]: https://solana.com/docs/rpc/http/getepochinfo
///
/// # Examples
///
@@ -2767,13 +2763,13 @@ impl RpcClient {
///
/// This method uses the configured default [commitment level][cl].
///
- /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
+ /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
///
/// # RPC Reference
///
/// This method corresponds directly to the [`getLeaderSchedule`] RPC method.
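The `get_transaction` wrapper described above takes a signature and an encoding; a sketch with a placeholder signature (substitute one returned by a real submission):

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::signature::Signature;
use solana_transaction_status::UiTransactionEncoding;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());

    // Placeholder only: an all-zero signature will not be found; use a
    // signature from a previously confirmed transaction.
    let signature = Signature::default();
    let tx = rpc_client.get_transaction(&signature, UiTransactionEncoding::Json)?;
    println!("landed in slot {}", tx.slot);
    Ok(())
}
```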
/// - /// [`getLeaderSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule + /// [`getLeaderSchedule`]: https://solana.com/docs/rpc/http/getleaderschedule /// /// # Examples /// @@ -2801,7 +2797,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getLeaderSchedule`] RPC method. /// - /// [`getLeaderSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule + /// [`getLeaderSchedule`]: https://solana.com/docs/rpc/http/getleaderschedule /// /// # Examples /// @@ -2834,7 +2830,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getLeaderSchedule`] RPC method. /// - /// [`getLeaderSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule + /// [`getLeaderSchedule`]: https://solana.com/docs/rpc/http/getleaderschedule /// /// # Examples /// @@ -2872,7 +2868,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getEpochSchedule`] RPC method. /// - /// [`getEpochSchedule`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochschedule + /// [`getEpochSchedule`]: https://solana.com/docs/rpc/http/getepochschedule /// /// # Examples /// @@ -2896,7 +2892,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getRecentPerformanceSamples`] RPC method. /// - /// [`getRecentPerformanceSamples`]: https://docs.solana.com/developing/clients/jsonrpc-api#getrecentperformancesamples + /// [`getRecentPerformanceSamples`]: https://solana.com/docs/rpc/http/getrecentperformancesamples /// /// # Examples /// @@ -2928,7 +2924,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getRecentPrioritizationFees`] RPC method. /// - /// [`getRecentPrioritizationFees`]: https://docs.solana.com/developing/clients/jsonrpc-api#getrecentprioritizationfees + /// [`getRecentPrioritizationFees`]: https://solana.com/docs/rpc/http/getrecentprioritizationfees /// /// # Examples /// @@ -2958,7 +2954,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getIdentity`] RPC method. /// - /// [`getIdentity`]: https://docs.solana.com/developing/clients/jsonrpc-api#getidentity + /// [`getIdentity`]: https://solana.com/docs/rpc/http/getidentity /// /// # Examples /// @@ -2978,14 +2974,14 @@ impl RpcClient { /// This method uses the [`Finalized`] [commitment level][cl]. /// /// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getInflationGovernor`] RPC /// method. /// - /// [`getInflationGovernor`]: https://docs.solana.com/developing/clients/jsonrpc-api#getinflationgovernor + /// [`getInflationGovernor`]: https://solana.com/docs/rpc/http/getinflationgovernor /// /// # Examples /// @@ -3006,7 +3002,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getInflationRate`] RPC method. /// - /// [`getInflationRate`]: https://docs.solana.com/developing/clients/jsonrpc-api#getinflationrate + /// [`getInflationRate`]: https://solana.com/docs/rpc/http/getinflationrate /// /// # Examples /// @@ -3025,13 +3021,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. 
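A sketch of the prioritization-fee query documented above; the writable addresses are freshly generated placeholders:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::signature::{Keypair, Signer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let writable = [Keypair::new().pubkey(), Keypair::new().pubkey()];

    // Recent per-slot fees paid by transactions that locked all of the
    // given writable accounts.
    let fees = rpc_client.get_recent_prioritization_fees(&writable)?;
    for sample in fees {
        println!("slot {}: {}", sample.slot, sample.prioritization_fee);
    }
    Ok(())
}
```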
/// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getInflationReward`] RPC method. /// - /// [`getInflationReward`]: https://docs.solana.com/developing/clients/jsonrpc-api#getinflationreward + /// [`getInflationReward`]: https://solana.com/docs/rpc/http/getinflationreward /// /// # Examples /// @@ -3065,7 +3061,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getVersion`] RPC method. /// - /// [`getVersion`]: https://docs.solana.com/developing/clients/jsonrpc-api#getversion + /// [`getVersion`]: https://solana.com/docs/rpc/http/getversion /// /// # Examples /// @@ -3094,7 +3090,7 @@ impl RpcClient { /// This method corresponds directly to the [`minimumLedgerSlot`] RPC /// method. /// - /// [`minimumLedgerSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#minimumledgerslot + /// [`minimumLedgerSlot`]: https://solana.com/docs/rpc/http/minimumledgerslot /// /// # Examples /// @@ -3113,7 +3109,7 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// To get multiple accounts at once, use the [`get_multiple_accounts`] method. /// @@ -3132,7 +3128,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3167,7 +3163,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3214,7 +3210,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3263,7 +3259,7 @@ impl RpcClient { /// This method corresponds directly to the [`getMaxRetransmitSlot`] RPC /// method. /// - /// [`getMaxRetransmitSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmaxretransmitslot + /// [`getMaxRetransmitSlot`]: https://solana.com/docs/rpc/http/getmaxretransmitslot /// /// # Examples /// @@ -3277,14 +3273,14 @@ impl RpcClient { self.invoke((self.rpc_client.as_ref()).get_max_retransmit_slot()) } - /// Get the max slot seen from after [shred](https://docs.solana.com/terminology#shred) insert. + /// Get the max slot seen from after [shred](https://solana.com/docs/terminology#shred) insert. /// /// # RPC Reference /// /// This method corresponds directly to the /// [`getMaxShredInsertSlot`] RPC method. /// - /// [`getMaxShredInsertSlot`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmaxshredinsertslot + /// [`getMaxShredInsertSlot`]: https://solana.com/docs/rpc/http/getmaxshredinsertslot /// /// # Examples /// @@ -3302,13 +3298,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. 
/// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method is built on the [`getMultipleAccounts`] RPC method. /// - /// [`getMultipleAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmultipleaccounts + /// [`getMultipleAccounts`]: https://solana.com/docs/rpc/http/getmultipleaccounts /// /// # Examples /// @@ -3336,7 +3332,7 @@ impl RpcClient { /// /// This method is built on the [`getMultipleAccounts`] RPC method. /// - /// [`getMultipleAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmultipleaccounts + /// [`getMultipleAccounts`]: https://solana.com/docs/rpc/http/getmultipleaccounts /// /// # Examples /// @@ -3376,7 +3372,7 @@ impl RpcClient { /// /// This method is built on the [`getMultipleAccounts`] RPC method. /// - /// [`getMultipleAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getmultipleaccounts + /// [`getMultipleAccounts`]: https://solana.com/docs/rpc/http/getmultipleaccounts /// /// # Examples /// @@ -3428,7 +3424,7 @@ impl RpcClient { /// /// This method is built on the [`getAccountInfo`] RPC method. /// - /// [`getAccountInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getaccountinfo + /// [`getAccountInfo`]: https://solana.com/docs/rpc/http/getaccountinfo /// /// # Examples /// @@ -3458,7 +3454,7 @@ impl RpcClient { /// This method corresponds directly to the /// [`getMinimumBalanceForRentExemption`] RPC method. /// - /// [`getMinimumBalanceForRentExemption`]: https://docs.solana.com/developing/clients/jsonrpc-api#getminimumbalanceforrentexemption + /// [`getMinimumBalanceForRentExemption`]: https://solana.com/docs/rpc/http/getminimumbalanceforrentexemption /// /// # Examples /// @@ -3478,13 +3474,13 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getBalance`] RPC method. /// - /// [`getBalance`]: https://docs.solana.com/developing/clients/jsonrpc-api#getbalance + /// [`getBalance`]: https://solana.com/docs/rpc/http/getbalance /// /// # Examples /// @@ -3510,7 +3506,7 @@ impl RpcClient { /// /// This method corresponds directly to the [`getBalance`] RPC method. /// - /// [`getBalance`]: https://docs.solana.com/developing/clients/jsonrpc-api#getbalance + /// [`getBalance`]: https://solana.com/docs/rpc/http/getbalance /// /// # Examples /// @@ -3545,14 +3541,14 @@ impl RpcClient { /// /// This method uses the configured [commitment level][cl]. /// - /// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment + /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment /// /// # RPC Reference /// /// This method corresponds directly to the [`getProgramAccounts`] RPC /// method. /// - /// [`getProgramAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getprogramaccounts + /// [`getProgramAccounts`]: https://solana.com/docs/rpc/http/getprogramaccounts /// /// # Examples /// @@ -3578,7 +3574,7 @@ impl RpcClient { /// /// This method is built on the [`getProgramAccounts`] RPC method. 
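A sketch of the program-account scan documented above; the program id is a placeholder, and for large programs the `_with_config` variant with filters is usually the better fit:

```rust
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::signature::{Keypair, Signer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let program_id = Keypair::new().pubkey(); // placeholder

    // All accounts owned by `program_id`, as (address, account) pairs.
    let accounts = rpc_client.get_program_accounts(&program_id)?;
    for (address, account) in accounts {
        println!("{address}: {} lamports, {} bytes", account.lamports, account.data.len());
    }
    Ok(())
}
```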
///
- /// [`getProgramAccounts`]: https://docs.solana.com/developing/clients/jsonrpc-api#getprogramaccounts
+ /// [`getProgramAccounts`]: https://solana.com/docs/rpc/http/getprogramaccounts
///
/// # Examples
///
@@ -3641,7 +3637,7 @@ impl RpcClient {
///
/// This method corresponds directly to the [`getStakeMinimumDelegation`] RPC method.
///
- /// [`getStakeMinimumDelegation`]: https://docs.solana.com/developing/clients/jsonrpc-api#getstakeminimumdelegation
+ /// [`getStakeMinimumDelegation`]: https://solana.com/docs/rpc/http/getstakeminimumdelegation
///
/// # Examples
///
@@ -3662,7 +3658,7 @@ impl RpcClient {
///
/// This method corresponds directly to the [`getStakeMinimumDelegation`] RPC method.
///
- /// [`getStakeMinimumDelegation`]: https://docs.solana.com/developing/clients/jsonrpc-api#getstakeminimumdelegation
+ /// [`getStakeMinimumDelegation`]: https://solana.com/docs/rpc/http/getstakeminimumdelegation
///
/// # Examples
///
diff --git a/rpc-test/tests/nonblocking.rs b/rpc-test/tests/nonblocking.rs
index 45b1c9e8bb2237..2a346c93e25a3a 100644
--- a/rpc-test/tests/nonblocking.rs
+++ b/rpc-test/tests/nonblocking.rs
@@ -40,7 +40,7 @@ async fn test_tpu_send_transaction() {
.get_signature_statuses(&signatures)
.await
.unwrap();
- if statuses.value.get(0).is_some() {
+ if statuses.value.first().is_some() {
break;
}
}
diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs
index f1c2d4acb95b28..d0245608d172d1 100644
--- a/rpc-test/tests/rpc.rs
+++ b/rpc-test/tests/rpc.rs
@@ -496,7 +496,7 @@ fn run_tpu_send_transaction(tpu_use_quic: bool) {
loop {
assert!(now.elapsed() < timeout);
let statuses = rpc_client.get_signature_statuses(&signatures).unwrap();
- if statuses.value.get(0).is_some() {
+ if statuses.value.first().is_some() {
return;
}
}
diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml
index 3edd4b38009639..98d9ee572f6824 100644
--- a/rpc/Cargo.toml
+++ b/rpc/Cargo.toml
@@ -63,6 +63,7 @@ tokio-util = { workspace = true, features = ["codec", "compat"] }
[dev-dependencies]
serial_test = { workspace = true }
solana-net-utils = { workspace = true }
+solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
solana-stake-program = { workspace = true }
spl-pod = { workspace = true }
symlink = { workspace = true }
diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs
index 6a9e0e2a8a5ea7..e2de5f9d2223fb 100644
--- a/rpc/src/optimistically_confirmed_bank_tracker.rs
+++ b/rpc/src/optimistically_confirmed_bank_tracker.rs
@@ -235,7 +235,7 @@ impl OptimisticallyConfirmedBankTracker {
}

fn notify_new_root_slots(
- roots: &mut Vec<Slot>,
+ roots: &mut [Slot],
newest_root_slot: &mut Slot,
slot_notification_subscribers: &Option<Arc<RwLock<Vec<SlotNotificationSender>>>>,
) {
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 5e62dff9ce55d3..cf73a224589dec 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -87,10 +87,10 @@ use {
solana_storage_bigtable::Error as StorageError,
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::{
- BlockEncodingOptions, ConfirmedBlock, ConfirmedTransactionStatusWithSignature,
- ConfirmedTransactionWithStatusMeta, EncodedConfirmedTransactionWithStatusMeta, Reward,
- RewardType, TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionStatus,
- UiConfirmedBlock, UiTransactionEncoding,
+ map_inner_instructions, BlockEncodingOptions, ConfirmedBlock,
+ ConfirmedTransactionStatusWithSignature, ConfirmedTransactionWithStatusMeta,
+ EncodedConfirmedTransactionWithStatusMeta, Reward, RewardType, TransactionBinaryEncoding,
+ TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding,
},
solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY},
spl_token_2022::{
@@ -342,12 +342,13 @@ impl JsonRpcRequestProcessor {
// Useful for unit testing
pub fn new_from_bank(
- bank: Arc<Bank>,
+ bank: Bank,
socket_addr_space: SocketAddrSpace,
connection_cache: Arc<ConnectionCache>,
) -> Self {
let genesis_hash = bank.hash();
- let bank_forks = BankForks::new_from_banks(&[bank.clone()], bank.slot());
+ let bank_forks = BankForks::new_rw_arc(bank);
+ let bank = bank_forks.read().unwrap().root_bank();
let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).unwrap());
let exit = Arc::new(AtomicBool::new(false));
let cluster_info = Arc::new({
@@ -631,7 +632,7 @@ impl JsonRpcRequestProcessor {
// Since epoch schedule data comes from the genesis config, any commitment level should be
// fine
let bank = self.bank(Some(CommitmentConfig::finalized()));
- *bank.epoch_schedule()
+ bank.epoch_schedule().clone()
}

pub fn get_balance(
@@ -1769,7 +1770,7 @@ impl JsonRpcRequestProcessor {
deactivating,
} = delegation.stake_activating_and_deactivating(
epoch,
- Some(&stake_history),
+ &stake_history,
new_rate_activation_epoch,
);
let stake_activation_state = if deactivating > 0 {
@@ -3265,6 +3266,7 @@ pub mod rpc_full {
use {
super::*,
solana_sdk::message::{SanitizedVersionedMessage, VersionedMessage},
+ solana_transaction_status::UiInnerInstructions,
};
#[rpc]
pub trait Full {
@@ -3675,7 +3677,8 @@ pub mod rpc_full {
post_simulation_accounts: _,
units_consumed,
return_data,
- } = preflight_bank.simulate_transaction(transaction)
+ inner_instructions: _, // Always `None` due to `enable_cpi_recording = false`
+ } = preflight_bank.simulate_transaction(&transaction, false)
{
match err {
TransactionError::BlockhashNotFound => {
@@ -3693,6 +3696,7 @@ pub mod rpc_full {
accounts: None,
units_consumed: Some(units_consumed),
return_data: return_data.map(|return_data| return_data.into()),
+ inner_instructions: None,
},
}
.into());
@@ -3723,6 +3727,7 @@ pub mod rpc_full {
encoding,
accounts: config_accounts,
min_context_slot,
+ inner_instructions: enable_cpi_recording,
} = config.unwrap_or_default();
let tx_encoding = encoding.unwrap_or(UiTransactionEncoding::Base58);
let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| {
@@ -3752,7 +3757,6 @@ pub mod rpc_full {
if sig_verify {
verify_transaction(&transaction, &bank.feature_set)?;
}
- let number_of_accounts = transaction.message().account_keys().len();

let TransactionSimulationResult {
result,
@@ -3760,7 +3764,11 @@ pub mod rpc_full {
post_simulation_accounts,
units_consumed,
return_data,
- } = bank.simulate_transaction(transaction);
+ inner_instructions,
+ } = bank.simulate_transaction(&transaction, enable_cpi_recording);
+
+ let account_keys = transaction.message().account_keys();
+ let number_of_accounts = account_keys.len();

let accounts = if let Some(config_accounts) = config_accounts {
let accounts_encoding = config_accounts
@@ -3803,6 +3811,12 @@ pub mod rpc_full {
None
};

+ let inner_instructions = inner_instructions.map(|info| {
+ map_inner_instructions(info)
+ .map(|converted| UiInnerInstructions::parse(converted, &account_keys))
+ .collect()
+ });
+
Ok(new_response(
bank,
RpcSimulateTransactionResult {
@@ -3811,6 +3825,7 @@ pub mod rpc_full {
accounts,
units_consumed: Some(units_consumed),
return_data: return_data.map(|return_data| return_data.into()),
+ inner_instructions,
},
))
}
@@ -5057,18 +5072,20 @@ pub mod
tests { fn test_rpc_request_processor_new() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(100); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); - bank.transfer(20, &genesis.mint_keypair, &bob_pubkey) - .unwrap(); + let bank = Bank::new_for_tests(&genesis.genesis_config); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); - let request_processor = JsonRpcRequestProcessor::new_from_bank( + let meta = JsonRpcRequestProcessor::new_from_bank( bank, SocketAddrSpace::Unspecified, connection_cache, ); + + let bank = meta.bank_forks.read().unwrap().root_bank(); + bank.transfer(20, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + assert_eq!( - request_processor - .get_transaction_count(RpcContextConfig::default()) + meta.get_transaction_count(RpcContextConfig::default()) .unwrap(), 1 ); @@ -5078,7 +5095,7 @@ pub mod tests { fn test_rpc_get_balance() { let genesis = create_genesis_config(20); let mint_pubkey = genesis.mint_keypair.pubkey(); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); + let bank = Bank::new_for_tests(&genesis.genesis_config); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); let meta = JsonRpcRequestProcessor::new_from_bank( bank, @@ -5110,7 +5127,7 @@ pub mod tests { fn test_rpc_get_balance_via_client() { let genesis = create_genesis_config(20); let mint_pubkey = genesis.mint_keypair.pubkey(); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); + let bank = Bank::new_for_tests(&genesis.genesis_config); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); let meta = JsonRpcRequestProcessor::new_from_bank( bank, @@ -5227,17 +5244,7 @@ pub mod tests { fn test_rpc_get_tx_count() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(10); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); - // Add 4 transactions - bank.transfer(1, &genesis.mint_keypair, &bob_pubkey) - .unwrap(); - bank.transfer(2, &genesis.mint_keypair, &bob_pubkey) - .unwrap(); - bank.transfer(3, &genesis.mint_keypair, &bob_pubkey) - .unwrap(); - bank.transfer(4, &genesis.mint_keypair, &bob_pubkey) - .unwrap(); - + let bank = Bank::new_for_tests(&genesis.genesis_config); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); let meta = JsonRpcRequestProcessor::new_from_bank( bank, @@ -5248,6 +5255,17 @@ pub mod tests { let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); + // Add 4 transactions + let bank = meta.bank_forks.read().unwrap().root_bank(); + bank.transfer(1, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + bank.transfer(2, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + bank.transfer(3, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + bank.transfer(4, &genesis.mint_keypair, &bob_pubkey) + .unwrap(); + let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":4,"id":1}"#; @@ -5909,6 +5927,7 @@ pub mod tests { } ], "err":null, + "innerInstructions": null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -5993,6 +6012,7 @@ pub mod tests { "value":{ "accounts":null, "err":null, + "innerInstructions":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6021,6 
+6041,7 @@ pub mod tests { "value":{ "accounts":null, "err":null, + "innerInstructions":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6073,6 +6094,7 @@ pub mod tests { "value":{ "err":"BlockhashNotFound", "accounts":null, + "innerInstructions":null, "logs":[], "returnData":null, "unitsConsumed":0, @@ -6099,6 +6121,7 @@ pub mod tests { "value":{ "accounts":null, "err":null, + "innerInstructions":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6383,7 +6406,7 @@ pub mod tests { #[test] fn test_rpc_send_bad_tx() { let genesis = create_genesis_config(100); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); + let bank = Bank::new_for_tests(&genesis.genesis_config); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); let meta = JsonRpcRequestProcessor::new_from_bank( bank, @@ -6479,7 +6502,7 @@ pub mod tests { assert_eq!( res, Some( - r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","logs":[],"returnData":null,"unitsConsumed":0}},"id":1}"#.to_string(), + r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","innerInstructions":null,"logs":[],"returnData":null,"unitsConsumed":0}},"id":1}"#.to_string(), ) ); diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index b98f0831518675..eca53c66658766 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -8,7 +8,7 @@ use { blockstore_processor::{TransactionStatusBatch, TransactionStatusMessage}, }, solana_transaction_status::{ - extract_and_fmt_memos, InnerInstruction, InnerInstructions, Reward, TransactionStatusMeta, + extract_and_fmt_memos, map_inner_instructions, Reward, TransactionStatusMeta, }, std::{ sync::{ @@ -121,21 +121,7 @@ impl TransactionStatusService { let tx_account_locks = transaction.get_account_locks_unchecked(); let inner_instructions = inner_instructions.map(|inner_instructions| { - inner_instructions - .into_iter() - .enumerate() - .map(|(index, instructions)| InnerInstructions { - index: index as u8, - instructions: instructions - .into_iter() - .map(|info| InnerInstruction { - instruction: info.instruction, - stack_height: Some(u32::from(info.stack_height)), - }) - .collect(), - }) - .filter(|i| !i.instructions.is_empty()) - .collect() + map_inner_instructions(inner_instructions).collect() }); let pre_token_balances = Some(pre_token_balances); @@ -336,7 +322,7 @@ pub(crate) mod tests { #[test] fn test_notify_transaction() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let (transaction_status_sender, transaction_status_receiver) = unbounded(); let ledger_path = get_tmp_ledger_path_auto_delete!(); diff --git a/run.sh b/run.sh index 809893e88062bb..9f17e60f59b1d4 100755 --- a/run.sh +++ b/run.sh @@ -7,9 +7,9 @@ cat <<'EOF' You almost certainly do not want to run this script! 
If you are a dapp developer and looking for a way to run a local validator, please - see https://docs.solana.com/developing/test-validator + see https://docs.solanalabs.com/cli/examples/test-validator - If you are a prospective validator, please see https://docs.solana.com/running-validator + If you are a prospective validator, please see https://docs.solanalabs.com/operations If you are a core developer, many apologies for what you're about to endure, but you may be in the right place. This script is now located at `./scripts/run.sh`. diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml new file mode 100644 index 00000000000000..947da05cc169c2 --- /dev/null +++ b/runtime-transaction/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "solana-runtime-transaction" +description = "Solana runtime-transaction" +documentation = "https://docs.rs/solana-runtime-transaction" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +log = { workspace = true } +solana-program-runtime = { workspace = true } +solana-sdk = { workspace = true } +thiserror = { workspace = true } + +[lib] +crate-type = ["lib"] +name = "solana_runtime_transaction" + +[dev-dependencies] +bincode = { workspace = true } +rand = { workspace = true } +solana-program = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +rustc_version = { workspace = true } diff --git a/runtime-transaction/build.rs b/runtime-transaction/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/runtime-transaction/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/runtime-transaction/src/lib.rs b/runtime-transaction/src/lib.rs new file mode 100644 index 00000000000000..0fdeb7c5b6bd65 --- /dev/null +++ b/runtime-transaction/src/lib.rs @@ -0,0 +1,5 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![allow(clippy::arithmetic_side_effects)] + +pub mod runtime_transaction; +pub mod transaction_meta; diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs new file mode 100644 index 00000000000000..3ca7d4fb7920cd --- /dev/null +++ b/runtime-transaction/src/runtime_transaction.rs @@ -0,0 +1,301 @@ +//! RuntimeTransaction is the `runtime`-facing representation of a transaction, while +//! solana_sdk::SanitizedTransaction is the client-facing representation. +//! +//! It has two states: +//! 1. Statically Loaded: after receiving a `packet` from sigverify and deserializing +//! it into `solana_sdk::VersionedTransaction`, then sanitizing it into +//! `solana_sdk::SanitizedVersionedTransaction`, which can be wrapped into +//! `RuntimeTransaction` with static transaction metadata extracted. +//! 2. Dynamically Loaded: after account addresses are successfully loaded from the onchain +//! ALT, RuntimeTransaction transitions into the Dynamically Loaded state, +//! with its dynamic metadata loaded.
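To make the two-state flow described above concrete, here is a minimal usage sketch. It is not part of the patch: it only rearranges the API exercised by the tests at the bottom of this file, and assumes a `sanitized_versioned_tx: SanitizedVersionedTransaction` is already in hand.

// Sketch only; mirrors the flow in `test_advancing_transaction_type` below.
// State 1 -- Statically Loaded: wrap the sanitized transaction and extract
// static metadata (message hash, simple-vote flag, compute-budget limits).
let static_tx = RuntimeTransaction::<SanitizedVersionedMessage>::try_from(
    sanitized_versioned_tx,
    None, // message_hash: computed from the message when not supplied
    None, // is_simple_vote_tx: detected when not supplied
)?;

// State 2 -- Dynamically Loaded: resolve lookup-table addresses to advance
// the message into `SanitizedMessage`; dynamic metadata is loaded here.
let dynamic_tx = RuntimeTransaction::<SanitizedMessage>::try_from(
    static_tx,
    SimpleAddressLoader::Disabled, // no ALTs needed for a legacy message
)?;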
+use { + crate::transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, + solana_program_runtime::compute_budget_processor::{ + process_compute_budget_instructions, ComputeBudgetLimits, + }, + solana_sdk::{ + hash::Hash, + message::{AddressLoader, SanitizedMessage, SanitizedVersionedMessage}, + signature::Signature, + simple_vote_transaction_checker::is_simple_vote_transaction, + transaction::{Result, SanitizedVersionedTransaction}, + }, +}; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct RuntimeTransaction<M> { + signatures: Vec<Signature>, + message: M, + // transaction meta is a collection of fields, it is updated + // during message state transition + meta: TransactionMeta, +} + +// These traits gate access to static and dynamic metadata +// so that only transactions with supporting message types +// can access them. +trait StaticMetaAccess {} +trait DynamicMetaAccess: StaticMetaAccess {} + +// Implement the gate traits for the message types that should +// have access to the static and dynamic metadata. +impl StaticMetaAccess for SanitizedVersionedMessage {} +impl StaticMetaAccess for SanitizedMessage {} +impl DynamicMetaAccess for SanitizedMessage {} + +impl<M: StaticMetaAccess> StaticMeta for RuntimeTransaction<M> { + fn message_hash(&self) -> &Hash { + &self.meta.message_hash + } + fn is_simple_vote_tx(&self) -> bool { + self.meta.is_simple_vote_tx + } + fn compute_unit_limit(&self) -> u32 { + self.meta.compute_unit_limit + } + fn compute_unit_price(&self) -> u64 { + self.meta.compute_unit_price + } + fn loaded_accounts_bytes(&self) -> u32 { + self.meta.loaded_accounts_bytes + } +} + +impl<M: DynamicMetaAccess> DynamicMeta for RuntimeTransaction<M> {} + +impl RuntimeTransaction<SanitizedVersionedMessage> { + pub fn try_from( + sanitized_versioned_tx: SanitizedVersionedTransaction, + message_hash: Option<Hash>, + is_simple_vote_tx: Option<bool>, + ) -> Result<Self> { + let mut meta = TransactionMeta::default(); + meta.set_is_simple_vote_tx( + is_simple_vote_tx + .unwrap_or_else(|| is_simple_vote_transaction(&sanitized_versioned_tx)), + ); + + let (signatures, message) = sanitized_versioned_tx.destruct(); + meta.set_message_hash(message_hash.unwrap_or_else(|| message.message.hash())); + + let ComputeBudgetLimits { + compute_unit_limit, + compute_unit_price, + loaded_accounts_bytes, + ..
+ } = process_compute_budget_instructions(message.program_instructions_iter())?; + meta.set_compute_unit_limit(compute_unit_limit); + meta.set_compute_unit_price(compute_unit_price); + meta.set_loaded_accounts_bytes(loaded_accounts_bytes); + + Ok(Self { + signatures, + message, + meta, + }) + } +} + +impl RuntimeTransaction<SanitizedMessage> { + pub fn try_from( + statically_loaded_runtime_tx: RuntimeTransaction<SanitizedVersionedMessage>, + address_loader: impl AddressLoader, + ) -> Result<Self> { + let mut tx = Self { + signatures: statically_loaded_runtime_tx.signatures, + message: SanitizedMessage::try_new( + statically_loaded_runtime_tx.message, + address_loader, + )?, + meta: statically_loaded_runtime_tx.meta, + }; + tx.load_dynamic_metadata()?; + + Ok(tx) + } + + fn load_dynamic_metadata(&mut self) -> Result<()> { + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_program::{ + system_instruction, + vote::{self, state::Vote}, + }, + solana_sdk::{ + compute_budget::ComputeBudgetInstruction, + instruction::Instruction, + message::Message, + signer::{keypair::Keypair, Signer}, + transaction::{SimpleAddressLoader, Transaction, VersionedTransaction}, + }, + }; + + fn vote_sanitized_versioned_transaction() -> SanitizedVersionedTransaction { + let bank_hash = Hash::new_unique(); + let block_hash = Hash::new_unique(); + let vote_keypair = Keypair::new(); + let node_keypair = Keypair::new(); + let auth_keypair = Keypair::new(); + let votes = Vote::new(vec![1, 2, 3], bank_hash); + let vote_ix = + vote::instruction::vote(&vote_keypair.pubkey(), &auth_keypair.pubkey(), votes); + let mut vote_tx = Transaction::new_with_payer(&[vote_ix], Some(&node_keypair.pubkey())); + vote_tx.partial_sign(&[&node_keypair], block_hash); + vote_tx.partial_sign(&[&auth_keypair], block_hash); + + SanitizedVersionedTransaction::try_from(VersionedTransaction::from(vote_tx)).unwrap() + } + + fn non_vote_sanitized_versioned_transaction() -> SanitizedVersionedTransaction { + TestTransaction::new().to_sanitized_versioned_transaction() + } + + // Simple transfer transaction for testing, it does not support vote instruction + // because simple vote transaction will not request limits + struct TestTransaction { + from_keypair: Keypair, + hash: Hash, + instructions: Vec<Instruction>, + } + + impl TestTransaction { + fn new() -> Self { + let from_keypair = Keypair::new(); + let instructions = vec![system_instruction::transfer( + &from_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + 1, + )]; + TestTransaction { + from_keypair, + hash: Hash::new_unique(), + instructions, + } + } + + fn add_compute_unit_limit(&mut self, val: u32) -> &mut TestTransaction { + self.instructions + .push(ComputeBudgetInstruction::set_compute_unit_limit(val)); + self + } + + fn add_compute_unit_price(&mut self, val: u64) -> &mut TestTransaction { + self.instructions + .push(ComputeBudgetInstruction::set_compute_unit_price(val)); + self + } + + fn add_loaded_accounts_bytes(&mut self, val: u32) -> &mut TestTransaction { + self.instructions + .push(ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(val)); + self + } + + fn to_sanitized_versioned_transaction(&self) -> SanitizedVersionedTransaction { + let message = Message::new(&self.instructions, Some(&self.from_keypair.pubkey())); + let tx = Transaction::new(&[&self.from_keypair], message, self.hash); + SanitizedVersionedTransaction::try_from(VersionedTransaction::from(tx)).unwrap() + } + } + + #[test] + fn test_runtime_transaction_is_vote_meta() { + fn get_is_simple_vote( + svt: SanitizedVersionedTransaction, +
is_simple_vote: Option<bool>, + ) -> bool { + RuntimeTransaction::<SanitizedVersionedMessage>::try_from(svt, None, is_simple_vote) + .unwrap() + .meta + .is_simple_vote_tx + } + + assert!(!get_is_simple_vote( + non_vote_sanitized_versioned_transaction(), + None + )); + + assert!(get_is_simple_vote( + non_vote_sanitized_versioned_transaction(), + Some(true), // override + )); + + assert!(get_is_simple_vote( + vote_sanitized_versioned_transaction(), + None + )); + + assert!(!get_is_simple_vote( + vote_sanitized_versioned_transaction(), + Some(false), // override + )); + } + + #[test] + fn test_advancing_transaction_type() { + let hash = Hash::new_unique(); + + let statically_loaded_transaction = + RuntimeTransaction::<SanitizedVersionedMessage>::try_from( + non_vote_sanitized_versioned_transaction(), + Some(hash), + None, + ) + .unwrap(); + + assert_eq!(hash, *statically_loaded_transaction.message_hash()); + assert!(!statically_loaded_transaction.is_simple_vote_tx()); + + let dynamically_loaded_transaction = RuntimeTransaction::<SanitizedMessage>::try_from( + statically_loaded_transaction, + SimpleAddressLoader::Disabled, + ); + let dynamically_loaded_transaction = + dynamically_loaded_transaction.expect("created from statically loaded tx"); + + assert_eq!(hash, *dynamically_loaded_transaction.message_hash()); + assert!(!dynamically_loaded_transaction.is_simple_vote_tx()); + } + + #[test] + fn test_runtime_transaction_static_meta() { + let hash = Hash::new_unique(); + let compute_unit_limit = 250_000; + let compute_unit_price = 1_000; + let loaded_accounts_bytes = 1_024; + let mut test_transaction = TestTransaction::new(); + + let runtime_transaction_static = RuntimeTransaction::<SanitizedVersionedMessage>::try_from( + test_transaction + .add_compute_unit_limit(compute_unit_limit) + .add_compute_unit_price(compute_unit_price) + .add_loaded_accounts_bytes(loaded_accounts_bytes) + .to_sanitized_versioned_transaction(), + Some(hash), + None, + ) + .unwrap(); + + assert_eq!(&hash, runtime_transaction_static.message_hash()); + assert!(!runtime_transaction_static.is_simple_vote_tx()); + assert_eq!( + compute_unit_limit, + runtime_transaction_static.compute_unit_limit() + ); + assert_eq!( + compute_unit_price, + runtime_transaction_static.compute_unit_price() + ); + assert_eq!( + loaded_accounts_bytes, + runtime_transaction_static.loaded_accounts_bytes() + ); + } +} diff --git a/runtime-transaction/src/transaction_meta.rs b/runtime-transaction/src/transaction_meta.rs new file mode 100644 index 00000000000000..f46fa39c3ab71b --- /dev/null +++ b/runtime-transaction/src/transaction_meta.rs @@ -0,0 +1,62 @@ +//! Transaction Meta contains data that follows a transaction through the +//! execution pipeline in the runtime. Examples of metadata could be limits +//! specified by compute-budget instructions, the simple-vote flag, transaction +//! costs, the durable nonce account, etc. +//! +//! The premise is that if anything qualifies as metadata, then it must be valid +//! and available as long as the transaction itself is valid and available. +//! Hence these fields are not Option types. Their visibility at different states +//! is defined in traits. +//! +//! The StaticMeta and DynamicMeta traits are accessor traits on the +//! RuntimeTransaction types, not the TransactionMeta itself. +//!
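As a hypothetical illustration of the accessor-trait gating described above (not part of the patch), consumers can be written against `StaticMeta` alone and then accept a `RuntimeTransaction` in either state; the `priority_key` helper below is invented for this example.

// Sketch only: compiles for both RuntimeTransaction<SanitizedVersionedMessage>
// and RuntimeTransaction<SanitizedMessage>, since both message types implement
// the gate trait behind StaticMeta.
fn priority_key<T: StaticMeta>(tx: &T) -> (bool, u64) {
    // Order simple votes first, then by the statically extracted CU price.
    (tx.is_simple_vote_tx(), tx.compute_unit_price())
}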
+use solana_sdk::hash::Hash; + +/// Metadata that can be extracted statically from a sanitized transaction, +/// for example: message hash, simple-vote-tx flag, limits set by instructions +pub trait StaticMeta { + fn message_hash(&self) -> &Hash; + fn is_simple_vote_tx(&self) -> bool; + fn compute_unit_limit(&self) -> u32; + fn compute_unit_price(&self) -> u64; + fn loaded_accounts_bytes(&self) -> u32; +} + +/// Statically loaded meta is a supertrait of dynamically loaded meta: once a +/// transaction has successfully transitioned into the dynamically loaded state, it +/// has both sets of metadata populated and available. +/// Dynamic metadata is available after account addresses are loaded from the +/// on-chain ALT; examples are transaction usage costs and the nonce account. +pub trait DynamicMeta: StaticMeta {} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct TransactionMeta { + pub(crate) message_hash: Hash, + pub(crate) is_simple_vote_tx: bool, + pub(crate) compute_unit_limit: u32, + pub(crate) compute_unit_price: u64, + pub(crate) loaded_accounts_bytes: u32, +} + +impl TransactionMeta { + pub(crate) fn set_message_hash(&mut self, message_hash: Hash) { + self.message_hash = message_hash; + } + + pub(crate) fn set_is_simple_vote_tx(&mut self, is_simple_vote_tx: bool) { + self.is_simple_vote_tx = is_simple_vote_tx; + } + + pub(crate) fn set_compute_unit_limit(&mut self, compute_unit_limit: u32) { + self.compute_unit_limit = compute_unit_limit; + } + + pub(crate) fn set_compute_unit_price(&mut self, compute_unit_price: u64) { + self.compute_unit_price = compute_unit_price; + } + + pub(crate) fn set_loaded_accounts_bytes(&mut self, loaded_accounts_bytes: u32) { + self.loaded_accounts_bytes = loaded_accounts_bytes; + } +} diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 993c22d2a04e18..7efc0a11ac0d75 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -10,11 +10,12 @@ use { solana_accounts_db::{ accounts::{AccountAddressFilter, Accounts}, accounts_db::{ - test_utils::create_test_accounts, AccountShrinkThreshold, - VerifyAccountsHashAndLamportsConfig, + test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb, + VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, }, accounts_index::{AccountSecondaryIndexes, ScanConfig}, ancestors::Ancestors, + epoch_accounts_hash::EpochAccountsHash, rent_collector::RentCollector, }, solana_runtime::bank::*, @@ -35,6 +36,18 @@ use { test::Bencher, }; +fn new_accounts_db(account_paths: Vec<PathBuf>) -> AccountsDb { + AccountsDb::new_with_config( + account_paths, + &ClusterType::Development, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + None, + Arc::default(), + ) +} + fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) -> Result<(), LamportsError> { for t in 0..num { let pubkey = solana_sdk::pubkey::new_rand(); @@ -70,6 +83,15 @@ fn test_accounts_squash(bencher: &mut Bencher) { deposit_many(&prev_bank, &mut pubkeys, 250_000).unwrap(); prev_bank.freeze(); + // Need to set the EAH to Valid so that `Bank::new_from_parent()` doesn't panic during + // freeze when parent is in the EAH calculation window. + prev_bank + .rc + .accounts + .accounts_db + .epoch_accounts_hash_manager + .set_valid(EpochAccountsHash::new(Hash::new_unique()), 0); + // Measures the performance of the squash operation.
// This mainly consists of the freeze operation which calculates the // merkle hash of the account state and distribution of fees and rent @@ -89,12 +111,8 @@ fn test_accounts_squash(bencher: &mut Bencher) { #[bench] fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { - let accounts = Accounts::new_with_config_for_benches( - vec![PathBuf::from("bench_accounts_hash_internal")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut pubkeys: Vec<Pubkey> = vec![]; let num_accounts = 60_000; let slot = 0; @@ -126,12 +144,8 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { #[bench] fn test_update_accounts_hash(bencher: &mut Bencher) { solana_logger::setup(); - let accounts = Accounts::new_with_config_for_benches( - vec![PathBuf::from("update_accounts_hash")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut pubkeys: Vec<Pubkey> = vec![]; create_test_accounts(&accounts, &mut pubkeys, 50_000, 0); let ancestors = Ancestors::from(vec![0]); @@ -145,12 +159,8 @@ #[bench] fn test_accounts_delta_hash(bencher: &mut Bencher) { solana_logger::setup(); - let accounts = Accounts::new_with_config_for_benches( - vec![PathBuf::from("accounts_delta_hash")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut pubkeys: Vec<Pubkey> = vec![]; create_test_accounts(&accounts, &mut pubkeys, 100_000, 0); bencher.iter(|| { @@ -161,12 +171,8 @@ #[bench] fn bench_delete_dependencies(bencher: &mut Bencher) { solana_logger::setup(); - let accounts = Accounts::new_with_config_for_benches( - vec![PathBuf::from("accounts_delete_deps")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delete_deps")]); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut old_pubkey = Pubkey::default(); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); for i in 0..1000 { @@ -190,15 +196,11 @@ fn store_accounts_with_possible_contention( F: Fn(&Accounts, &[Pubkey]) + Send + Copy, { let num_readers = 5; - let accounts = Arc::new(Accounts::new_with_config_for_benches( - vec![ - PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string())) - .join(bench_name), - ], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let accounts_db = new_accounts_db(vec![PathBuf::from( + std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), + ) + .join(bench_name)]); + let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); let num_keys = 1000; let slot = 0; accounts.add_root(slot); @@ -326,15 +328,11 @@ fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) { } fn setup_bench_dashmap_iter() -> (Arc<Accounts>, DashMap) { - let accounts = Arc::new(Accounts::new_with_config_for_benches( - vec![ -
PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string())) - .join("bench_dashmap_par_iter"), - ], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - )); + let accounts_db = new_accounts_db(vec![PathBuf::from( + std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), + ) + .join("bench_dashmap_par_iter")]); + let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); let dashmap = DashMap::new(); let num_keys = std::env::var("NUM_BENCH_KEYS") @@ -383,12 +381,8 @@ fn bench_dashmap_iter(bencher: &mut Bencher) { #[bench] fn bench_load_largest_accounts(b: &mut Bencher) { - let accounts = Accounts::new_with_config_for_benches( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(Vec::new()); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut rng = rand::thread_rng(); for _ in 0..10_000 { let lamports = rng.gen(); diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index 21f4976d695e73..867b549b24706a 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -89,7 +89,7 @@ fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[Transact } for _ in 0..1_000_000_000_u64 { if bank - .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) .is_some() { break; @@ -97,13 +97,13 @@ fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[Transact sleep(Duration::from_nanos(1)); } if bank - .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) .unwrap() .is_err() { error!( "transaction failed: {:?}", - bank.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) + bank.get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) .unwrap() ); panic!(); diff --git a/accounts-db/src/account_rent_state.rs b/runtime/src/accounts/account_rent_state.rs similarity index 97% rename from accounts-db/src/account_rent_state.rs rename to runtime/src/accounts/account_rent_state.rs index ce261ec16c644d..0949e21acfd7d5 100644 --- a/accounts-db/src/account_rent_state.rs +++ b/runtime/src/accounts/account_rent_state.rs @@ -10,7 +10,7 @@ use { }; #[derive(Debug, PartialEq, Eq)] -pub enum RentState { +pub(crate) enum RentState { /// account.lamports == 0 Uninitialized, /// 0 < account.lamports < rent-exempt-minimum @@ -58,7 +58,7 @@ impl RentState { } } -pub(crate) fn submit_rent_state_metrics(pre_rent_state: &RentState, post_rent_state: &RentState) { +pub(super) fn submit_rent_state_metrics(pre_rent_state: &RentState, post_rent_state: &RentState) { match (pre_rent_state, post_rent_state) { (&RentState::Uninitialized, &RentState::RentPaying { .. 
}) => { inc_new_counter_info!("rent_paying_err-new_account", 1); @@ -73,7 +73,7 @@ pub(crate) fn submit_rent_state_metrics(pre_rent_state: &RentState, post_rent_st } } -pub fn check_rent_state( +pub(crate) fn check_rent_state( pre_rent_state: Option<&RentState>, post_rent_state: Option<&RentState>, transaction_context: &TransactionContext, @@ -97,7 +97,7 @@ pub fn check_rent_state( Ok(()) } -pub(crate) fn check_rent_state_with_account( +pub(super) fn check_rent_state_with_account( pre_rent_state: &RentState, post_rent_state: &RentState, address: &Pubkey, diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs new file mode 100644 index 00000000000000..ef801be65ab08e --- /dev/null +++ b/runtime/src/accounts/mod.rs @@ -0,0 +1,1738 @@ +pub mod account_rent_state; + +use { + crate::{ + accounts::account_rent_state::{check_rent_state_with_account, RentState}, + bank::RewardInterval, + }, + itertools::Itertools, + log::warn, + solana_accounts_db::{ + account_overrides::AccountOverrides, + accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, + accounts_db::AccountsDb, + ancestors::Ancestors, + blockhash_queue::BlockhashQueue, + nonce_info::{NonceFull, NonceInfo}, + rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, + rent_debits::RentDebits, + transaction_error_metrics::TransactionErrorMetrics, + transaction_results::TransactionCheckResult, + }, + solana_program_runtime::{ + compute_budget_processor::process_compute_budget_instructions, + loaded_programs::LoadedProgramsForTxBatch, + }, + solana_sdk::{ + account::{ + create_executable_meta, is_builtin, is_executable, Account, AccountSharedData, + ReadableAccount, WritableAccount, + }, + account_utils::StateMut, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + feature_set::{ + include_loaded_accounts_data_size_in_fee_calculation, + simplify_writable_program_account_check, FeatureSet, + }, + fee::FeeStructure, + message::SanitizedMessage, + native_loader, + nonce::State as NonceState, + pubkey::Pubkey, + rent::RentDue, + saturating_add_assign, + sysvar::{self, instructions::construct_instructions_data}, + transaction::{Result, SanitizedTransaction, TransactionError}, + transaction_context::IndexOfAccount, + }, + solana_system_program::{get_system_account_kind, SystemAccountKind}, + std::{collections::HashMap, num::NonZeroUsize}, +}; + +#[allow(clippy::too_many_arguments)] +pub(super) fn load_accounts( + accounts_db: &AccountsDb, + ancestors: &Ancestors, + txs: &[SanitizedTransaction], + lock_results: Vec<TransactionCheckResult>, + hash_queue: &BlockhashQueue, + error_counters: &mut TransactionErrorMetrics, + rent_collector: &RentCollector, + feature_set: &FeatureSet, + fee_structure: &FeeStructure, + account_overrides: Option<&AccountOverrides>, + in_reward_interval: RewardInterval, + program_accounts: &HashMap<Pubkey, (&Pubkey, u64)>, + loaded_programs: &LoadedProgramsForTxBatch, + should_collect_rent: bool, +) -> Vec<TransactionLoadResult> { + txs.iter() + .zip(lock_results) + .map(|etx| match etx { + (tx, (Ok(()), nonce)) => { + let lamports_per_signature = nonce + .as_ref() + .map(|nonce| nonce.lamports_per_signature()) + .unwrap_or_else(|| { + hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()) + }); + let fee = if let Some(lamports_per_signature) = lamports_per_signature { + fee_structure.calculate_fee( + tx.message(), + lamports_per_signature, + &process_compute_budget_instructions( + tx.message().program_instructions_iter(), + ) + .unwrap_or_default() + .into(), + feature_set +
.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + ) + } else { + return (Err(TransactionError::BlockhashNotFound), None); + }; + + // load transactions + let loaded_transaction = match load_transaction_accounts( + accounts_db, + ancestors, + tx, + fee, + error_counters, + rent_collector, + feature_set, + account_overrides, + in_reward_interval, + program_accounts, + loaded_programs, + should_collect_rent, + ) { + Ok(loaded_transaction) => loaded_transaction, + Err(e) => return (Err(e), None), + }; + + // Update nonce with fee-subtracted accounts + let nonce = if let Some(nonce) = nonce { + match NonceFull::from_partial( + nonce, + tx.message(), + &loaded_transaction.accounts, + &loaded_transaction.rent_debits, + ) { + Ok(nonce) => Some(nonce), + Err(e) => return (Err(e), None), + } + } else { + None + }; + + (Ok(loaded_transaction), nonce) + } + (_, (Err(e), _nonce)) => (Err(e), None), + }) + .collect() +} + +#[allow(clippy::too_many_arguments)] +fn load_transaction_accounts( + accounts_db: &AccountsDb, + ancestors: &Ancestors, + tx: &SanitizedTransaction, + fee: u64, + error_counters: &mut TransactionErrorMetrics, + rent_collector: &RentCollector, + feature_set: &FeatureSet, + account_overrides: Option<&AccountOverrides>, + reward_interval: RewardInterval, + program_accounts: &HashMap<Pubkey, (&Pubkey, u64)>, + loaded_programs: &LoadedProgramsForTxBatch, + should_collect_rent: bool, +) -> Result<LoadedTransaction> { + let in_reward_interval = reward_interval == RewardInterval::InsideInterval; + + // NOTE: this check will never fail because `tx` is sanitized + if tx.signatures().is_empty() && fee != 0 { + return Err(TransactionError::MissingSignatureForFee); + } + + // There is no way to predict what program will execute without an error + // If a fee can pay for execution then the program will be scheduled + let mut validated_fee_payer = false; + let mut tx_rent: TransactionRent = 0; + let message = tx.message(); + let account_keys = message.account_keys(); + let mut accounts_found = Vec::with_capacity(account_keys.len()); + let mut account_deps = Vec::with_capacity(account_keys.len()); + let mut rent_debits = RentDebits::default(); + + let set_exempt_rent_epoch_max = + feature_set.is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); + + let requested_loaded_accounts_data_size_limit = + get_requested_loaded_accounts_data_size_limit(tx)?; + let mut accumulated_accounts_data_size: usize = 0; + + let instruction_accounts = message + .instructions() + .iter() + .flat_map(|instruction| &instruction.accounts) + .unique() + .collect::<Vec<&u8>>(); + + let mut accounts = account_keys + .iter() + .enumerate() + .map(|(i, key)| { + let mut account_found = true; + #[allow(clippy::collapsible_else_if)] + let account = if solana_sdk::sysvar::instructions::check_id(key) { + construct_instructions_account(message) + } else { + let instruction_account = u8::try_from(i) + .map(|i| instruction_accounts.contains(&&i)) + .unwrap_or(false); + let (account_size, mut account, rent) = if let Some(account_override) = + account_overrides.and_then(|overrides| overrides.get(key)) + { + (account_override.data().len(), account_override.clone(), 0) + } else if let Some(program) = (feature_set + .is_active(&simplify_writable_program_account_check::id()) + && !instruction_account + && !message.is_writable(i)) + .then_some(()) + .and_then(|_| loaded_programs.find(key)) + { + // Optimization to skip loading of accounts which are only used as + // programs in top-level instructions and not passed as instruction accounts.
+ account_shared_data_from_program(key, program_accounts) + .map(|program_account| (program.account_size, program_account, 0))? + } else { + accounts_db + .load_with_fixed_root(ancestors, key) + .map(|(mut account, _)| { + if message.is_writable(i) { + if should_collect_rent { + let rent_due = rent_collector + .collect_from_existing_account( + key, + &mut account, + set_exempt_rent_epoch_max, + ) + .rent_amount; + + (account.data().len(), account, rent_due) + } else { + // When rent fee collection is disabled, we won't collect rent for any account. If there + // are any rent paying accounts, their `rent_epoch` won't change either. However, if the + // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its + // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. + if set_exempt_rent_epoch_max + && (account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && rent_collector.get_rent_due(&account) + == RentDue::Exempt) + { + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + } + (account.data().len(), account, 0) + } + } else { + (account.data().len(), account, 0) + } + }) + .unwrap_or_else(|| { + account_found = false; + let mut default_account = AccountSharedData::default(); + if set_exempt_rent_epoch_max { + // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). + // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account + // with this field already set would allow us to skip rent collection for these accounts. + default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + } + (default_account.data().len(), default_account, 0) + }) + }; + accumulate_and_check_loaded_account_data_size( + &mut accumulated_accounts_data_size, + account_size, + requested_loaded_accounts_data_size_limit, + error_counters, + )?; + + if !validated_fee_payer && message.is_non_loader_key(i) { + if i != 0 { + warn!("Payer index should be 0! 
{:?}", tx); + } + + validate_fee_payer( + key, + &mut account, + i as IndexOfAccount, + error_counters, + rent_collector, + fee, + )?; + + validated_fee_payer = true; + } + + if !feature_set.is_active(&simplify_writable_program_account_check::id()) { + if bpf_loader_upgradeable::check_id(account.owner()) { + if message.is_writable(i) && !message.is_upgradeable_loader_present() { + error_counters.invalid_writable_account += 1; + return Err(TransactionError::InvalidWritableAccount); + } + + if is_builtin(&account) || is_executable(&account, feature_set) { + // The upgradeable loader requires the derived ProgramData account + if let Ok(UpgradeableLoaderState::Program { + programdata_address, + }) = account.state() + { + if accounts_db + .load_with_fixed_root(ancestors, &programdata_address) + .is_none() + { + error_counters.account_not_found += 1; + return Err(TransactionError::ProgramAccountNotFound); + } + } else { + error_counters.invalid_program_for_execution += 1; + return Err(TransactionError::InvalidProgramForExecution); + } + } + } else { + if (is_builtin(&account) || is_executable(&account, feature_set)) + && message.is_writable(i) + { + error_counters.invalid_writable_account += 1; + return Err(TransactionError::InvalidWritableAccount); + } + } + } + + if in_reward_interval + && message.is_writable(i) + && solana_stake_program::check_id(account.owner()) + { + error_counters.program_execution_temporarily_restricted += 1; + return Err(TransactionError::ProgramExecutionTemporarilyRestricted { + account_index: i as u8, + }); + } + + tx_rent += rent; + rent_debits.insert(key, rent, account.lamports()); + + account + }; + + accounts_found.push(account_found); + Ok((*key, account)) + }) + .collect::<Result<Vec<_>>>()?; + + if !validated_fee_payer { + error_counters.account_not_found += 1; + return Err(TransactionError::AccountNotFound); + } + + // Appends the account_deps at the end of the accounts, + // this way they can be accessed in a uniform way. + // At places where only the accounts are needed, + // the account_deps are truncated using e.g: + // accounts.iter().take(message.account_keys.len()) + accounts.append(&mut account_deps); + + let builtins_start_index = accounts.len(); + let program_indices = message + .instructions() + .iter() + .map(|instruction| { + let mut account_indices = Vec::new(); + let mut program_index = instruction.program_id_index as usize; + let (program_id, program_account) = accounts + .get(program_index) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if native_loader::check_id(program_id) { + return Ok(account_indices); + } + + let account_found = accounts_found.get(program_index).unwrap_or(&true); + if !account_found { + error_counters.account_not_found += 1; + return Err(TransactionError::ProgramAccountNotFound); + } + + if !(is_builtin(program_account) || is_executable(program_account, feature_set)) { + error_counters.invalid_program_for_execution += 1; + return Err(TransactionError::InvalidProgramForExecution); + } + account_indices.insert(0, program_index as IndexOfAccount); + let owner_id = program_account.owner(); + if native_loader::check_id(owner_id) { + return Ok(account_indices); + } + program_index = if let Some(owner_index) = accounts + .get(builtins_start_index..) + .ok_or(TransactionError::ProgramAccountNotFound)?
+ .iter() + .position(|(key, _)| key == owner_id) + { + builtins_start_index.saturating_add(owner_index) + } else { + let owner_index = accounts.len(); + if let Some((owner_account, _)) = + accounts_db.load_with_fixed_root(ancestors, owner_id) + { + if !native_loader::check_id(owner_account.owner()) + || !(is_builtin(&owner_account) + || is_executable(&owner_account, feature_set)) + { + error_counters.invalid_program_for_execution += 1; + return Err(TransactionError::InvalidProgramForExecution); + } + accumulate_and_check_loaded_account_data_size( + &mut accumulated_accounts_data_size, + owner_account.data().len(), + requested_loaded_accounts_data_size_limit, + error_counters, + )?; + accounts.push((*owner_id, owner_account)); + } else { + error_counters.account_not_found += 1; + return Err(TransactionError::ProgramAccountNotFound); + } + owner_index + }; + account_indices.insert(0, program_index as IndexOfAccount); + Ok(account_indices) + }) + .collect::<Result<Vec<Vec<IndexOfAccount>>>>()?; + + Ok(LoadedTransaction { + accounts, + program_indices, + rent: tx_rent, + rent_debits, + }) +} + +/// Total accounts data a transaction can load is limited to: +/// if the `set_tx_loaded_accounts_data_size` instruction is not activated or not used, then +/// the default value of 64MiB, to not break anyone in Mainnet-beta today; +/// else +/// the user-requested loaded accounts size. +/// Note, requesting zero bytes will result in a transaction error +fn get_requested_loaded_accounts_data_size_limit( + tx: &SanitizedTransaction, +) -> Result<Option<NonZeroUsize>> { + let compute_budget_limits = + process_compute_budget_instructions(tx.message().program_instructions_iter()) + .unwrap_or_default(); + // sanitize against setting size limit to zero + NonZeroUsize::new( + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(), + ) + .map_or( + Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), + |v| Ok(Some(v)), + ) +} + +fn account_shared_data_from_program( + key: &Pubkey, + program_accounts: &HashMap<Pubkey, (&Pubkey, u64)>, +) -> Result<AccountSharedData> { + // It's an executable program account. The program is already loaded in the cache. + // So the account data is not needed. Return a dummy AccountSharedData with meta + // information. + let mut program_account = AccountSharedData::default(); + let (program_owner, _count) = program_accounts + .get(key) + .ok_or(TransactionError::AccountNotFound)?; + program_account.set_owner(**program_owner); + program_account.set_executable(true); + program_account.set_data_from_slice(create_executable_meta(program_owner)); + Ok(program_account) +} + +/// Accumulate loaded account data size into `accumulated_accounts_data_size`. +/// Returns TransactionError::MaxLoadedAccountsDataSizeExceeded if +/// `requested_loaded_accounts_data_size_limit` is specified and +/// `accumulated_accounts_data_size` exceeds it.
+fn accumulate_and_check_loaded_account_data_size( + accumulated_loaded_accounts_data_size: &mut usize, + account_data_size: usize, + requested_loaded_accounts_data_size_limit: Option<NonZeroUsize>, + error_counters: &mut TransactionErrorMetrics, +) -> Result<()> { + if let Some(requested_loaded_accounts_data_size) = requested_loaded_accounts_data_size_limit { + saturating_add_assign!(*accumulated_loaded_accounts_data_size, account_data_size); + if *accumulated_loaded_accounts_data_size > requested_loaded_accounts_data_size.get() { + error_counters.max_loaded_accounts_data_size_exceeded += 1; + Err(TransactionError::MaxLoadedAccountsDataSizeExceeded) + } else { + Ok(()) + } + } else { + Ok(()) + } +} + +fn validate_fee_payer( + payer_address: &Pubkey, + payer_account: &mut AccountSharedData, + payer_index: IndexOfAccount, + error_counters: &mut TransactionErrorMetrics, + rent_collector: &RentCollector, + fee: u64, +) -> Result<()> { + if payer_account.lamports() == 0 { + error_counters.account_not_found += 1; + return Err(TransactionError::AccountNotFound); + } + let system_account_kind = get_system_account_kind(payer_account).ok_or_else(|| { + error_counters.invalid_account_for_fee += 1; + TransactionError::InvalidAccountForFee + })?; + let min_balance = match system_account_kind { + SystemAccountKind::System => 0, + SystemAccountKind::Nonce => { + // Should we ever allow a fee charge to zero a nonce account's + // balance, the state MUST be set to uninitialized in that case + rent_collector.rent.minimum_balance(NonceState::size()) + } + }; + + payer_account + .lamports() + .checked_sub(min_balance) + .and_then(|v| v.checked_sub(fee)) + .ok_or_else(|| { + error_counters.insufficient_funds += 1; + TransactionError::InsufficientFundsForFee + })?; + + let payer_pre_rent_state = RentState::from_account(payer_account, &rent_collector.rent); + payer_account + .checked_sub_lamports(fee) + .map_err(|_| TransactionError::InsufficientFundsForFee)?; + + let payer_post_rent_state = RentState::from_account(payer_account, &rent_collector.rent); + check_rent_state_with_account( + &payer_pre_rent_state, + &payer_post_rent_state, + payer_address, + payer_account, + payer_index, + ) +} + +pub fn construct_instructions_account(message: &SanitizedMessage) -> AccountSharedData { + AccountSharedData::from(Account { + data: construct_instructions_data(&message.decompile_instructions()), + owner: sysvar::id(), + ..Account::default() + }) +} + +#[cfg(test)] +mod tests { + use { + super::*, + nonce::state::Versions as NonceVersions, + solana_accounts_db::{accounts::Accounts, rent_collector::RentCollector}, + solana_program_runtime::{ + compute_budget_processor, + prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + }, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + compute_budget::ComputeBudgetInstruction, + epoch_schedule::EpochSchedule, + hash::Hash, + instruction::CompiledInstruction, + message::{Message, SanitizedMessage}, + nonce, + rent::Rent, + signature::{Keypair, Signer}, + system_program, sysvar, + transaction::{Result, Transaction, TransactionError}, + transaction_context::TransactionAccount, + }, + std::{convert::TryFrom, sync::Arc}, + }; + + fn load_accounts_with_fee_and_rent( + tx: Transaction, + ka: &[TransactionAccount], + lamports_per_signature: u64, + rent_collector: &RentCollector, + error_counters: &mut TransactionErrorMetrics, + feature_set: &FeatureSet, + fee_structure: &FeeStructure, + ) -> Vec<TransactionLoadResult> { + let mut hash_queue = BlockhashQueue::new(100);
hash_queue.register_hash(&tx.message().recent_blockhash, lamports_per_signature); + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); + for ka in ka.iter() { + accounts.accounts_db.store_for_tests(0, &[(&ka.0, &ka.1)]); + } + + let ancestors = vec![(0, 0)].into_iter().collect(); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); + load_accounts( + &accounts.accounts_db, + &ancestors, + &[sanitized_tx], + vec![(Ok(()), None)], + &hash_queue, + error_counters, + rent_collector, + feature_set, + fee_structure, + None, + RewardInterval::OutsideInterval, + &HashMap::new(), + &LoadedProgramsForTxBatch::default(), + true, + ) + } + + /// get a feature set with all features activated + /// with the optional exception of 'exclude' + fn all_features_except(exclude: Option<&[Pubkey]>) -> FeatureSet { + let mut features = FeatureSet::all_enabled(); + if let Some(exclude) = exclude { + features.active.retain(|k, _v| !exclude.contains(k)); + } + features + } + + fn load_accounts_with_fee( + tx: Transaction, + ka: &[TransactionAccount], + lamports_per_signature: u64, + error_counters: &mut TransactionErrorMetrics, + exclude_features: Option<&[Pubkey]>, + ) -> Vec<TransactionLoadResult> { + load_accounts_with_fee_and_rent( + tx, + ka, + lamports_per_signature, + &RentCollector::default(), + error_counters, + &all_features_except(exclude_features), + &FeeStructure::default(), + ) + } + + fn load_accounts_aux_test( + tx: Transaction, + ka: &[TransactionAccount], + error_counters: &mut TransactionErrorMetrics, + ) -> Vec<TransactionLoadResult> { + load_accounts_with_fee(tx, ka, 0, error_counters, None) + } + + fn load_accounts_with_excluded_features( + tx: Transaction, + ka: &[TransactionAccount], + error_counters: &mut TransactionErrorMetrics, + exclude_features: Option<&[Pubkey]>, + ) -> Vec<TransactionLoadResult> { + load_accounts_with_fee(tx, ka, 0, error_counters, exclude_features) + } + + #[test] + fn test_load_accounts_no_account_0_exists() { + let accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![native_loader::id()], + instructions, + ); + + let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_counters); + + assert_eq!(error_counters.account_not_found, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0], + (Err(TransactionError::AccountNotFound), None,), + ); + } + + #[test] + fn test_load_accounts_unknown_program_id() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + let key1 = Pubkey::from([5u8; 32]); + + let account = AccountSharedData::new(1, 0, &Pubkey::default()); + accounts.push((key0, account)); + + let account = AccountSharedData::new(2, 1, &Pubkey::default()); + accounts.push((key1, account)); + + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![Pubkey::default()], + instructions, + ); + + let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_counters); + + assert_eq!(error_counters.account_not_found, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0], + (Err(TransactionError::ProgramAccountNotFound),
None,) + ); + } + + #[test] + fn test_load_accounts_insufficient_funds() { + let lamports_per_signature = 5000; + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + + let account = AccountSharedData::new(1, 0, &Pubkey::default()); + accounts.push((key0, account)); + + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![native_loader::id()], + instructions, + ); + + let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); + let fee = FeeStructure::default().calculate_fee( + &message, + lamports_per_signature, + &process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default() + .into(), + false, + ); + assert_eq!(fee, lamports_per_signature); + + let loaded_accounts = load_accounts_with_fee( + tx, + &accounts, + lamports_per_signature, + &mut error_counters, + None, + ); + + assert_eq!(error_counters.insufficient_funds, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0].clone(), + (Err(TransactionError::InsufficientFundsForFee), None,), + ); + } + + #[test] + fn test_load_accounts_invalid_account_for_fee() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + + let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::new_rand()); // <-- owner is not the system program + accounts.push((key0, account)); + + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![native_loader::id()], + instructions, + ); + + let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_counters); + + assert_eq!(error_counters.invalid_account_for_fee, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0], + (Err(TransactionError::InvalidAccountForFee), None,), + ); + } + + #[test] + fn test_load_accounts_fee_payer_is_nonce() { + let lamports_per_signature = 5000; + let mut error_counters = TransactionErrorMetrics::default(); + let rent_collector = RentCollector::new( + 0, + EpochSchedule::default(), + 500_000.0, + Rent { + lamports_per_byte_year: 42, + ..Rent::default() + }, + ); + let min_balance = rent_collector.rent.minimum_balance(NonceState::size()); + let nonce = Keypair::new(); + let mut accounts = vec![( + nonce.pubkey(), + AccountSharedData::new_data( + min_balance + lamports_per_signature, + &NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())), + &system_program::id(), + ) + .unwrap(), + )]; + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&nonce], + &[], + Hash::default(), + vec![native_loader::id()], + instructions, + ); + + // Fee leaves min_balance balance succeeds + let loaded_accounts = load_accounts_with_fee_and_rent( + tx.clone(), + &accounts, + lamports_per_signature, + &rent_collector, + &mut error_counters, + &all_features_except(None), + &FeeStructure::default(), + ); + assert_eq!(loaded_accounts.len(), 1); + let (load_res, _nonce) = &loaded_accounts[0]; + let loaded_transaction = load_res.as_ref().unwrap(); + assert_eq!(loaded_transaction.accounts[0].1.lamports(), min_balance); + + // Fee leaves zero balance fails
+ accounts[0].1.set_lamports(lamports_per_signature); + let loaded_accounts = load_accounts_with_fee_and_rent( + tx.clone(), + &accounts, + lamports_per_signature, + &rent_collector, + &mut error_counters, + &FeatureSet::all_enabled(), + &FeeStructure::default(), + ); + assert_eq!(loaded_accounts.len(), 1); + let (load_res, _nonce) = &loaded_accounts[0]; + assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee)); + + // Fee leaves non-zero, but sub-min_balance balance fails + accounts[0] + .1 + .set_lamports(lamports_per_signature + min_balance / 2); + let loaded_accounts = load_accounts_with_fee_and_rent( + tx, + &accounts, + lamports_per_signature, + &rent_collector, + &mut error_counters, + &FeatureSet::all_enabled(), + &FeeStructure::default(), + ); + assert_eq!(loaded_accounts.len(), 1); + let (load_res, _nonce) = &loaded_accounts[0]; + assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee)); + } + + #[test] + fn test_load_accounts_no_loaders() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + let key1 = Pubkey::from([5u8; 32]); + + let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); + account.set_rent_epoch(1); + accounts.push((key0, account)); + + let mut account = AccountSharedData::new(2, 1, &Pubkey::default()); + account.set_rent_epoch(1); + accounts.push((key1, account)); + + let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[key1], + Hash::default(), + vec![native_loader::id()], + instructions, + ); + + let loaded_accounts = + load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None); + + assert_eq!(error_counters.account_not_found, 0); + assert_eq!(loaded_accounts.len(), 1); + match &loaded_accounts[0] { + (Ok(loaded_transaction), _nonce) => { + assert_eq!(loaded_transaction.accounts.len(), 3); + assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); + assert_eq!(loaded_transaction.program_indices.len(), 1); + assert_eq!(loaded_transaction.program_indices[0].len(), 0); + } + (Err(e), _nonce) => panic!("{e}"), + } + } + + #[test] + fn test_load_accounts_bad_owner() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + let key1 = Pubkey::from([5u8; 32]); + + let account = AccountSharedData::new(1, 0, &Pubkey::default()); + accounts.push((key0, account)); + + let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); + account.set_owner(bpf_loader_upgradeable::id()); + account.set_data(create_executable_meta(account.owner()).to_vec()); + accounts.push((key1, account)); + + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![key1], + instructions, + ); + + let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_counters); + + assert_eq!(error_counters.account_not_found, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0], + (Err(TransactionError::ProgramAccountNotFound), None,) + ); + } + + #[test] + fn test_load_accounts_not_executable() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + let key1 =
Pubkey::from([5u8; 32]); + + let account = AccountSharedData::new(1, 0, &Pubkey::default()); + accounts.push((key0, account)); + + let account = AccountSharedData::new(40, 0, &native_loader::id()); + accounts.push((key1, account)); + + let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![key1], + instructions, + ); + + let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_counters); + + assert_eq!(error_counters.invalid_program_for_execution, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0], + (Err(TransactionError::InvalidProgramForExecution), None,) + ); + } + + #[test] + fn test_load_accounts_multiple_loaders() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + let key1 = bpf_loader_upgradeable::id(); + let key2 = Pubkey::from([6u8; 32]); + + let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); + account.set_rent_epoch(1); + accounts.push((key0, account)); + + let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); + account.set_executable(true); + account.set_rent_epoch(1); + account.set_owner(native_loader::id()); + accounts.push((key1, account)); + + let mut account = AccountSharedData::new(41, 1, &Pubkey::default()); + account.set_executable(true); + account.set_rent_epoch(1); + account.set_owner(key1); + account.set_data(create_executable_meta(account.owner()).to_vec()); + accounts.push((key2, account)); + + let instructions = vec![ + CompiledInstruction::new(1, &(), vec![0]), + CompiledInstruction::new(2, &(), vec![0]), + ]; + let tx = Transaction::new_with_compiled_instructions( + &[&keypair], + &[], + Hash::default(), + vec![key1, key2], + instructions, + ); + + let loaded_accounts = + load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None); + + assert_eq!(error_counters.account_not_found, 0); + assert_eq!(loaded_accounts.len(), 1); + match &loaded_accounts[0] { + (Ok(loaded_transaction), _nonce) => { + assert_eq!(loaded_transaction.accounts.len(), 4); + assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); + assert_eq!(loaded_transaction.program_indices.len(), 2); + assert_eq!(loaded_transaction.program_indices[0].len(), 1); + assert_eq!(loaded_transaction.program_indices[1].len(), 2); + for program_indices in loaded_transaction.program_indices.iter() { + for (i, program_index) in program_indices.iter().enumerate() { + // +1 to skip first not loader account + assert_eq!( + loaded_transaction.accounts[*program_index as usize].0, + accounts[i + 1].0 + ); + assert_eq!( + loaded_transaction.accounts[*program_index as usize].1, + accounts[i + 1].1 + ); + } + } + } + (Err(e), _nonce) => panic!("{e}"), + } + } + + #[test] + fn test_load_accounts_executable_with_write_lock() { + let mut accounts: Vec<TransactionAccount> = Vec::new(); + let mut error_counters = TransactionErrorMetrics::default(); + + let keypair = Keypair::new(); + let key0 = keypair.pubkey(); + let key1 = Pubkey::from([5u8; 32]); + let key2 = Pubkey::from([6u8; 32]); + + let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); + account.set_rent_epoch(1); + accounts.push((key0, account)); + + let mut account = AccountSharedData::new(40, 1, &native_loader::id()); + account.set_executable(true); + account.set_rent_epoch(1); + accounts.push((key1, account)); + + let mut account =
+ account.set_executable(true);
+ account.set_rent_epoch(1);
+ accounts.push((key2, account));
+
+ let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
+ let mut message = Message::new_with_compiled_instructions(
+ 1,
+ 0,
+ 1, // only one executable marked as readonly
+ vec![key0, key1, key2],
+ Hash::default(),
+ instructions,
+ );
+ let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx,
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ assert_eq!(
+ loaded_accounts[0],
+ (Err(TransactionError::InvalidWritableAccount), None)
+ );
+
+ // Mark executables as readonly
+ message.account_keys = vec![key0, key1, key2]; // revert key change
+ message.header.num_readonly_unsigned_accounts = 2; // mark both executables as readonly
+ let tx = Transaction::new(&[&keypair], message, Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx,
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ let result = loaded_accounts[0].0.as_ref().unwrap();
+ assert_eq!(result.accounts[..2], accounts[..2]);
+ assert_eq!(
+ result.accounts[result.program_indices[0][0] as usize],
+ accounts[2]
+ );
+ }
+
+ #[test]
+ fn test_load_accounts_upgradeable_with_write_lock() {
+ let mut accounts: Vec<TransactionAccount> = Vec::new();
+ let mut error_counters = TransactionErrorMetrics::default();
+
+ let keypair = Keypair::new();
+ let key0 = keypair.pubkey();
+ let key1 = Pubkey::from([5u8; 32]);
+ let key2 = Pubkey::from([6u8; 32]);
+ let programdata_key1 = Pubkey::from([7u8; 32]);
+ let programdata_key2 = Pubkey::from([8u8; 32]);
+
+ let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
+ account.set_rent_epoch(1);
+ accounts.push((key0, account));
+
+ let program_data = UpgradeableLoaderState::ProgramData {
+ slot: 42,
+ upgrade_authority_address: None,
+ };
+
+ let program = UpgradeableLoaderState::Program {
+ programdata_address: programdata_key1,
+ };
+ let mut account =
+ AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap();
+ account.set_executable(true);
+ account.set_rent_epoch(1);
+ accounts.push((key1, account));
+ let mut account =
+ AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap();
+ account.set_rent_epoch(1);
+ accounts.push((programdata_key1, account));
+
+ let program = UpgradeableLoaderState::Program {
+ programdata_address: programdata_key2,
+ };
+ let mut account =
+ AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap();
+ account.set_executable(true);
+ account.set_rent_epoch(1);
+ accounts.push((key2, account));
+ let mut account =
+ AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap();
+ account.set_rent_epoch(1);
+ accounts.push((programdata_key2, account));
+
+ let mut account = AccountSharedData::new(40, 1, &native_loader::id()); // create mock bpf_loader_upgradeable
+ account.set_executable(true);
+ account.set_rent_epoch(1);
+ accounts.push((bpf_loader_upgradeable::id(), account));
+
+ let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
+ let mut message = Message::new_with_compiled_instructions(
+ 1,
+ 0,
+ 1, // only one executable marked as readonly
+ vec![key0, key1, key2],
+ Hash::default(),
+ instructions,
+ );
+ let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx.clone(),
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ assert_eq!(
+ loaded_accounts[0],
+ (Err(TransactionError::InvalidWritableAccount), None)
+ );
+
+ // Solution 0: Include feature simplify_writable_program_account_check
+ let loaded_accounts =
+ load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None);
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+
+ // Solution 1: include bpf_loader_upgradeable account
+ message.account_keys = vec![key0, key1, bpf_loader_upgradeable::id()];
+ let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx,
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ let result = loaded_accounts[0].0.as_ref().unwrap();
+ assert_eq!(result.accounts[..2], accounts[..2]);
+ assert_eq!(
+ result.accounts[result.program_indices[0][0] as usize],
+ accounts[5]
+ );
+
+ // Solution 2: mark programdata as readonly
+ message.account_keys = vec![key0, key1, key2]; // revert key change
+ message.header.num_readonly_unsigned_accounts = 2; // mark both executables as readonly
+ let tx = Transaction::new(&[&keypair], message, Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx,
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ let result = loaded_accounts[0].0.as_ref().unwrap();
+ assert_eq!(result.accounts[..2], accounts[..2]);
+ assert_eq!(
+ result.accounts[result.program_indices[0][0] as usize],
+ accounts[5]
+ );
+ assert_eq!(
+ result.accounts[result.program_indices[0][1] as usize],
+ accounts[3]
+ );
+ }
+
+ #[test]
+ fn test_load_accounts_programdata_with_write_lock() {
+ let mut accounts: Vec<TransactionAccount> = Vec::new();
+ let mut error_counters = TransactionErrorMetrics::default();
+
+ let keypair = Keypair::new();
+ let key0 = keypair.pubkey();
+ let key1 = Pubkey::from([5u8; 32]);
+ let key2 = Pubkey::from([6u8; 32]);
+
+ let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
+ account.set_rent_epoch(1);
+ accounts.push((key0, account));
+
+ let program_data = UpgradeableLoaderState::ProgramData {
+ slot: 42,
+ upgrade_authority_address: None,
+ };
+ let mut account =
+ AccountSharedData::new_data(40, &program_data, &bpf_loader_upgradeable::id()).unwrap();
+ account.set_rent_epoch(1);
+ accounts.push((key1, account));
+
+ let mut account = AccountSharedData::new(40, 1, &native_loader::id());
+ account.set_executable(true);
+ account.set_rent_epoch(1);
+ accounts.push((key2, account));
+
+ let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
+ let mut message = Message::new_with_compiled_instructions(
+ 1,
+ 0,
+ 1, // only the program marked as readonly
+ vec![key0, key1, key2],
+ Hash::default(),
+ instructions,
+ );
+ let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx.clone(),
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ assert_eq!(
+ loaded_accounts[0],
+ (Err(TransactionError::InvalidWritableAccount), None)
+ );
+
+ // Solution 0: Include feature simplify_writable_program_account_check
+ let loaded_accounts =
+ load_accounts_with_excluded_features(tx, &accounts, &mut error_counters, None);
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+
+ // Solution 1: include bpf_loader_upgradeable account
+ let mut account = AccountSharedData::new(40, 1, &native_loader::id()); // create mock bpf_loader_upgradeable
+ account.set_executable(true);
+ account.set_rent_epoch(1);
+ let accounts_with_upgradeable_loader = vec![
+ accounts[0].clone(),
+ accounts[1].clone(),
+ (bpf_loader_upgradeable::id(), account),
+ ];
+ message.account_keys = vec![key0, key1, bpf_loader_upgradeable::id()];
+ let tx = Transaction::new(&[&keypair], message.clone(), Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx,
+ &accounts_with_upgradeable_loader,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ let result = loaded_accounts[0].0.as_ref().unwrap();
+ assert_eq!(result.accounts[..2], accounts_with_upgradeable_loader[..2]);
+ assert_eq!(
+ result.accounts[result.program_indices[0][0] as usize],
+ accounts_with_upgradeable_loader[2]
+ );
+
+ // Solution 2: mark programdata as readonly
+ message.account_keys = vec![key0, key1, key2]; // revert key change
+ message.header.num_readonly_unsigned_accounts = 2; // extend readonly set to include programdata
+ let tx = Transaction::new(&[&keypair], message, Hash::default());
+ let loaded_accounts = load_accounts_with_excluded_features(
+ tx,
+ &accounts,
+ &mut error_counters,
+ Some(&[simplify_writable_program_account_check::id()]),
+ );
+
+ assert_eq!(error_counters.invalid_writable_account, 1);
+ assert_eq!(loaded_accounts.len(), 1);
+ let result = loaded_accounts[0].0.as_ref().unwrap();
+ assert_eq!(result.accounts[..2], accounts[..2]);
+ assert_eq!(
+ result.accounts[result.program_indices[0][0] as usize],
+ accounts[2]
+ );
+ }
+
+ fn load_accounts_no_store(
+ accounts: &Accounts,
+ tx: Transaction,
+ account_overrides: Option<&AccountOverrides>,
+ ) -> Vec<TransactionLoadResult> {
+ let tx = SanitizedTransaction::from_transaction_for_tests(tx);
+ let rent_collector = RentCollector::default();
+ let mut hash_queue = BlockhashQueue::new(100);
+ hash_queue.register_hash(tx.message().recent_blockhash(), 10);
+
+ let ancestors = vec![(0, 0)].into_iter().collect();
+ let mut error_counters = TransactionErrorMetrics::default();
+ load_accounts(
+ &accounts.accounts_db,
+ &ancestors,
+ &[tx],
+ vec![(Ok(()), None)],
+ &hash_queue,
+ &mut error_counters,
+ &rent_collector,
+ &FeatureSet::all_enabled(),
+ &FeeStructure::default(),
+ account_overrides,
+ RewardInterval::OutsideInterval,
+ &HashMap::new(),
+ &LoadedProgramsForTxBatch::default(),
+ true,
+ )
+ }
+
+ #[test]
+ fn test_instructions() {
+ solana_logger::setup();
+ let accounts_db = AccountsDb::new_single_for_tests();
+ let accounts = Accounts::new(Arc::new(accounts_db));
+
+ let instructions_key = solana_sdk::sysvar::instructions::id();
+ let keypair = Keypair::new();
+ let instructions = vec![CompiledInstruction::new(1, &(), vec![0, 1])];
+ let tx = Transaction::new_with_compiled_instructions(
+ &[&keypair],
+ &[solana_sdk::pubkey::new_rand(), instructions_key],
+ Hash::default(),
+ vec![native_loader::id()],
+ instructions,
+ );
+
+ let loaded_accounts = load_accounts_no_store(&accounts, tx, None);
+ assert_eq!(loaded_accounts.len(), 1);
+ assert!(loaded_accounts[0].0.is_err());
+ }
+
+ #[test]
+ fn test_overrides() {
+ solana_logger::setup();
+ let accounts_db = AccountsDb::new_single_for_tests();
+ let accounts = Accounts::new(Arc::new(accounts_db));
+ let mut account_overrides = AccountOverrides::default();
+ let slot_history_id = sysvar::slot_history::id();
+ let account = AccountSharedData::new(42, 0, &Pubkey::default());
+ account_overrides.set_slot_history(Some(account));
+
+ let keypair = Keypair::new();
+ let account = AccountSharedData::new(1_000_000, 0, &Pubkey::default());
+ accounts.store_slow_uncached(0, &keypair.pubkey(), &account);
+
+ let instructions = vec![CompiledInstruction::new(2, &(), vec![0])];
+ let tx = Transaction::new_with_compiled_instructions(
+ &[&keypair],
+ &[slot_history_id],
+ Hash::default(),
+ vec![native_loader::id()],
+ instructions,
+ );
+
+ let loaded_accounts = load_accounts_no_store(&accounts, tx, Some(&account_overrides));
+ assert_eq!(loaded_accounts.len(), 1);
+ let loaded_transaction = loaded_accounts[0].0.as_ref().unwrap();
+ assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey());
+ assert_eq!(loaded_transaction.accounts[1].0, slot_history_id);
+ assert_eq!(loaded_transaction.accounts[1].1.lamports(), 42);
+ }
+
+ #[test]
+ fn test_accumulate_and_check_loaded_account_data_size() {
+ let mut error_counter = TransactionErrorMetrics::default();
+
+ // assert check is OK if data limit is not enabled
+ {
+ let mut accumulated_data_size: usize = 0;
+ let data_size = usize::MAX;
+ let requested_data_size_limit = None;
+
+ assert!(accumulate_and_check_loaded_account_data_size(
+ &mut accumulated_data_size,
+ data_size,
+ requested_data_size_limit,
+ &mut error_counter
+ )
+ .is_ok());
+ }
+
+ // assert check will fail with correct error if loaded data exceeds limit
+ {
+ let mut accumulated_data_size: usize = 0;
+ let data_size: usize = 123;
+ let requested_data_size_limit = NonZeroUsize::new(data_size);
+
+ // OK - loaded data size is up to limit
+ assert!(accumulate_and_check_loaded_account_data_size(
+ &mut accumulated_data_size,
+ data_size,
+ requested_data_size_limit,
+ &mut error_counter
+ )
+ .is_ok());
+ assert_eq!(data_size, accumulated_data_size);
+
+ // fail - loading one more byte would exceed the limit
+ let another_byte: usize = 1;
+ assert_eq!(
+ accumulate_and_check_loaded_account_data_size(
+ &mut accumulated_data_size,
+ another_byte,
+ requested_data_size_limit,
+ &mut error_counter
+ ),
+ Err(TransactionError::MaxLoadedAccountsDataSizeExceeded)
+ );
+ }
+ }
+
+ #[test]
+ fn test_get_requested_loaded_accounts_data_size_limit() {
+ // a private helper function
+ fn test(
+ instructions: &[solana_sdk::instruction::Instruction],
+ expected_result: &Result<Option<NonZeroUsize>>,
+ ) {
+ let payer_keypair = Keypair::new();
+ let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new(
+ &[&payer_keypair],
+ Message::new(instructions, Some(&payer_keypair.pubkey())),
+ Hash::default(),
+ ));
+ assert_eq!(
+ *expected_result,
+ get_requested_loaded_accounts_data_size_limit(&tx)
+ );
+ }
+
+ let tx_not_set_limit = &[solana_sdk::instruction::Instruction::new_with_bincode(
+ Pubkey::new_unique(),
+ &0_u8,
+ vec![],
+ )];
+ let tx_set_limit_99 =
+ &[
+ solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(99u32),
+ solana_sdk::instruction::Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ];
+ let tx_set_limit_0 =
+ &[
+ solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0u32),
+ solana_sdk::instruction::Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
+ ];
+
+ let result_default_limit = Ok(Some(
+ NonZeroUsize::new(
+ usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES)
+ .unwrap(),
+ )
+ .unwrap(),
+ ));
+ let result_requested_limit: Result<Option<NonZeroUsize>> =
+ Ok(Some(NonZeroUsize::new(99).unwrap()));
+ let result_invalid_limit = Err(TransactionError::InvalidLoadedAccountsDataSizeLimit);
+
+ // the results should be:
+ // if tx doesn't set limit, then default limit (64MiB)
+ // if tx sets limit, then requested limit
+ // if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit
+ test(tx_not_set_limit, &result_default_limit);
+ test(tx_set_limit_99, &result_requested_limit);
+ test(tx_set_limit_0, &result_invalid_limit);
+ }
+
+ #[test]
+ fn test_load_accounts_too_high_prioritization_fee() {
+ solana_logger::setup();
+ let lamports_per_signature = 5000_u64;
+ let request_units = 1_000_000_u32;
+ let request_unit_price = 2_000_000_000_u64;
+ let prioritization_fee_details = PrioritizationFeeDetails::new(
+ PrioritizationFeeType::ComputeUnitPrice(request_unit_price),
+ request_units as u64,
+ );
+ let prioritization_fee = prioritization_fee_details.get_fee();
+
+ let keypair = Keypair::new();
+ let key0 = keypair.pubkey();
+ // set up account with balance of `prioritization_fee`
+ let account = AccountSharedData::new(prioritization_fee, 0, &Pubkey::default());
+ let accounts = vec![(key0, account)];
+
+ let instructions = &[
+ ComputeBudgetInstruction::set_compute_unit_limit(request_units),
+ ComputeBudgetInstruction::set_compute_unit_price(request_unit_price),
+ ];
+ let tx = Transaction::new(
+ &[&keypair],
+ Message::new(instructions, Some(&key0)),
+ Hash::default(),
+ );
+
+ let message = SanitizedMessage::try_from(tx.message().clone()).unwrap();
+ let fee = FeeStructure::default().calculate_fee(
+ &message,
+ lamports_per_signature,
+ &process_compute_budget_instructions(message.program_instructions_iter())
+ .unwrap_or_default()
+ .into(),
+ false,
+ );
+ assert_eq!(fee, lamports_per_signature + prioritization_fee);
+
+ // assert that loading fails: the account holds 2B lamports, but the transaction
+ // requests 2B lamports as its prioritization fee on top of the base signature fee.
+ let mut error_counters = TransactionErrorMetrics::default(); + let loaded_accounts = load_accounts_with_fee( + tx, + &accounts, + lamports_per_signature, + &mut error_counters, + None, + ); + + assert_eq!(error_counters.insufficient_funds, 1); + assert_eq!(loaded_accounts.len(), 1); + assert_eq!( + loaded_accounts[0].clone(), + (Err(TransactionError::InsufficientFundsForFee), None), + ); + } + + struct ValidateFeePayerTestParameter { + is_nonce: bool, + payer_init_balance: u64, + fee: u64, + expected_result: Result<()>, + payer_post_balance: u64, + } + fn validate_fee_payer_account( + test_parameter: ValidateFeePayerTestParameter, + rent_collector: &RentCollector, + ) { + let payer_account_keys = Keypair::new(); + let mut account = if test_parameter.is_nonce { + AccountSharedData::new_data( + test_parameter.payer_init_balance, + &NonceVersions::new(NonceState::Initialized(nonce::state::Data::default())), + &system_program::id(), + ) + .unwrap() + } else { + AccountSharedData::new(test_parameter.payer_init_balance, 0, &system_program::id()) + }; + let result = validate_fee_payer( + &payer_account_keys.pubkey(), + &mut account, + 0, + &mut TransactionErrorMetrics::default(), + rent_collector, + test_parameter.fee, + ); + + assert_eq!(result, test_parameter.expected_result); + assert_eq!(account.lamports(), test_parameter.payer_post_balance); + } + + #[test] + fn test_validate_fee_payer() { + let rent_collector = RentCollector::new( + 0, + EpochSchedule::default(), + 500_000.0, + Rent { + lamports_per_byte_year: 1, + ..Rent::default() + }, + ); + let min_balance = rent_collector.rent.minimum_balance(NonceState::size()); + let fee = 5_000; + + // If payer account has sufficient balance, expect successful fee deduction, + // regardless feature gate status, or if payer is nonce account. + { + for (is_nonce, min_balance) in [(true, min_balance), (false, 0)] { + validate_fee_payer_account( + ValidateFeePayerTestParameter { + is_nonce, + payer_init_balance: min_balance + fee, + fee, + expected_result: Ok(()), + payer_post_balance: min_balance, + }, + &rent_collector, + ); + } + } + + // If payer account has no balance, expected AccountNotFound Error + // regardless feature gate status, or if payer is nonce account. + { + for is_nonce in [true, false] { + validate_fee_payer_account( + ValidateFeePayerTestParameter { + is_nonce, + payer_init_balance: 0, + fee, + expected_result: Err(TransactionError::AccountNotFound), + payer_post_balance: 0, + }, + &rent_collector, + ); + } + } + + // If payer account has insufficient balance, expect InsufficientFundsForFee error + // regardless feature gate status, or if payer is nonce account. 
+ { + for (is_nonce, min_balance) in [(true, min_balance), (false, 0)] { + validate_fee_payer_account( + ValidateFeePayerTestParameter { + is_nonce, + payer_init_balance: min_balance + fee - 1, + fee, + expected_result: Err(TransactionError::InsufficientFundsForFee), + payer_post_balance: min_balance + fee - 1, + }, + &rent_collector, + ); + } + } + + // normal payer account has balance of u64::MAX, so does fee; since it does not require + // min_balance, expect successful fee deduction, regardless of feature gate status + { + validate_fee_payer_account( + ValidateFeePayerTestParameter { + is_nonce: false, + payer_init_balance: u64::MAX, + fee: u64::MAX, + expected_result: Ok(()), + payer_post_balance: 0, + }, + &rent_collector, + ); + } + } + + #[test] + fn test_validate_nonce_fee_payer_with_checked_arithmetic() { + let rent_collector = RentCollector::new( + 0, + EpochSchedule::default(), + 500_000.0, + Rent { + lamports_per_byte_year: 1, + ..Rent::default() + }, + ); + + // nonce payer account has balance of u64::MAX, so does fee; due to nonce account + // requires additional min_balance, expect InsufficientFundsForFee error if feature gate is + // enabled + validate_fee_payer_account( + ValidateFeePayerTestParameter { + is_nonce: true, + payer_init_balance: u64::MAX, + fee: u64::MAX, + expected_result: Err(TransactionError::InsufficientFundsForFee), + payer_post_balance: u64::MAX, + }, + &rent_collector, + ); + } +} diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 72a6f72f11927c..efc17176d7337b 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -707,9 +707,21 @@ impl AccountsBackgroundService { bank.force_flush_accounts_cache(); bank.clean_accounts(last_full_snapshot_slot); last_cleaned_block_height = bank.block_height(); - bank.shrink_ancient_slots(); + // See justification below for why we skip 'shrink' here. + if bank.is_startup_verification_complete() { + bank.shrink_ancient_slots(); + } + } + // Do not 'shrink' until *after* the startup verification is complete. + // This is because startup verification needs to get the snapshot + // storages *as they existed at startup* (to calculate the accounts hash). + // If 'shrink' were to run, then it is possible startup verification + // (1) could race with 'shrink', and fail to assert that shrinking is not in + // progress, or (2) could get snapshot storages that were newer than what + // was in the snapshot itself. + if bank.is_startup_verification_complete() { + bank.shrink_candidate_slots(); } - bank.shrink_candidate_slots(); } stats.record_and_maybe_submit(start_time.elapsed()); sleep(Duration::from_millis(INTERVAL_MS)); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 3bf2d720933443..9d0342fb2fc25a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -33,6 +33,10 @@ //! It offers a high-level API that signs transactions //! on behalf of the caller, and a low-level API for when they have //! already been signed and verified. 
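
The accounts_background_service hunk above moves both shrink calls behind Bank::is_startup_verification_complete(). Below is a minimal, self-contained Rust sketch of that gating pattern; the types and method names here are illustrative stand-ins (the real flag is tracked by the accounts db's background verification state), not the actual runtime API:

    use std::sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    };

    /// Stand-in for the bank/accounts-db state the background service touches.
    struct SnapshotState {
        startup_verification_complete: AtomicBool,
    }

    impl SnapshotState {
        /// Mirrors Bank::is_startup_verification_complete(): true once the
        /// accounts hash computed from the startup snapshot storages has
        /// been verified.
        fn is_startup_verification_complete(&self) -> bool {
            self.startup_verification_complete.load(Ordering::Acquire)
        }

        fn shrink_ancient_slots(&self) { /* compact ancient storages */ }
        fn shrink_candidate_slots(&self) { /* compact shrink candidates */ }
    }

    /// One background-loop iteration: shrinking only runs after startup
    /// verification, so the verifier sees the storages as they existed at
    /// startup and never races with an in-flight shrink.
    fn background_iteration(state: &Arc<SnapshotState>) {
        if state.is_startup_verification_complete() {
            state.shrink_ancient_slots();
            state.shrink_candidate_slots();
        }
        // Otherwise skip this round and retry on the next tick.
    }

    fn main() {
        let state = Arc::new(SnapshotState {
            startup_verification_complete: AtomicBool::new(false),
        });
        background_iteration(&state); // no-op: verification still running
        state
            .startup_verification_complete
            .store(true, Ordering::Release);
        background_iteration(&state); // now safe to shrink
    }

The only point of the sketch is the ordering constraint the diff's comment describes: no shrink work may mutate snapshot storages until the startup hash verification has read them.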
+#[cfg(feature = "dev-context-only-utils")] +use solana_accounts_db::accounts_db::{ + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, +}; #[allow(deprecated)] use solana_sdk::recent_blockhashes_account; pub use solana_sdk::reward_type::RewardType; @@ -70,13 +74,12 @@ use { solana_accounts_db::{ account_overrides::AccountOverrides, accounts::{ - AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, RewardInterval, + AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, TransactionLoadResult, }, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, - ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, accounts_hash::{ AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, @@ -89,7 +92,7 @@ use { epoch_accounts_hash::EpochAccountsHash, nonce_info::{NonceInfo, NoncePartial}, partitioned_rewards::PartitionedEpochRewardsConfig, - rent_collector::{CollectedInfo, RentCollector}, + rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, sorted_storages::SortedStorages, stake_rewards::{RewardInfo, StakeReward}, @@ -113,7 +116,8 @@ use { invoke_context::BuiltinFunctionWithContext, loaded_programs::{ LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, + LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, message_processor::MessageProcessor, @@ -122,11 +126,11 @@ use { }, solana_sdk::{ account::{ - create_account_shared_data_with_fields as create_account, from_account, Account, - AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, + create_account_shared_data_with_fields as create_account, create_executable_meta, + from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, + WritableAccount, PROGRAM_OWNERS, }, account_utils::StateMut, - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, @@ -138,10 +142,7 @@ use { epoch_info::EpochInfo, epoch_schedule::EpochSchedule, feature, - feature_set::{ - self, include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, FeatureSet, - }, + feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, fee_calculator::{FeeCalculator, FeeRateGovernor}, genesis_config::{ClusterType, GenesisConfig}, @@ -149,6 +150,7 @@ use { hash::{extend_and_hash, hashv, Hash}, incinerator, inflation::Inflation, + inner_instruction::InnerInstructions, instruction::InstructionError, loader_v4::{self, LoaderV4State, LoaderV4Status}, message::{AccountKeys, SanitizedMessage}, @@ -159,6 +161,7 @@ use { packet::PACKET_DATA_SIZE, precompiles::get_precompiles, pubkey::Pubkey, + rent::RentDue, saturating_add_assign, signature::{Keypair, Signature}, slot_hashes::SlotHashes, @@ -276,9 +279,9 @@ pub struct BankRc { pub(crate) bank_id_generator: Arc, } +use crate::accounts::load_accounts; #[cfg(RUSTC_WITH_SPECIALIZATION)] use solana_frozen_abi::abi_example::AbiExample; -use solana_program_runtime::loaded_programs::ExtractedPrograms; #[cfg(RUSTC_WITH_SPECIALIZATION)] 
impl AbiExample for BankRc {
@@ -307,8 +310,7 @@ impl BankRc {
 enum ProgramAccountLoadResult {
 AccountNotFound,
- InvalidAccountData,
- InvalidV4Program,
+ InvalidAccountData(ProgramRuntimeEnvironment),
 ProgramOfLoaderV1orV2(AccountSharedData),
 ProgramOfLoaderV3(AccountSharedData, AccountSharedData, Slot),
 ProgramOfLoaderV4(AccountSharedData, Slot),
@@ -337,6 +339,7 @@ pub struct TransactionSimulationResult {
 pub post_simulation_accounts: Vec<TransactionAccount>,
 pub units_consumed: u64,
 pub return_data: Option<TransactionReturnData>,
+ pub inner_instructions: Option<Vec<InnerInstructions>>,
 }
 pub struct TransactionBalancesSet {
 pub pre_balances: TransactionBalances,
@@ -934,19 +937,6 @@ pub struct CommitTransactionCounts {
 pub signature_count: u64,
 }
-impl WorkingSlot for Bank {
- fn current_slot(&self) -> Slot {
- self.slot
- }
-
- fn current_epoch(&self) -> Epoch {
- self.epoch
- }
-
- fn is_ancestor(&self, other: Slot) -> bool {
- self.ancestors.contains_key(&other)
- }
-}
 #[derive(Debug, Default)]
 /// result of calculating the stake rewards at end of epoch
 struct StakeRewardCalculation {
@@ -956,66 +946,15 @@
 total_stake_rewards_lamports: u64,
 }
-impl Bank {
- pub fn default_for_tests() -> Self {
- Self::default_with_accounts(Accounts::default_for_tests())
- }
-
- pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self {
- Self::new_with_paths_for_benches(genesis_config, Vec::new())
- }
-
- pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self {
- Self::new_for_tests_with_config(genesis_config, BankTestConfig::default())
- }
-
- pub fn new_for_tests_with_config(
- genesis_config: &GenesisConfig,
- test_config: BankTestConfig,
- ) -> Self {
- Self::new_with_config_for_tests(
- genesis_config,
- test_config.secondary_indexes,
- AccountShrinkThreshold::default(),
- )
- }
-
- /// Intended for use by tests only.
- /// create new bank with the given configs.
- pub fn new_with_runtime_config_for_tests( - genesis_config: &GenesisConfig, - runtime_config: Arc, - ) -> Self { - Self::new_with_paths_for_tests( - genesis_config, - runtime_config, - Vec::new(), - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ) - } - - pub fn new_no_wallclock_throttle_for_tests(genesis_config: &GenesisConfig) -> Self { - let mut bank = Self::new_for_tests(genesis_config); - - bank.ns_per_slot = std::u128::MAX; - bank - } - - pub(crate) fn new_with_config_for_tests( - genesis_config: &GenesisConfig, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_paths_for_tests( - genesis_config, - Arc::new(RuntimeConfig::default()), - Vec::new(), - account_indexes, - shrink_ratio, - ) - } +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub(super) enum RewardInterval { + /// the slot within the epoch is INSIDE the reward distribution interval + InsideInterval, + /// the slot within the epoch is OUTSIDE the reward distribution interval + OutsideInterval, +} +impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { skipped_rewrites: Mutex::default(), @@ -1076,7 +1015,10 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::>>::default(), + loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + Slot::default(), + Epoch::default(), + ))), check_program_modification_slot: false, epoch_reward_status: EpochRewardStatus::default(), }; @@ -1087,46 +1029,6 @@ impl Bank { bank } - pub fn new_with_paths_for_tests( - genesis_config: &GenesisConfig, - runtime_config: Arc, - paths: Vec, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_paths( - genesis_config, - runtime_config, - paths, - None, - None, - account_indexes, - shrink_ratio, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ) - } - - /// Intended for use by benches only. - /// create new bank with the given config and paths. 
- pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec) -> Self { - Self::new_with_paths( - genesis_config, - Arc::::default(), - paths, - None, - None, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - false, - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), - None, - Arc::default(), - ) - } - #[allow(clippy::too_many_arguments)] pub fn new_with_paths( genesis_config: &GenesisConfig, @@ -1141,7 +1043,7 @@ impl Bank { accounts_update_notifier: Option, exit: Arc, ) -> Self { - let accounts = Accounts::new_with_config( + let accounts_db = AccountsDb::new_with_config( paths, &genesis_config.cluster_type, account_indexes, @@ -1150,6 +1052,7 @@ impl Bank { accounts_update_notifier, exit, ); + let accounts = Accounts::new(Arc::new(accounts_db)); let mut bank = Self::default_with_accounts(accounts); bank.ancestors = Ancestors::from(vec![bank.slot()]); bank.transaction_debug_keys = debug_keys; @@ -1305,7 +1208,7 @@ impl Bank { parent.freeze(); assert_ne!(slot, parent.slot()); - let epoch_schedule = parent.epoch_schedule; + let epoch_schedule = parent.epoch_schedule().clone(); let epoch = epoch_schedule.get_epoch(slot); let (rc, bank_rc_creation_time_us) = measure_us!({ @@ -1453,7 +1356,7 @@ impl Bank { ); } else { // Save a snapshot of stakes for use in consensus and stake weighted networking - let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot); + let leader_schedule_epoch = new.epoch_schedule().get_leader_schedule_epoch(slot); new.update_epoch_stakes(leader_schedule_epoch); } if new.is_partitioned_rewards_code_enabled() { @@ -1514,10 +1417,10 @@ impl Bank { } loaded_programs_cache.upcoming_environments = Some(upcoming_environments); loaded_programs_cache.programs_to_recompile = loaded_programs_cache - .get_entries_sorted_by_tx_usage( - changed_program_runtime_v1, - changed_program_runtime_v2, - ); + .get_flattened_entries(changed_program_runtime_v1, changed_program_runtime_v2); + loaded_programs_cache + .programs_to_recompile + .sort_by_cached_key(|(_id, program)| program.decayed_usage_counter(slot)); } }); @@ -1924,7 +1827,10 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::>>::default(), + loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + fields.slot, + fields.epoch, + ))), check_program_modification_slot: false, epoch_reward_status: EpochRewardStatus::default(), }; @@ -1934,6 +1840,7 @@ impl Bank { debug_do_not_add_builtins, ); bank.fill_missing_sysvar_cache_entries(); + bank.rebuild_skipped_rewrites(); // Sanity assertions between bank snapshot and genesis config // Consider removing from serializable bank state @@ -2014,7 +1921,7 @@ impl Bank { fee_rate_governor: self.fee_rate_governor.clone(), collected_rent: self.collected_rent.load(Relaxed), rent_collector: self.rent_collector.clone(), - epoch_schedule: self.epoch_schedule, + epoch_schedule: self.epoch_schedule.clone(), inflation: *self.inflation.read().unwrap(), stakes: &self.stakes_cache, epoch_stakes: &self.epoch_stakes, @@ -3077,7 +2984,7 @@ impl Bank { stake_state::calculate_points( stake_account.stake_state(), vote_state, - Some(stake_history), + stake_history, new_warmup_cooldown_rate_epoch, ) .unwrap_or(0) @@ -3114,7 +3021,7 @@ impl Bank { stake_state::calculate_points( stake_account.stake_state(), vote_state, - Some(stake_history), + stake_history, new_warmup_cooldown_rate_epoch, ) .unwrap_or(0) @@ 
-3183,15 +3090,11 @@ impl Bank { let (mut stake_account, stake_state) = <(AccountSharedData, StakeStateV2)>::from(stake_account); let vote_pubkey = delegation.voter_pubkey; - let Some(vote_account) = get_vote_account(&vote_pubkey) else { - return None; - }; + let vote_account = get_vote_account(&vote_pubkey)?; if vote_account.owner() != &solana_vote_program { return None; } - let Ok(vote_state) = vote_account.vote_state().cloned() else { - return None; - }; + let vote_state = vote_account.vote_state().cloned().ok()?; let pre_lamport = stake_account.lamports(); @@ -3201,7 +3104,7 @@ impl Bank { &mut stake_account, &vote_state, &point_value, - Some(stake_history), + stake_history, reward_calc_tracer.as_ref(), new_warmup_cooldown_rate_epoch, ); @@ -3320,7 +3223,7 @@ impl Bank { &mut stake_account, &vote_state, &point_value, - Some(stake_history), + stake_history, reward_calc_tracer.as_ref(), new_warmup_cooldown_rate_epoch, ); @@ -3889,15 +3792,15 @@ impl Bank { self.max_tick_height = (self.slot + 1) * self.ticks_per_slot; self.slots_per_year = genesis_config.slots_per_year(); - self.epoch_schedule = genesis_config.epoch_schedule; + self.epoch_schedule = genesis_config.epoch_schedule.clone(); self.inflation = Arc::new(RwLock::new(genesis_config.inflation)); self.rent_collector = RentCollector::new( self.epoch, - *self.epoch_schedule(), + self.epoch_schedule().clone(), self.slots_per_year, - genesis_config.rent, + genesis_config.rent.clone(), ); // Add additional builtin programs specified in the genesis config @@ -3979,7 +3882,6 @@ impl Bank { fn add_precompiled_account_with_owner(&self, program_id: &Pubkey, owner: Pubkey) { if let Some(account) = self.get_account_with_fixed_root(program_id) { if account.executable() { - // The account is already executable, that's all we need return; } else { // malicious account is pre-occupying at program_id @@ -3995,10 +3897,13 @@ impl Bank { // Add a bogus executable account, which will be loaded and ignored. let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None); + + // Mock account_data with executable_meta so that the account is executable. + let account_data = create_executable_meta(&owner); let account = AccountSharedData::from(Account { lamports, owner, - data: vec![], + data: account_data.to_vec(), executable: true, rent_epoch, }); @@ -4103,14 +4008,9 @@ impl Bank { self.fee_structure.calculate_fee( message, lamports_per_signature, - &process_compute_budget_instructions( - message.program_instructions_iter(), - &self.feature_set, - ) - .unwrap_or_default() - .into(), - self.feature_set - .is_active(&remove_congestion_multiplier_from_fee_calculation::id()), + &process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default() + .into(), self.feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -4277,20 +4177,6 @@ impl Bank { } } - /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. 
- pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { - let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); - let sanitized_txs = txs - .into_iter() - .map(SanitizedTransaction::from_transaction_for_tests) - .collect::>(); - let lock_results = self - .rc - .accounts - .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit); - TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) - } - /// Prepare a transaction batch from a list of versioned transactions from /// an entry. Used for tests only. pub fn prepare_entry_batch(&self, txs: Vec) -> Result { @@ -4341,7 +4227,7 @@ impl Bank { } /// Prepare a transaction batch from a single transaction without locking accounts - pub(crate) fn prepare_unlocked_batch_from_single_tx<'a>( + pub fn prepare_unlocked_batch_from_single_tx<'a>( &'a self, transaction: &'a SanitizedTransaction, ) -> TransactionBatch<'_, '_> { @@ -4361,23 +4247,25 @@ impl Bank { /// Run transactions against a frozen bank without committing the results pub fn simulate_transaction( &self, - transaction: SanitizedTransaction, + transaction: &SanitizedTransaction, + enable_cpi_recording: bool, ) -> TransactionSimulationResult { assert!(self.is_frozen(), "simulation bank must be frozen"); - self.simulate_transaction_unchecked(transaction) + self.simulate_transaction_unchecked(transaction, enable_cpi_recording) } /// Run transactions against a bank without committing the results; does not check if the bank /// is frozen, enabling use in single-Bank test frameworks pub fn simulate_transaction_unchecked( &self, - transaction: SanitizedTransaction, + transaction: &SanitizedTransaction, + enable_cpi_recording: bool, ) -> TransactionSimulationResult { let account_keys = transaction.message().account_keys(); let number_of_accounts = account_keys.len(); let account_overrides = self.get_account_overrides_for_simulation(&account_keys); - let batch = self.prepare_unlocked_batch_from_single_tx(&transaction); + let batch = self.prepare_unlocked_batch_from_single_tx(transaction); let mut timings = ExecuteTimings::default(); let LoadAndExecuteTransactionsOutput { @@ -4390,7 +4278,7 @@ impl Bank { // for processing. During forwarding, the transaction could expire if the // delay is not accounted for. MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, - false, + enable_cpi_recording, true, true, &mut timings, @@ -4413,23 +4301,27 @@ impl Bank { }) .unwrap_or_default(); - let units_consumed = timings - .details - .per_program_timings - .iter() - .fold(0, |acc: u64, (_, program_timing)| { - acc.saturating_add(program_timing.accumulated_units) - }); + let units_consumed = + timings + .details + .per_program_timings + .iter() + .fold(0, |acc: u64, (_, program_timing)| { + acc.saturating_add(program_timing.accumulated_units) + .saturating_add(program_timing.total_errored_units) + }); debug!("simulate_transaction: {:?}", timings); let execution_result = execution_results.pop().unwrap(); let flattened_result = execution_result.flattened_result(); - let (logs, return_data) = match execution_result { - TransactionExecutionResult::Executed { details, .. } => { - (details.log_messages, details.return_data) - } - TransactionExecutionResult::NotExecuted(_) => (None, None), + let (logs, return_data, inner_instructions) = match execution_result { + TransactionExecutionResult::Executed { details, .. 
} => (
+ details.log_messages,
+ details.return_data,
+ details.inner_instructions,
+ ),
+ TransactionExecutionResult::NotExecuted(_) => (None, None, None),
 };
 let logs = logs.unwrap_or_default();
@@ -4439,6 +4331,7 @@ impl Bank {
 post_simulation_accounts,
 units_consumed,
 return_data,
+ inner_instructions,
 }
 }
@@ -4482,7 +4375,7 @@ impl Bank {
 fn check_age<'a>(
 &self,
- txs: impl Iterator<Item = &'a SanitizedTransaction>,
+ txs: impl Iterator<Item = &'a (impl core::borrow::Borrow<SanitizedTransaction> + 'a)>,
 lock_results: &[Result<()>],
 max_age: usize,
 error_counters: &mut TransactionErrorMetrics,
@@ -4494,7 +4387,7 @@
 txs.zip(lock_results)
 .map(|(tx, lock_res)| match lock_res {
 Ok(()) => self.check_transaction_age(
- tx,
+ tx.borrow(),
 max_age,
 &next_durable_nonce,
 &hash_queue,
@@ -4540,7 +4433,7 @@
 fn check_status_cache(
 &self,
- sanitized_txs: &[SanitizedTransaction],
+ sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
 lock_results: Vec<TransactionCheckResult>,
 error_counters: &mut TransactionErrorMetrics,
 ) -> Vec<TransactionCheckResult> {
@@ -4549,6 +4442,7 @@
 .iter()
 .zip(lock_results)
 .map(|(sanitized_tx, (lock_result, nonce))| {
+ let sanitized_tx = sanitized_tx.borrow();
 if lock_result.is_ok()
 && self.is_transaction_already_processed(sanitized_tx, &rcache)
 {
@@ -4603,7 +4497,7 @@
 pub fn check_transactions(
 &self,
- sanitized_txs: &[SanitizedTransaction],
+ sanitized_txs: &[impl core::borrow::Borrow<SanitizedTransaction>],
 lock_results: &[Result<()>],
 max_age: usize,
 error_counters: &mut TransactionErrorMetrics,
@@ -4655,7 +4549,11 @@
 }
 }
- fn load_program_accounts(&self, pubkey: &Pubkey) -> ProgramAccountLoadResult {
+ fn load_program_accounts(
+ &self,
+ pubkey: &Pubkey,
+ environments: &ProgramRuntimeEnvironments,
+ ) -> ProgramAccountLoadResult {
 let program_account = match self.get_account_with_fixed_root(pubkey) {
 None => return ProgramAccountLoadResult::AccountNotFound,
 Some(account) => account,
@@ -4672,7 +4570,9 @@
 (!matches!(state.status, LoaderV4Status::Retracted)).then_some(state.slot)
 })
 .map(|slot| ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot))
- .unwrap_or(ProgramAccountLoadResult::InvalidV4Program);
+ .unwrap_or(ProgramAccountLoadResult::InvalidAccountData(
+ environments.program_runtime_v2.clone(),
+ ));
 }
 if !bpf_loader_upgradeable::check_id(program_account.owner()) {
@@ -4700,7 +4600,44 @@
 );
 }
 }
- ProgramAccountLoadResult::InvalidAccountData
+ ProgramAccountLoadResult::InvalidAccountData(environments.program_runtime_v1.clone())
+ }
+
+ fn load_program_from_bytes(
+ load_program_metrics: &mut LoadProgramMetrics,
+ programdata: &[u8],
+ loader_key: &Pubkey,
+ account_size: usize,
+ deployment_slot: Slot,
+ program_runtime_environment: ProgramRuntimeEnvironment,
+ reloading: bool,
+ ) -> std::result::Result<LoadedProgram, Box<dyn std::error::Error>> {
+ if reloading {
+ // Safety: this is safe because the program is being reloaded in the cache.
+ unsafe { + LoadedProgram::reload( + loader_key, + program_runtime_environment.clone(), + deployment_slot, + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + None, + programdata, + account_size, + load_program_metrics, + ) + } + } else { + LoadedProgram::new( + loader_key, + program_runtime_environment.clone(), + deployment_slot, + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + None, + programdata, + account_size, + load_program_metrics, + ) + } } pub fn load_program( @@ -4721,21 +4658,16 @@ impl Bank { ..LoadProgramMetrics::default() }; - let mut loaded_program = match self.load_program_accounts(pubkey) { + let mut loaded_program = match self.load_program_accounts(pubkey, environments) { ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( self.slot, LoadedProgramType::Closed, )), - ProgramAccountLoadResult::InvalidAccountData => { - Err(InstructionError::InvalidAccountData) - } + ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { - solana_bpf_loader_program::load_program_from_bytes( - self.feature_set - .is_active(&feature_set::delay_visibility_of_program_deployment::id()), - None, + Self::load_program_from_bytes( &mut load_program_metrics, program_account.data(), program_account.owner(), @@ -4744,6 +4676,7 @@ impl Bank { environments.program_runtime_v1.clone(), reload, ) + .map_err(|_| (0, environments.program_runtime_v1.clone())) } ProgramAccountLoadResult::ProgramOfLoaderV3( @@ -4753,12 +4686,9 @@ impl Bank { ) => programdata_account .data() .get(UpgradeableLoaderState::size_of_programdata_metadata()..) - .ok_or(InstructionError::InvalidAccountData) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) .and_then(|programdata| { - solana_bpf_loader_program::load_program_from_bytes( - self.feature_set - .is_active(&feature_set::delay_visibility_of_program_deployment::id()), - None, + Self::load_program_from_bytes( &mut load_program_metrics, programdata, program_account.owner(), @@ -4770,60 +4700,28 @@ impl Bank { environments.program_runtime_v1.clone(), reload, ) - }), - - ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { - let loaded_program = program_account - .data() - .get(LoaderV4State::program_data_offset()..) - .and_then(|elf_bytes| { - if reload { - // Safety: this is safe because the program is being reloaded in the cache. - unsafe { - LoadedProgram::reload( - &loader_v4::id(), - environments.program_runtime_v2.clone(), - slot, - slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - elf_bytes, - program_account.data().len(), - &mut load_program_metrics, - ) - } - } else { - LoadedProgram::new( - &loader_v4::id(), - environments.program_runtime_v2.clone(), - slot, - slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - elf_bytes, - program_account.data().len(), - &mut load_program_metrics, - ) - } - .ok() - }) - .unwrap_or(LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::FailedVerification( - environments.program_runtime_v2.clone(), - ), - )); - Ok(loaded_program) - } + }) + .map_err(|_| (slot, environments.program_runtime_v1.clone())), - ProgramAccountLoadResult::InvalidV4Program => Ok(LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::FailedVerification(environments.program_runtime_v2.clone()), - )), + ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => program_account + .data() + .get(LoaderV4State::program_data_offset()..) 
+ .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|elf_bytes| { + Self::load_program_from_bytes( + &mut load_program_metrics, + elf_bytes, + &loader_v4::id(), + program_account.data().len(), + slot, + environments.program_runtime_v2.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v2.clone())), } - .unwrap_or_else(|_| { - LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::FailedVerification(environments.program_runtime_v1.clone()), - ) + .unwrap_or_else(|(slot, env)| { + LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) }); let mut timings = ExecuteDetailsTimings::default(); @@ -4838,6 +4736,7 @@ impl Bank { loaded_program.ix_usage_counter = AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); } + loaded_program.update_access_slot(self.slot()); Arc::new(loaded_program) } @@ -4874,9 +4773,7 @@ impl Bank { ) -> Option { let mut lamports_sum = 0u128; for i in 0..message.account_keys().len() { - let Some((_, account)) = accounts.get(i) else { - return None; - }; + let (_, account) = accounts.get(i)?; lamports_sum = lamports_sum.checked_add(u128::from(account.lamports()))?; } Some(lamports_sum) @@ -4887,16 +4784,9 @@ impl Bank { let mut transaction_context = TransactionContext::new( transaction_accounts, - self.rent_collector.rent, + self.rent_collector.rent.clone(), compute_budget.max_invoke_stack_height, - if self - .feature_set - .is_active(&feature_set::limit_max_instruction_trace_length::id()) - { - compute_budget.max_instruction_trace_length - } else { - std::usize::MAX - }, + compute_budget.max_instruction_trace_length, ); #[cfg(debug_assertions)] transaction_context.set_signature(tx.signature()); @@ -4922,10 +4812,6 @@ impl Bank { self.slot, programs_loaded_for_tx_batch.environments.clone(), ); - let mut programs_updated_only_for_global_cache = LoadedProgramsForTxBatch::new( - self.slot, - programs_loaded_for_tx_batch.environments.clone(), - ); let mut process_message_time = Measure::start("process_message_time"); let process_result = MessageProcessor::process_message( tx.message(), @@ -4934,7 +4820,6 @@ impl Bank { log_collector.clone(), programs_loaded_for_tx_batch, &mut programs_modified_by_tx, - &mut programs_updated_only_for_global_cache, self.feature_set.clone(), compute_budget, timings, @@ -5034,9 +4919,6 @@ impl Bank { accounts_data_len_delta, }, programs_modified_by_tx: Box::new(programs_modified_by_tx), - programs_updated_only_for_global_cache: Box::new( - programs_updated_only_for_global_cache, - ), } } @@ -5044,7 +4926,7 @@ impl Bank { &self, program_accounts_map: &HashMap, ) -> LoadedProgramsForTxBatch { - let programs_and_slots: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = + let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = if self.check_program_modification_slot { program_accounts_map .iter() @@ -5070,50 +4952,55 @@ impl Bank { .collect() }; - let ExtractedPrograms { - loaded: mut loaded_programs_for_txs, - missing, - unloaded, - } = { - // Lock the global cache to figure out which programs need to be loaded - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - loaded_programs_cache.extract(self, programs_and_slots.into_iter()) - }; - - // Load missing programs while global cache is unlocked - let missing_programs: Vec<(Pubkey, Arc)> = missing - .iter() - .map(|(key, count)| { - let program = self.load_program(key, false, None); - program.tx_usage_counter.store(*count, Ordering::Relaxed); - (*key, program) 
- }) - .collect(); - - // Reload unloaded programs while global cache is unlocked - let unloaded_programs: Vec<(Pubkey, Arc)> = unloaded - .iter() - .map(|(key, count)| { - let program = self.load_program(key, true, None); - program.tx_usage_counter.store(*count, Ordering::Relaxed); - (*key, program) - }) - .collect(); + let mut loaded_programs_for_txs = None; + let mut program_to_store = None; + loop { + let (program_to_load, task_cookie, task_waiter) = { + // Lock the global cache. + let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + // Initialize our local cache. + if loaded_programs_for_txs.is_none() { + loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( + self.slot, + loaded_programs_cache + .get_environments_for_epoch(self.epoch) + .clone(), + )); + } + // Submit our last completed loading task. + if let Some((key, program)) = program_to_store.take() { + loaded_programs_cache.finish_cooperative_loading_task( + self.slot(), + key, + program, + ); + } + // Figure out which program needs to be loaded next. + let program_to_load = loaded_programs_cache.extract( + &mut missing_programs, + loaded_programs_for_txs.as_mut().unwrap(), + ); + let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); + (program_to_load, task_waiter.cookie(), task_waiter) + // Unlock the global cache again. + }; - // Lock the global cache again to replenish the missing programs - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - for (key, program) in missing_programs { - let (_was_occupied, entry) = loaded_programs_cache.replenish(key, program); - // Use the returned entry as that might have been deduplicated globally - loaded_programs_for_txs.replenish(key, entry); - } - for (key, program) in unloaded_programs { - let (_was_occupied, entry) = loaded_programs_cache.replenish(key, program); - // Use the returned entry as that might have been deduplicated globally - loaded_programs_for_txs.replenish(key, entry); + if let Some((key, count)) = program_to_load { + // Load, verify and compile one program. + let program = self.load_program(&key, false, None); + program.tx_usage_counter.store(count, Ordering::Relaxed); + program_to_store = Some((key, program)); + } else if missing_programs.is_empty() { + break; + } else { + // Sleep until the next finish_cooperative_loading_task() call. + // Once a task completes we'll wake up and try to load the + // missing programs inside the tx batch again. 
+ let _new_cookie = task_waiter.wait(task_cookie); + } } - loaded_programs_for_txs + loaded_programs_for_txs.unwrap() } /// Returns a hash map of executable program accounts (program accounts that are not writable @@ -5231,12 +5118,6 @@ impl Bank { ); check_time.stop(); - const PROGRAM_OWNERS: &[Pubkey] = &[ - bpf_loader_upgradeable::id(), - bpf_loader::id(), - bpf_loader_deprecated::id(), - loader_v4::id(), - ]; let mut program_accounts_map = self.filter_executable_program_accounts( &self.ancestors, sanitized_txs, @@ -5254,7 +5135,8 @@ impl Bank { )); let mut load_time = Measure::start("accounts_load"); - let mut loaded_transactions = self.rc.accounts.load_accounts( + let mut loaded_transactions = load_accounts( + &self.rc.accounts.accounts_db, &self.ancestors, sanitized_txs, check_results, @@ -5267,6 +5149,7 @@ impl Bank { self.get_reward_interval(), &program_accounts_map, &programs_loaded_for_tx_batch.borrow(), + self.should_collect_rent(), ); load_time.stop(); @@ -5287,7 +5170,6 @@ impl Bank { Measure::start("compute_budget_process_transaction_time"); let maybe_compute_budget = ComputeBudget::try_from_instructions( tx.message().program_instructions_iter(), - &self.feature_set, ); compute_budget_process_transaction_time.stop(); saturating_add_assign!( @@ -5319,7 +5201,6 @@ impl Bank { if let TransactionExecutionResult::Executed { details, programs_modified_by_tx, - programs_updated_only_for_global_cache: _, } = &result { // Update batch specific cache of the loaded programs with the modifications @@ -5342,7 +5223,10 @@ impl Bank { self.loaded_programs_cache .write() .unwrap() - .sort_and_unload(Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE)); + .evict_using_2s_random_selection( + Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), + self.slot(), + ); debug!( "check: {}us load: {}us execute: {}us txs_len={}", @@ -5550,18 +5434,6 @@ impl Bank { self.update_accounts_data_size_delta_off_chain(data_size_delta); } - /// Set the initial accounts data size - /// NOTE: This fn is *ONLY FOR TESTS* - pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) { - self.accounts_data_size_initial = amount; - } - - /// Update the accounts data size off-chain delta - /// NOTE: This fn is *ONLY FOR TESTS* - pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) { - self.update_accounts_data_size_delta_off_chain(amount) - } - fn filter_program_errors_and_collect_fee( &self, txs: &[SanitizedTransaction], @@ -5696,13 +5568,11 @@ impl Bank { if let TransactionExecutionResult::Executed { details, programs_modified_by_tx, - programs_updated_only_for_global_cache, } = execution_result { if details.status.is_ok() { let mut cache = self.loaded_programs_cache.write().unwrap(); cache.merge(programs_modified_by_tx); - cache.merge(programs_updated_only_for_global_cache); } } } @@ -5797,6 +5667,70 @@ impl Bank { }); } + /// After deserialize, populate skipped rewrites with accounts that would normally + /// have had their data rewritten in this slot due to rent collection (but didn't). + /// + /// This is required when starting up from a snapshot to verify the bank hash. + /// + /// A second usage is from the `bank_to_xxx_snapshot_archive()` functions. These fns call + /// `Bank::rehash()` to handle if the user manually modified any accounts and thus requires + /// calculating the bank hash again. 
Since calculating the bank hash *takes* the skipped
+ /// rewrites, this second time will not have any skipped rewrites, and thus the hash would be
+ /// updated to the wrong value. So, rebuild the skipped rewrites before rehashing.
+ fn rebuild_skipped_rewrites(&self) {
+ // If the feature gate to *not* add rent collection rewrites to the bank hash is enabled,
+ // then do *not* add anything to our skipped_rewrites.
+ if self.bank_hash_skips_rent_rewrites() {
+ return;
+ }
+
+ let (skipped_rewrites, measure_skipped_rewrites) =
+ measure!(self.calculate_skipped_rewrites());
+ info!(
+ "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}",
+ skipped_rewrites.len()
+ );
+
+ *self.skipped_rewrites.lock().unwrap() = skipped_rewrites;
+ }
+
+ /// Calculates (and returns) skipped rewrites for this bank
+ ///
+ /// Refer to `rebuild_skipped_rewrites()` for more documentation.
+ /// This implementation is purposely separate to facilitate testing.
+ ///
+ /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the
+ /// specific account is *not* already in the accounts delta hash. If an account is not in
+ /// the accounts delta hash, then it means the account was not modified. Since (basically)
+ /// all accounts are rent exempt, this means (basically) all accounts are unmodified by rent
+ /// collection. So we just need to load the accounts that would've been checked for rent
+ /// collection, hash them, and add them to Bank::skipped_rewrites.
+ ///
+ /// As of this writing, there are ~350 million accounts on mainnet-beta.
+ /// Rent collection almost always collects a single slot at a time.
+ /// So 1 slot of 432,000, of 350 million accounts, is ~800 accounts per slot.
+ /// Since we haven't started processing anything yet, it should be fast enough to simply
+ /// load the accounts directly.
+ /// Empirically, this takes about 3-4 milliseconds.
+ fn calculate_skipped_rewrites(&self) -> HashMap<Pubkey, AccountHash> {
+ // The returned skipped rewrites may include accounts that were actually *not* skipped!
+ // (This is safe, as per the fn's documentation above.)
+ HashMap::from_iter(
+ self.rent_collection_partitions()
+ .into_iter()
+ .map(accounts_partition::pubkey_range_from_partition)
+ .flat_map(|pubkey_range| {
+ self.rc
+ .accounts
+ .load_to_collect_rent_eagerly(&self.ancestors, pubkey_range)
+ })
+ .map(|(pubkey, account, _slot)| {
+ let account_hash = AccountsDb::hash_account(&account, &pubkey);
+ (pubkey, account_hash)
+ }),
+ )
+ }
+
 fn collect_rent_eagerly(&self) {
 if self.lazy_rent_collection.load(Relaxed) {
 return;
@@ -5871,11 +5805,6 @@
 );
 }
- #[cfg(test)]
- fn restore_old_behavior_for_fragile_tests(&self) {
- self.lazy_rent_collection.store(true, Relaxed);
- }
-
 fn rent_collection_partitions(&self) -> Vec<Partition> {
 if !self.use_fixed_collection_cycle() {
 // This mode is for production/development/testing.
@@ -5905,6 +5834,13 @@
 .is_active(&feature_set::skip_rent_rewrites::id())
 }
+ /// true if rent fees should be collected (i.e. disable_rent_fees_collection is NOT enabled)
+ fn should_collect_rent(&self) -> bool {
+ !self
+ .feature_set
+ .is_active(&feature_set::disable_rent_fees_collection::id())
+ }
+
 /// Collect rent from `accounts`
 ///
 /// This fn is called inside a parallel loop from `collect_rent_in_partition()`.
Avoid adding @@ -5938,15 +5874,25 @@ impl Bank { .is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { - let (rent_collected_info, measure) = - measure!(self.rent_collector.collect_from_existing_account( - pubkey, - account, - self.rc.accounts.accounts_db.filler_account_suffix.as_ref(), - set_exempt_rent_epoch_max, - )); - time_collecting_rent_us += measure.as_us(); - + let rent_collected_info = if self.should_collect_rent() { + let (rent_collected_info, measure) = measure!(self + .rent_collector + .collect_from_existing_account(pubkey, account, set_exempt_rent_epoch_max,)); + time_collecting_rent_us += measure.as_us(); + rent_collected_info + } else { + // When rent fee collection is disabled, we won't collect rent for any account. If there + // are any rent paying accounts, their `rent_epoch` won't change either. However, if the + // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its + // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. + if set_exempt_rent_epoch_max + && (account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && self.rent_collector.get_rent_due(account) == RentDue::Exempt) + { + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + } + CollectedInfo::default() + }; // only store accounts where we collected rent // but get the hash for all these accounts even if collected rent is 0 (= not updated). // Also, there's another subtle side-effect from rewrites: this @@ -6044,8 +5990,6 @@ impl Bank { /// collect rent and update 'account.rent_epoch' as necessary /// store accounts, whether rent was collected or not (depending on whether we skipping rewrites is enabled) /// update bank's rewrites set for all rewrites that were skipped - /// if 'just_rewrites', function will only update bank's rewrites set and not actually store any accounts. - /// This flag is used when restoring from a snapshot to calculate and verify the initial bank's delta hash. fn collect_rent_in_range( &self, partition: Partition, @@ -6110,7 +6054,7 @@ impl Bank { self.skipped_rewrites .lock() .unwrap() - .extend(&mut results.skipped_rewrites.into_iter()); + .extend(results.skipped_rewrites); // We cannot assert here that we collected from all expected keys. // Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports. @@ -6384,7 +6328,7 @@ impl Bank { pub fn process_transaction(&self, tx: &Transaction) -> Result<()> { self.try_process_transactions(std::iter::once(tx))?[0].clone()?; tx.signatures - .get(0) + .first() .map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap()) } @@ -6421,19 +6365,6 @@ impl Bank { execution_results.remove(0) } - /// Process multiple transaction in a single batch. This is used for benches and unit tests. - /// - /// # Panics - /// - /// Panics if any of the transactions do not pass sanitization checks. - #[must_use] - pub fn process_transactions<'a>( - &self, - txs: impl Iterator, - ) -> Vec> { - self.try_process_transactions(txs).unwrap() - } - /// Process multiple transaction in a single batch. This is used for benches and unit tests. /// Short circuits if any of the transactions do not pass sanitization checks. pub fn try_process_transactions<'a>( @@ -6446,16 +6377,6 @@ impl Bank { self.try_process_entry_transactions(txs) } - /// Process entry transactions in a single batch. This is used for benches and unit tests. 
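When `disable_rent_fees_collection` is active, the `else` arm added above collects nothing and leaves rent-paying accounts untouched; its only remaining side effect is pinning an already rent-exempt account's stale `rent_epoch` at the sentinel. That branch, reduced to a standalone function (the `Account` and `RentDue` types here are simplified stand-ins; in the real code `RENT_EXEMPT_RENT_EPOCH` is `u64::MAX`):

const RENT_EXEMPT_RENT_EPOCH: u64 = u64::MAX;

#[derive(PartialEq)]
#[allow(dead_code)]
enum RentDue {
    Exempt,
    Paying(u64),
}

struct Account {
    rent_epoch: u64,
}

// Mirrors the new `else` arm in `collect_rent_from_accounts`: no lamports
// move, but a rent-exempt account whose `rent_epoch` is not yet the sentinel
// gets normalized, so its stored form matches the rent-collecting path.
fn normalize_rent_epoch(account: &mut Account, rent_due: RentDue, set_exempt_rent_epoch_max: bool) {
    if set_exempt_rent_epoch_max
        && account.rent_epoch != RENT_EXEMPT_RENT_EPOCH
        && rent_due == RentDue::Exempt
    {
        account.rent_epoch = RENT_EXEMPT_RENT_EPOCH;
    }
}
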
- /// - /// # Panics - /// - /// Panics if any of the transactions do not pass sanitization checks. - #[must_use] - pub fn process_entry_transactions(&self, txs: Vec) -> Vec> { - self.try_process_entry_transactions(txs).unwrap() - } - /// Process multiple transaction in a single batch. This is used for benches and unit tests. /// Short circuits if any of the transactions do not pass sanitization checks. pub fn try_process_entry_transactions( @@ -6568,14 +6489,6 @@ impl Bank { .flush_accounts_cache(false, Some(self.slot())) } - #[cfg(test)] - pub fn flush_accounts_cache_slot_for_tests(&self) { - self.rc - .accounts - .accounts_db - .flush_accounts_cache_slot_for_tests(self.slot()) - } - pub fn expire_old_recycle_stores(&self) { self.rc.accounts.accounts_db.expire_old_recycle_stores() } @@ -7013,7 +6926,7 @@ impl Bank { .calculate_accounts_delta_hash_internal( slot, ignore, - std::mem::take(&mut self.skipped_rewrites.lock().unwrap()), + self.skipped_rewrites.lock().unwrap().clone(), ); let mut signature_count_buf = [0u8; 8]; @@ -7067,13 +6980,6 @@ impl Bank { /// The epoch accounts hash is hashed into the bank's hash once per epoch at a predefined slot. /// Should it be included in *this* bank? fn should_include_epoch_accounts_hash(&self) -> bool { - if !self - .feature_set - .is_active(&feature_set::epoch_accounts_hash::id()) - { - return false; - } - if !epoch_accounts_hash_utils::is_enabled_this_epoch(self) { return false; } @@ -7158,7 +7064,7 @@ impl Bank { if config.run_in_background { let ancestors = ancestors.clone(); let accounts = Arc::clone(accounts); - let epoch_schedule = *epoch_schedule; + let epoch_schedule = epoch_schedule.clone(); let rent_collector = rent_collector.clone(); let accounts_ = Arc::clone(&accounts); accounts.accounts_db.verify_accounts_hash_in_bg.start(|| { @@ -7232,16 +7138,6 @@ impl Bank { .check_complete() } - /// This is only valid to call from tests. - /// block until initial accounts hash verification has completed - pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) { - self.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .wait_for_complete() - } - /// Get this bank's storages to use for snapshots. /// /// If a base slot is provided, return only the storages that are *higher* than this slot. @@ -7358,7 +7254,7 @@ impl Bank { /// This should only be used for developing purposes. pub fn set_capitalization(&self) -> u64 { let old = self.capitalization(); - // We cannot debug verify the hash calculation here becuase calculate_capitalization will use the index calculation due to callers using the write cache. + // We cannot debug verify the hash calculation here because calculate_capitalization will use the index calculation due to callers using the write cache. // debug_verify only exists as an extra debugging step under the assumption that this code path is only used for tests. But, this is used by ledger-tool create-snapshot // for example. 
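In the delta-hash hunk above, `calculate_accounts_delta_hash_internal` now receives a `clone()` of `skipped_rewrites` where it previously took the map with `std::mem::take`, so a later `rehash()` still sees the same entries. The difference in one runnable snippet (a toy map stands in for the pubkey-to-hash map):

use std::{collections::HashMap, sync::Mutex};

fn main() {
    let skipped: Mutex<HashMap<u8, u8>> = Mutex::new(HashMap::from([(1, 1)]));

    // `mem::take` drains the source, so a second hash pass would see nothing:
    let taken = std::mem::take(&mut *skipped.lock().unwrap());
    assert!(skipped.lock().unwrap().is_empty());
    *skipped.lock().unwrap() = taken; // restore for the second half of the demo

    // `clone()` hands the hasher a snapshot and leaves the original intact:
    let snapshot = skipped.lock().unwrap().clone();
    assert_eq!(snapshot.len(), 1);
    assert_eq!(skipped.lock().unwrap().len(), 1);
}
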
let debug_verify = false; @@ -7481,10 +7377,6 @@ impl Bank { accounts_hash } - pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { - self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false) - } - /// Calculate the incremental accounts hash from `base_slot` to `self` pub fn update_incremental_accounts_hash(&self, base_slot: Slot) -> IncrementalAccountsHash { let config = CalcAccountsHashConfig { @@ -7582,22 +7474,9 @@ impl Bank { } }); - let (verified_bank, verify_bank_time_us) = measure_us!({ - let should_verify_bank = !self - .rc - .accounts - .accounts_db - .test_skip_rewrites_but_include_in_bank_hash; - if should_verify_bank { - info!("Verifying bank..."); - let verified = self.verify_hash(); - info!("Verifying bank... Done."); - verified - } else { - info!("Verifying bank... Skipped."); - true - } - }); + info!("Verifying bank..."); + let (verified_bank, verify_bank_time_us) = measure_us!(self.verify_hash()); + info!("Verifying bank... Done."); datapoint_info!( "verify_snapshot_bank", @@ -7879,16 +7758,6 @@ impl Bank { .shrink_ancient_slots(self.epoch_schedule()) } - pub fn no_overflow_rent_distribution_enabled(&self) -> bool { - self.feature_set - .is_active(&feature_set::no_overflow_rent_distribution::id()) - } - - pub fn prevent_rent_paying_rent_recipients(&self) -> bool { - self.feature_set - .is_active(&feature_set::prevent_rent_paying_rent_recipients::id()) - } - pub fn validate_fee_collector_account(&self) -> bool { self.feature_set .is_active(&feature_set::validate_fee_collector_account::id()) @@ -7948,16 +7817,29 @@ impl Bank { caller: ApplyFeatureActivationsCaller, debug_do_not_add_builtins: bool, ) { - use ApplyFeatureActivationsCaller::*; + use ApplyFeatureActivationsCaller as Caller; let allow_new_activations = match caller { - FinishInit => false, - NewFromParent => true, - WarpFromParent => false, + Caller::FinishInit => false, + Caller::NewFromParent => true, + Caller::WarpFromParent => false, }; let (feature_set, new_feature_activations) = self.compute_active_feature_set(allow_new_activations); self.feature_set = Arc::new(feature_set); + // Update activation slot of features in `new_feature_activations` + for feature_id in new_feature_activations.iter() { + if let Some(mut account) = self.get_account_with_fixed_root(feature_id) { + if let Some(mut feature) = feature::from_account(&account) { + feature.activated_at = Some(self.slot()); + if feature::to_account(&feature, &mut account).is_some() { + self.store_account(feature_id, &account); + } + info!("Feature {} activated at slot {}", feature_id, self.slot()); + } + } + } + if new_feature_activations.contains(&feature_set::pico_inflation::id()) { *self.inflation.write().unwrap() = Inflation::pico(); self.fee_rate_governor.burn_percent = 50; // 50% fee burn @@ -8026,38 +7908,27 @@ impl Bank { /// Compute the active feature set based on the current bank state, /// and return it together with the set of newly activated features. 
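With `compute_active_feature_set` about to become read-only (next hunk), the write side now lives in the loop added above: for every newly activated feature, stamp `activated_at` with the current slot and store the account back. The split, sketched with plain std types (`u32` ids and a `HashMap` store stand in for feature pubkeys and on-chain accounts):

use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct Feature {
    activated_at: Option<u64>,
}

// A read-only scan (elsewhere) decides `pending`; this separate pass records
// the activation slot, mirroring the loop added in apply_feature_activations.
fn record_activations(store: &mut HashMap<u32, Feature>, pending: &HashSet<u32>, slot: u64) {
    for id in pending {
        if let Some(feature) = store.get_mut(id) {
            feature.activated_at = Some(slot);
            println!("Feature {id} activated at slot {slot}");
        }
    }
}
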
- fn compute_active_feature_set( - &mut self, - allow_new_activations: bool, - ) -> (FeatureSet, HashSet) { + fn compute_active_feature_set(&self, include_pending: bool) -> (FeatureSet, HashSet) { let mut active = self.feature_set.active.clone(); let mut inactive = HashSet::new(); - let mut newly_activated = HashSet::new(); + let mut pending = HashSet::new(); let slot = self.slot(); for feature_id in &self.feature_set.inactive { let mut activated = None; - if let Some(mut account) = self.get_account_with_fixed_root(feature_id) { - if let Some(mut feature) = feature::from_account(&account) { + if let Some(account) = self.get_account_with_fixed_root(feature_id) { + if let Some(feature) = feature::from_account(&account) { match feature.activated_at { - None => { - if allow_new_activations { - // Feature has been requested, activate it now - feature.activated_at = Some(slot); - if feature::to_account(&feature, &mut account).is_some() { - self.store_account(feature_id, &account); - } - newly_activated.insert(*feature_id); - activated = Some(slot); - info!("Feature {} activated at slot {}", feature_id, slot); - } + None if include_pending => { + // Feature activation is pending + pending.insert(*feature_id); + activated = Some(slot); } - Some(activation_slot) => { - if slot >= activation_slot { - // Feature is already active - activated = Some(activation_slot); - } + Some(activation_slot) if slot >= activation_slot => { + // Feature has been activated already + activated = Some(activation_slot); } + _ => {} } } } @@ -8068,7 +7939,7 @@ impl Bank { } } - (FeatureSet { active, inactive }, newly_activated) + (FeatureSet { active, inactive }, pending) } fn apply_builtin_program_feature_transitions( @@ -8098,10 +7969,12 @@ impl Bank { } } for precompile in get_precompiles() { - #[allow(clippy::blocks_in_if_conditions)] - if precompile.feature.map_or(false, |ref feature_id| { - self.feature_set.is_active(feature_id) - }) { + let should_add_precompile = precompile + .feature + .as_ref() + .map(|feature_id| self.feature_set.is_active(feature_id)) + .unwrap_or(false); + if should_add_precompile { self.add_precompile(&precompile.program_id); } } @@ -8173,10 +8046,7 @@ impl Bank { /// EAH *must* be included. This means if an EAH calculation is currently in-flight we will /// wait for it to complete. 
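The rewritten match above reduces to a small decision table over `activated_at`; extracted here as a pure function with a test (the name `classify` is mine, the semantics are the hunk's):

/// Returns the effective activation slot, if the feature counts as active.
fn classify(activated_at: Option<u64>, slot: u64, include_pending: bool) -> Option<u64> {
    match activated_at {
        // Requested but not yet stamped: active only when pending are included
        None if include_pending => Some(slot),
        // Stamped at or before this slot: active since the recorded slot
        Some(activation_slot) if slot >= activation_slot => Some(activation_slot),
        // Stamped in the future, or pending excluded: inactive here
        _ => None,
    }
}

#[test]
fn classify_matches_the_new_match_arms() {
    assert_eq!(classify(None, 10, true), Some(10));
    assert_eq!(classify(None, 10, false), None);
    assert_eq!(classify(Some(5), 10, false), Some(5));
    assert_eq!(classify(Some(11), 10, true), None);
}
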
pub fn get_epoch_accounts_hash_to_serialize(&self) -> Option { - let should_get_epoch_accounts_hash = self - .feature_set - .is_active(&feature_set::epoch_accounts_hash::id()) - && epoch_accounts_hash_utils::is_enabled_this_epoch(self) + let should_get_epoch_accounts_hash = epoch_accounts_hash_utils::is_enabled_this_epoch(self) && epoch_accounts_hash_utils::is_in_calculation_window(self); if !should_get_epoch_accounts_hash { return None; @@ -8255,6 +8125,196 @@ impl Bank { } } +#[cfg(feature = "dev-context-only-utils")] +impl Bank { + pub fn wrap_with_bank_forks_for_tests(self) -> (Arc, Arc>) { + let bank_forks = BankForks::new_rw_arc(self); + let bank = bank_forks.read().unwrap().root_bank(); + (bank, bank_forks) + } + + pub fn default_for_tests() -> Self { + let accounts_db = AccountsDb::default_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); + Self::default_with_accounts(accounts) + } + + pub fn new_with_bank_forks_for_tests( + genesis_config: &GenesisConfig, + ) -> (Arc, Arc>) { + let bank = Self::new_for_tests(genesis_config); + bank.wrap_with_bank_forks_for_tests() + } + + pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self { + Self::new_for_tests_with_config(genesis_config, BankTestConfig::default()) + } + + pub fn new_with_mockup_builtin_for_tests( + genesis_config: &GenesisConfig, + program_id: Pubkey, + builtin_function: BuiltinFunctionWithContext, + ) -> (Arc, Arc>) { + let mut bank = Self::new_for_tests(genesis_config); + bank.add_mockup_builtin(program_id, builtin_function); + bank.wrap_with_bank_forks_for_tests() + } + + pub fn new_for_tests_with_config( + genesis_config: &GenesisConfig, + test_config: BankTestConfig, + ) -> Self { + Self::new_with_config_for_tests( + genesis_config, + test_config.secondary_indexes, + AccountShrinkThreshold::default(), + ) + } + + pub fn new_no_wallclock_throttle_for_tests( + genesis_config: &GenesisConfig, + ) -> (Arc, Arc>) { + let mut bank = Self::new_for_tests(genesis_config); + + bank.ns_per_slot = std::u128::MAX; + bank.wrap_with_bank_forks_for_tests() + } + + pub(crate) fn new_with_config_for_tests( + genesis_config: &GenesisConfig, + account_indexes: AccountSecondaryIndexes, + shrink_ratio: AccountShrinkThreshold, + ) -> Self { + Self::new_with_paths_for_tests( + genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::new(), + account_indexes, + shrink_ratio, + ) + } + + pub fn new_with_paths_for_tests( + genesis_config: &GenesisConfig, + runtime_config: Arc, + paths: Vec, + account_indexes: AccountSecondaryIndexes, + shrink_ratio: AccountShrinkThreshold, + ) -> Self { + Self::new_with_paths( + genesis_config, + runtime_config, + paths, + None, + None, + account_indexes, + shrink_ratio, + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + Arc::default(), + ) + } + + pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self { + Self::new_with_paths_for_benches(genesis_config, Vec::new()) + } + + /// Intended for use by benches only. + /// create new bank with the given config and paths. + pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec) -> Self { + Self::new_with_paths( + genesis_config, + Arc::::default(), + paths, + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + None, + Arc::default(), + ) + } + + /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. 
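For reference, this is how the updated tests later in this diff consume the new constructors (a fragment, not a standalone program; `create_genesis_config` and `new_bank_from_parent_with_bank_forks` are helpers from the runtime's genesis_utils and tests.rs respectively):

let (genesis_config, _mint_keypair) = create_genesis_config(1_000);
// One call yields both the root bank and the BankForks it must live in:
let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
// Children are registered with the forks instead of being wrapped in a bare Arc:
let child = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &Pubkey::default(), 1);
assert_eq!(child.slot(), 1);
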
+ pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { + let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); + let sanitized_txs = txs + .into_iter() + .map(SanitizedTransaction::from_transaction_for_tests) + .collect::>(); + let lock_results = self + .rc + .accounts + .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit); + TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) + } + + /// Set the initial accounts data size + /// NOTE: This fn is *ONLY FOR TESTS* + pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) { + self.accounts_data_size_initial = amount; + } + + /// Update the accounts data size off-chain delta + /// NOTE: This fn is *ONLY FOR TESTS* + pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) { + self.update_accounts_data_size_delta_off_chain(amount) + } + + #[cfg(test)] + fn restore_old_behavior_for_fragile_tests(&self) { + self.lazy_rent_collection.store(true, Relaxed); + } + + /// Process multiple transaction in a single batch. This is used for benches and unit tests. + /// + /// # Panics + /// + /// Panics if any of the transactions do not pass sanitization checks. + #[must_use] + pub fn process_transactions<'a>( + &self, + txs: impl Iterator, + ) -> Vec> { + self.try_process_transactions(txs).unwrap() + } + + /// Process entry transactions in a single batch. This is used for benches and unit tests. + /// + /// # Panics + /// + /// Panics if any of the transactions do not pass sanitization checks. + #[must_use] + pub fn process_entry_transactions(&self, txs: Vec) -> Vec> { + self.try_process_entry_transactions(txs).unwrap() + } + + #[cfg(test)] + pub fn flush_accounts_cache_slot_for_tests(&self) { + self.rc + .accounts + .accounts_db + .flush_accounts_cache_slot_for_tests(self.slot()) + } + + /// This is only valid to call from tests. + /// block until initial accounts hash verification has completed + pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) { + self.rc + .accounts + .accounts_db + .verify_accounts_hash_in_bg + .wait_for_complete() + } + + pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { + self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false) + } +} + /// Compute how much an account has changed size. This function is useful when the data size delta /// needs to be computed and passed to an `update_accounts_data_size_delta` function. 
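The consolidation above hinges on a cargo feature rather than `#[cfg(test)]`, so tests in *downstream* crates can still reach these helpers. The pattern in miniature (the dependency line in the comment is illustrative, not copied from a manifest):

pub struct Bank;

// Compiled into the library only when the feature is enabled; a dependent
// crate turns it on in its dev-dependencies, e.g.:
//   solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
#[cfg(feature = "dev-context-only-utils")]
impl Bank {
    pub fn new_for_tests() -> Self {
        Bank
    }
}
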
fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 {
diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs
index a1b4fa74f2ff73..9072f6a12f1496 100644
--- a/runtime/src/bank/bank_hash_details.rs
+++ b/runtime/src/bank/bank_hash_details.rs
@@ -23,9 +23,51 @@ use {
 #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
 pub(crate) struct BankHashDetails {
-    /// client version
+    /// The client version
     pub version: String,
+    /// The encoding format for account data buffers
     pub account_data_encoding: String,
+    /// Bank hash details for a collection of banks
+    pub bank_hash_details: Vec<BankHashSlotDetails>,
+}
+
+impl BankHashDetails {
+    pub fn new(bank_hash_details: Vec<BankHashSlotDetails>) -> Self {
+        Self {
+            version: solana_version::version!().to_string(),
+            account_data_encoding: "base64".to_string(),
+            bank_hash_details,
+        }
+    }
+
+    /// Determines a filename given the currently held bank details
+    pub fn filename(&self) -> Result<String, String> {
+        if self.bank_hash_details.is_empty() {
+            return Err("BankHashDetails does not contain details for any banks".to_string());
+        }
+        // From here on, .unwrap() on .first() and .last() is safe as
+        // self.bank_hash_details is known to be non-empty
+        let (first_slot, first_hash) = {
+            let details = self.bank_hash_details.first().unwrap();
+            (details.slot, &details.bank_hash)
+        };
+
+        let filename = if self.bank_hash_details.len() == 1 {
+            format!("{first_slot}-{first_hash}.json")
+        } else {
+            let (last_slot, last_hash) = {
+                let details = self.bank_hash_details.last().unwrap();
+                (details.slot, &details.bank_hash)
+            };
+            format!("{first_slot}-{first_hash}_{last_slot}-{last_hash}.json")
+        };
+        Ok(filename)
+    }
+}
+
+/// The components that go into a bank hash calculation for a single bank/slot.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub(crate) struct BankHashSlotDetails {
     pub slot: Slot,
     pub bank_hash: String,
     pub parent_bank_hash: String,
@@ -35,7 +77,7 @@ pub(crate) struct BankHashDetails {
     pub accounts: BankHashAccounts,
 }
 
-impl BankHashDetails {
+impl BankHashSlotDetails {
     pub fn new(
         slot: Slot,
         bank_hash: Hash,
@@ -46,8 +88,6 @@ impl BankHashDetails {
         accounts: BankHashAccounts,
     ) -> Self {
         Self {
-            version: solana_version::version!().to_string(),
-            account_data_encoding: "base64".to_string(),
             slot,
             bank_hash: bank_hash.to_string(),
             parent_bank_hash: parent_bank_hash.to_string(),
@@ -59,7 +99,7 @@ impl BankHashDetails {
     }
 }
 
-impl TryFrom<&Bank> for BankHashDetails {
+impl TryFrom<&Bank> for BankHashSlotDetails {
     type Error = String;
 
     fn try_from(bank: &Bank) -> Result<Self, Self::Error> {
@@ -99,15 +139,16 @@ impl TryFrom<&Bank> for BankHashDetails {
     }
 }
 
-// Wrap the Vec<...> so we can implement custom Serialize/Deserialize traits on the wrapper type
+/// Wrapper around a Vec<_> to facilitate custom Serialize/Deserialize trait
+/// implementations.
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub(crate) struct BankHashAccounts {
     pub accounts: Vec<PubkeyHashAccount>,
 }
 
-#[derive(Deserialize, Serialize)]
 /// Used as an intermediate for serializing and deserializing account fields
 /// into a human readable format.
+#[derive(Deserialize, Serialize)] struct SerdeAccount { pubkey: String, hash: String, @@ -193,37 +234,31 @@ impl<'de> Deserialize<'de> for BankHashAccounts { } } -/// Output the components that comprise bank hash +/// Output the components that comprise the overall bank hash for the supplied `Bank` pub fn write_bank_hash_details_file(bank: &Bank) -> std::result::Result<(), String> { - let details = BankHashDetails::try_from(bank)?; + let slot_details = BankHashSlotDetails::try_from(bank)?; + let details = BankHashDetails::new(vec![slot_details]); - let slot = details.slot; - let hash = &details.bank_hash; - let file_name = format!("{slot}-{hash}.json"); let parent_dir = bank .rc .accounts .accounts_db .get_base_working_path() .join("bank_hash_details"); - let path = parent_dir.join(file_name); + let path = parent_dir.join(details.filename()?); // A file with the same name implies the same hash for this slot. Skip // rewriting a duplicate file in this scenario if !path.exists() { - info!("writing details of bank {} to {}", slot, path.display()); + info!("writing bank hash details file: {}", path.display()); // std::fs::write may fail (depending on platform) if the full directory // path does not exist. So, call std::fs_create_dir_all first. // https://doc.rust-lang.org/std/fs/fn.write.html _ = std::fs::create_dir_all(parent_dir); - let file = std::fs::File::create(&path).map_err(|err| { - format!( - "Unable to create bank hash file at {}: {err}", - path.display() - ) - })?; + let file = std::fs::File::create(&path) + .map_err(|err| format!("Unable to create file at {}: {err}", path.display()))?; serde_json::to_writer_pretty(file, &details) - .map_err(|err| format!("Unable to write bank hash file contents: {err}"))?; + .map_err(|err| format!("Unable to write file at {}: {err}", path.display()))?; } Ok(()) } @@ -232,44 +267,54 @@ pub fn write_bank_hash_details_file(bank: &Bank) -> std::result::Result<(), Stri pub mod tests { use super::*; - #[test] - fn test_serde_bank_hash_details() { - use solana_sdk::hash::hash; + fn build_details(num_slots: usize) -> BankHashDetails { + use solana_sdk::hash::{hash, hashv}; - let slot = 123_456_789; - let signature_count = 314; + let slot_details: Vec<_> = (0..num_slots) + .map(|slot| { + let signature_count = 314; - let account = AccountSharedData::from(Account { - lamports: 123_456_789, - data: vec![0, 9, 1, 8, 2, 7, 3, 6, 4, 5], - owner: Pubkey::new_unique(), - executable: true, - rent_epoch: 123, - }); - let account_pubkey = Pubkey::new_unique(); - let account_hash = AccountHash(hash("account".as_bytes())); - let accounts = BankHashAccounts { - accounts: vec![PubkeyHashAccount { - pubkey: account_pubkey, - hash: account_hash, - account, - }], - }; + let account = AccountSharedData::from(Account { + lamports: 123_456_789, + data: vec![0, 9, 1, 8, 2, 7, 3, 6, 4, 5], + owner: Pubkey::new_unique(), + executable: true, + rent_epoch: 123, + }); + let account_pubkey = Pubkey::new_unique(); + let account_hash = AccountHash(hash("account".as_bytes())); + let accounts = BankHashAccounts { + accounts: vec![PubkeyHashAccount { + pubkey: account_pubkey, + hash: account_hash, + account, + }], + }; - let bank_hash = hash("bank".as_bytes()); - let parent_bank_hash = hash("parent_bank".as_bytes()); - let accounts_delta_hash = hash("accounts_delta".as_bytes()); - let last_blockhash = hash("last_blockhash".as_bytes()); + let bank_hash = hashv(&["bank".as_bytes(), &slot.to_le_bytes()]); + let parent_bank_hash = hash("parent_bank".as_bytes()); + let accounts_delta_hash = 
hash("accounts_delta".as_bytes()); + let last_blockhash = hash("last_blockhash".as_bytes()); - let bank_hash_details = BankHashDetails::new( - slot, - bank_hash, - parent_bank_hash, - accounts_delta_hash, - signature_count, - last_blockhash, - accounts, - ); + BankHashSlotDetails::new( + slot as Slot, + bank_hash, + parent_bank_hash, + accounts_delta_hash, + signature_count, + last_blockhash, + accounts, + ) + }) + .collect(); + + BankHashDetails::new(slot_details) + } + + #[test] + fn test_serde_bank_hash_details() { + let num_slots = 10; + let bank_hash_details = build_details(num_slots); let serialized_bytes = serde_json::to_vec(&bank_hash_details).unwrap(); let deserialized_bank_hash_details: BankHashDetails = diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index e1d251c0bf478c..0ad70efbf9ca6e 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,7 +1,8 @@ use { super::Bank, + crate::accounts::account_rent_state::RentState, log::{debug, warn}, - solana_accounts_db::{account_rent_state::RentState, stake_rewards::RewardInfo}, + solana_accounts_db::stake_rewards::RewardInfo, solana_sdk::{ account::{ReadableAccount, WritableAccount}, pubkey::Pubkey, @@ -101,14 +102,14 @@ impl Bank { return Err(DepositFeeError::InvalidAccountOwner); } - let rent = self.rent_collector().rent; - let recipient_pre_rent_state = RentState::from_account(&account, &rent); + let rent = &self.rent_collector().rent; + let recipient_pre_rent_state = RentState::from_account(&account, rent); let distribution = account.checked_add_lamports(fees); if distribution.is_err() { return Err(DepositFeeError::LamportOverflow); } if options.check_rent_paying { - let recipient_post_rent_state = RentState::from_account(&account, &rent); + let recipient_post_rent_state = RentState::from_account(&account, rent); let rent_state_transition_allowed = recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state); if !rent_state_transition_allowed { @@ -180,19 +181,14 @@ impl Bank { (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse() }); - let enforce_fix = self.no_overflow_rent_distribution_enabled(); - let mut rent_distributed_in_initial_round = 0; let validator_rent_shares = validator_stakes .into_iter() .map(|(pubkey, staked)| { - let rent_share = if !enforce_fix { - (((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64 - } else { - (((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128)) - .try_into() - .unwrap() - }; + let rent_share = (((staked as u128) * (rent_to_be_distributed as u128)) + / (total_staked as u128)) + .try_into() + .unwrap(); rent_distributed_in_initial_round += rent_share; (pubkey, rent_share) }) @@ -213,15 +209,14 @@ impl Bank { } else { rent_share }; - if !enforce_fix || rent_to_be_paid > 0 { + if rent_to_be_paid > 0 { let check_account_owner = self.validate_fee_collector_account(); - let check_rent_paying = self.prevent_rent_paying_rent_recipients(); match self.deposit_fees( &pubkey, rent_to_be_paid, DepositFeeOptions { check_account_owner, - check_rent_paying, + check_rent_paying: true, }, ) { Ok(post_balance) => { @@ -259,20 +254,19 @@ impl Bank { ); } - if enforce_fix { - assert_eq!(leftover_lamports, 0); - } else if leftover_lamports != 0 { - warn!( - "There was leftover from rent distribution: {}", - leftover_lamports - ); - self.capitalization.fetch_sub(leftover_lamports, Relaxed); - } + assert_eq!(leftover_lamports, 0); } pub(super) fn 
distribute_rent_fees(&self) { let total_rent_collected = self.collected_rent.load(Relaxed); + if !self.should_collect_rent() { + if total_rent_collected != 0 { + warn!("Rent fees collection is disabled, yet total rent collected was non zero! Total rent collected: {total_rent_collected}"); + } + return; + } + let (burned_portion, rent_to_be_distributed) = self .rent_collector .rent @@ -300,7 +294,6 @@ pub mod tests { create_genesis_config, create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, - log::info, solana_sdk::{ account::AccountSharedData, feature_set, native_token::sol_to_lamports, pubkey, rent::Rent, signature::Signer, @@ -578,10 +571,9 @@ pub mod tests { let genesis = create_genesis_config(initial_balance); let pubkey = genesis.mint_keypair.pubkey(); let mut genesis_config = genesis.genesis_config; - let rent = Rent::default(); - genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default let bank = Bank::new_for_tests(&genesis_config); - let min_rent_exempt_balance = rent.minimum_balance(0); + let min_rent_exempt_balance = genesis_config.rent.minimum_balance(0); let deposit_amount = 500; assert!(initial_balance + deposit_amount < min_rent_exempt_balance); @@ -615,50 +607,6 @@ pub mod tests { } } - #[test] - fn test_distribute_rent_to_validators_overflow() { - solana_logger::setup(); - - // These values are taken from the real cluster (testnet) - const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; - const VALIDATOR_STAKE: u64 = 374_999_998_287_840; - - let validator_pubkey = solana_sdk::pubkey::new_rand(); - let mut genesis_config = - create_genesis_config_with_leader(10, &validator_pubkey, VALIDATOR_STAKE) - .genesis_config; - - let bank = Bank::new_for_tests(&genesis_config); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - let new_validator_lamports = bank.get_balance(&validator_pubkey); - assert_eq!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - - genesis_config - .accounts - .remove(&feature_set::no_overflow_rent_distribution::id()) - .unwrap(); - let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config)); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - let new_validator_lamports = std::panic::catch_unwind(|| { - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - bank.get_balance(&validator_pubkey) - }); - - if let Ok(new_validator_lamports) = new_validator_lamports { - info!("asserting overflowing incorrect rent distribution"); - assert_ne!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - } else { - info!("NOT-asserting overflowing incorrect rent distribution"); - } - } - #[test] fn test_distribute_rent_to_validators_rent_paying() { solana_logger::setup(); @@ -684,164 +632,129 @@ pub mod tests { let mut genesis_config = genesis_config_info.genesis_config; genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default - for deactivate_feature in [false, true] { - if deactivate_feature { - genesis_config - .accounts - .remove(&feature_set::prevent_rent_paying_rent_recipients::id()) - .unwrap(); - } - let bank = Bank::new_for_tests(&genesis_config); - let rent = bank.rent_collector().rent; - 
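The share computation above is now unconditionally done in u128 (the `enforce_fix` branch and its f64 fallback are deleted). Widening matters because `staked * rent_to_be_distributed` can overflow u64 at real cluster magnitudes; a standalone check using the constants from the overflow regression test deleted above:

fn rent_share(staked: u64, rent_to_be_distributed: u64, total_staked: u64) -> u64 {
    // The quotient is <= rent_to_be_distributed, so the try_into cannot fail.
    ((staked as u128) * (rent_to_be_distributed as u128) / (total_staked as u128))
        .try_into()
        .unwrap()
}

fn main() {
    const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; // values taken from the
    const VALIDATOR_STAKE: u64 = 374_999_998_287_840; // deleted testnet test
    // The product is ~4.5e19, past u64::MAX (~1.8e19), but fits easily in u128.
    assert_eq!(
        rent_share(VALIDATOR_STAKE, RENT_TO_BE_DISTRIBUTED, VALIDATOR_STAKE),
        RENT_TO_BE_DISTRIBUTED
    );
}
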
let rent_exempt_minimum = rent.minimum_balance(0); - - // Make one validator have an empty identity account - let mut empty_validator_account = bank - .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) - .unwrap(); - empty_validator_account.set_lamports(0); - bank.store_account( - &empty_validator.node_keypair.pubkey(), - &empty_validator_account, - ); + let bank = Bank::new_for_tests(&genesis_config); + let rent = &bank.rent_collector().rent; + let rent_exempt_minimum = rent.minimum_balance(0); - // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR - let mut becomes_rent_exempt_validator_account = bank - .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - becomes_rent_exempt_validator_account - .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); - bank.store_account( - &becomes_rent_exempt_validator.node_keypair.pubkey(), - &becomes_rent_exempt_validator_account, - ); + // Make one validator have an empty identity account + let mut empty_validator_account = bank + .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) + .unwrap(); + empty_validator_account.set_lamports(0); + bank.store_account( + &empty_validator.node_keypair.pubkey(), + &empty_validator_account, + ); - // Make one validator rent-exempt - let mut rent_exempt_validator_account = bank - .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - rent_exempt_validator_account.set_lamports(rent_exempt_minimum); - bank.store_account( - &rent_exempt_validator.node_keypair.pubkey(), - &rent_exempt_validator_account, - ); + // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR + let mut becomes_rent_exempt_validator_account = bank + .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) + .unwrap(); + becomes_rent_exempt_validator_account + .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); + bank.store_account( + &becomes_rent_exempt_validator.node_keypair.pubkey(), + &becomes_rent_exempt_validator_account, + ); - let get_rent_state = |bank: &Bank, address: &Pubkey| -> RentState { - let account = bank - .get_account_with_fixed_root(address) - .unwrap_or_default(); - RentState::from_account(&account, &rent) - }; + // Make one validator rent-exempt + let mut rent_exempt_validator_account = bank + .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) + .unwrap(); + rent_exempt_validator_account.set_lamports(rent_exempt_minimum); + bank.store_account( + &rent_exempt_validator.node_keypair.pubkey(), + &rent_exempt_validator_account, + ); - // Assert starting RentStates - assert_eq!( - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - RentState::Uninitialized - ); - assert_eq!( - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: 42, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - RentState::RentExempt - ); + let get_rent_state = |bank: &Bank, address: &Pubkey| -> RentState { + let account = bank + .get_account_with_fixed_root(address) + .unwrap_or_default(); + RentState::from_account(&account, rent) + }; - let old_empty_validator_lamports = - bank.get_balance(&empty_validator.node_keypair.pubkey()); - let 
old_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let old_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let old_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); - - let new_empty_validator_lamports = - bank.get_balance(&empty_validator.node_keypair.pubkey()); - let new_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let new_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let new_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - // Assert ending balances; rent should be withheld if test is active and ending RentState - // is RentPaying, ie. empty_validator and rent_paying_validator - assert_eq!( - if deactivate_feature { - old_empty_validator_lamports + RENT_PER_VALIDATOR - } else { - old_empty_validator_lamports - }, - new_empty_validator_lamports - ); + // Assert starting RentStates + assert_eq!( + get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), + RentState::Uninitialized + ); + assert_eq!( + get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), + RentState::RentPaying { + lamports: 42, + data_size: 0, + } + ); + assert_eq!( + get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), + RentState::RentPaying { + lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, + data_size: 0, + } + ); + assert_eq!( + get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), + RentState::RentExempt + ); - assert_eq!( - if deactivate_feature { - old_rent_paying_validator_lamports + RENT_PER_VALIDATOR - } else { - old_rent_paying_validator_lamports - }, - new_rent_paying_validator_lamports - ); + let old_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); + let old_rent_paying_validator_lamports = + bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); + let old_becomes_rent_exempt_validator_lamports = + bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); + let old_rent_exempt_validator_lamports = + bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - assert_eq!( - old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_becomes_rent_exempt_validator_lamports - ); + bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); - assert_eq!( - old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_rent_exempt_validator_lamports - ); + let new_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); + let new_rent_paying_validator_lamports = + bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); + let new_becomes_rent_exempt_validator_lamports = + bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); + let new_rent_exempt_validator_lamports = + bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - // Assert ending RentStates - assert_eq!( - if deactivate_feature { - RentState::RentPaying { - lamports: RENT_PER_VALIDATOR, - data_size: 0, - } - } else { - RentState::Uninitialized - }, - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - ); - assert_eq!( - if deactivate_feature { - RentState::RentPaying { - lamports: 
old_rent_paying_validator_lamports + RENT_PER_VALIDATOR, - data_size: 0, - } - } else { - RentState::RentPaying { - lamports: old_rent_paying_validator_lamports, - data_size: 0, - } - }, - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - ); - } + // Assert ending balances; rent should be withheld if test is active and ending RentState + // is RentPaying, ie. empty_validator and rent_paying_validator + assert_eq!(old_empty_validator_lamports, new_empty_validator_lamports); + + assert_eq!( + old_rent_paying_validator_lamports, + new_rent_paying_validator_lamports + ); + + assert_eq!( + old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, + new_becomes_rent_exempt_validator_lamports + ); + + assert_eq!( + old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, + new_rent_exempt_validator_lamports + ); + + // Assert ending RentStates + assert_eq!( + RentState::Uninitialized, + get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentPaying { + lamports: old_rent_paying_validator_lamports, + data_size: 0, + }, + get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentExempt, + get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentExempt, + get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), + ); } #[test] diff --git a/runtime/src/bank/metrics.rs b/runtime/src/bank/metrics.rs index ccf8c4837761db..fd2c19473931d8 100644 --- a/runtime/src/bank/metrics.rs +++ b/runtime/src/bank/metrics.rs @@ -167,7 +167,6 @@ pub(crate) struct RewardsStoreMetrics { pub(crate) post_capitalization: u64, } -#[allow(dead_code)] pub(crate) fn report_partitioned_reward_metrics(bank: &Bank, timings: RewardsStoreMetrics) { datapoint_info!( "bank-partitioned_epoch_rewards_credit", diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index e1746c52b79f75..ca6c6ee6adebd8 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -3,10 +3,10 @@ mod tests { use { crate::{ bank::{ - epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, BankTestConfig, - EpochRewardStatus, StartBlockHeightAndRewards, + epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, EpochRewardStatus, + StartBlockHeightAndRewards, }, - genesis_utils::{activate_all_features, activate_feature}, + genesis_utils::activate_all_features, runtime_config::RuntimeConfig, serde_snapshot::{ reserialize_bank_with_new_accounts_hash, BankIncrementalSnapshotPersistence, @@ -34,7 +34,6 @@ mod tests { }, solana_sdk::{ epoch_schedule::EpochSchedule, - feature_set, genesis_config::create_genesis_config, hash::Hash, pubkey::Pubkey, @@ -100,7 +99,6 @@ mod tests { ) { solana_logger::setup(); let (mut genesis_config, _) = create_genesis_config(500); - activate_feature(&mut genesis_config, feature_set::epoch_accounts_hash::id()); genesis_config.epoch_schedule = EpochSchedule::custom(400, 400, false); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); let eah_start_slot = epoch_accounts_hash_utils::calculation_start(&bank0); @@ -342,10 +340,7 @@ mod tests { for epoch_reward_status_active in [None, Some(vec![]), Some(vec![sample_rewards])] { let 
(genesis_config, _) = create_genesis_config(500); - let bank0 = Arc::new(Bank::new_for_tests_with_config( - &genesis_config, - BankTestConfig::default(), - )); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); bank0.squash(); let mut bank = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); @@ -535,10 +530,7 @@ mod tests { solana_logger::setup(); let (genesis_config, _) = create_genesis_config(500); - let bank0 = Arc::new(Bank::new_for_tests_with_config( - &genesis_config, - BankTestConfig::default(), - )); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); bank0.squash(); let mut bank = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); add_root_and_flush_write_cache(&bank0); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 50d68c6cd82288..b31706b6ecec70 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -16,6 +16,7 @@ use { create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, genesis_sysvar_and_builtin_program_lamports, GenesisConfigInfo, ValidatorVoteKeypairs, }, + snapshot_bank_utils, snapshot_utils, status_cache::MAX_CACHE_ENTRIES, }, assert_matches::assert_matches, @@ -25,7 +26,7 @@ use { rayon::ThreadPoolBuilder, serde::{Deserialize, Serialize}, solana_accounts_db::{ - accounts::{AccountAddressFilter, RewardInterval}, + accounts::AccountAddressFilter, accounts_db::{AccountShrinkThreshold, DEFAULT_ACCOUNTS_SHRINK_RATIO}, accounts_index::{ AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ITER_BATCH_SIZE, @@ -127,6 +128,7 @@ use { thread::Builder, time::{Duration, Instant}, }, + tempfile::TempDir, test_case::test_case, }; @@ -154,6 +156,20 @@ impl VoteReward { } } +fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock, + parent: Arc, + collector_id: &Pubkey, + slot: Slot, +) -> Arc { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() +} + #[test] fn test_race_register_tick_freeze() { solana_logger::setup(); @@ -204,7 +220,6 @@ fn new_execution_result( accounts_data_len_delta: 0, }, programs_modified_by_tx: Box::::default(), - programs_updated_only_for_global_cache: Box::::default(), } } @@ -277,13 +292,14 @@ pub(crate) fn create_simple_test_bank(lamports: u64) -> Bank { Bank::new_for_tests(&genesis_config) } -fn create_simple_test_arc_bank(lamports: u64) -> Arc { - Arc::new(create_simple_test_bank(lamports)) +fn create_simple_test_arc_bank(lamports: u64) -> (Arc, Arc>) { + let bank = create_simple_test_bank(lamports); + bank.wrap_with_bank_forks_for_tests() } #[test] fn test_bank_block_height() { - let bank0 = create_simple_test_arc_bank(1); + let bank0 = create_simple_test_arc_bank(1).0; assert_eq!(bank0.block_height(), 0); let bank1 = Arc::new(new_from_parent(bank0)); assert_eq!(bank1.block_height(), 1); @@ -421,11 +437,21 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { &genesis_config.poh_config.target_tick_duration, genesis_config.ticks_per_slot, ) as u64; - let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank = Bank::new_from_parent(root_bank, &Pubkey::default(), slot); + let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks_1.as_ref(), + root_bank, + &Pubkey::default(), + slot, + ); - let root_bank_2 = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank_with_success_txs = Bank::new_from_parent(root_bank_2, 
&Pubkey::default(), slot); + let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank_with_success_txs = new_bank_from_parent_with_bank_forks( + bank_forks_2.as_ref(), + root_bank_2, + &Pubkey::default(), + slot, + ); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -449,7 +475,6 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { let expected_rent = bank.rent_collector().collect_from_existing_account( &keypairs[4].pubkey(), &mut account_copy, - None, set_exempt_rent_epoch_max, ); assert_eq!(expected_rent.rent_amount, too_few_lamports); @@ -616,7 +641,12 @@ fn store_accounts_for_rent_test( } } -fn create_child_bank_for_rent_test(root_bank: Arc, genesis_config: &GenesisConfig) -> Bank { +fn create_child_bank_for_rent_test( + root_bank: Arc, + genesis_config: &GenesisConfig, + bank_forks: &RwLock, + mock_builtin: Option<(Pubkey, BuiltinFunctionWithContext)>, +) -> Arc { let mut bank = Bank::new_from_parent( root_bank, &Pubkey::default(), @@ -627,7 +657,14 @@ fn create_child_bank_for_rent_test(root_bank: Arc, genesis_config: &Genesi ) as u64, ); bank.rent_collector.slots_per_year = 421_812.0; - bank + if let Some((program_id, builtin_function)) = mock_builtin { + bank.add_mockup_builtin(program_id, builtin_function); + } + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() } /// if asserter returns true, check the capitalization @@ -869,6 +906,7 @@ fn test_rent_distribution() { // Enable rent collection bank.rent_collector.epoch = 5; bank.rent_collector.slots_per_year = 192.0; + let bank = bank.wrap_with_bank_forks_for_tests().0; let payer = Keypair::new(); let payer_account = AccountSharedData::new(400, 0, &system_program::id()); @@ -979,13 +1017,16 @@ fn test_rent_exempt_executable_account() { let (mut genesis_config, mint_keypair) = create_genesis_config(100_000); genesis_config.rent = rent_with_exemption_threshold(1000.0); - let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank = create_child_bank_for_rent_test(root_bank, &genesis_config); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = + create_child_bank_for_rent_test(root_bank, &genesis_config, bank_forks.as_ref(), None); let account_pubkey = solana_sdk::pubkey::new_rand(); let account_balance = 1; let mut account = AccountSharedData::new(account_balance, 0, &solana_sdk::pubkey::new_rand()); account.set_executable(true); + account.set_owner(bpf_loader_upgradeable::id()); + account.set_data(create_executable_meta(account.owner()).to_vec()); bank.store_account(&account_pubkey, &account); let transfer_lamports = 1; @@ -1024,10 +1065,10 @@ fn test_rent_complex() { MockInstruction::Deduction => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(1)?; + .checked_add_lamports(1, &invoke_context.feature_set)?; instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_sub_lamports(1)?; + .checked_sub_lamports(1, &invoke_context.feature_set)?; Ok(()) } } @@ -1044,13 +1085,16 @@ fn test_rent_complex() { genesis_config.rent = rent_with_exemption_threshold(1000.0); - let root_bank = Bank::new_for_tests(&genesis_config); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // until we completely transition to the eager rent collection, - // we must ensure lazy rent collection doens't get broken! + // we must ensure lazy rent collection doesn't get broken! 
root_bank.restore_old_behavior_for_fragile_tests(); - let root_bank = Arc::new(root_bank); - let mut bank = create_child_bank_for_rent_test(root_bank, &genesis_config); - bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); + let bank = create_child_bank_for_rent_test( + root_bank, + &genesis_config, + bank_forks.as_ref(), + Some((mock_program_id, MockBuiltin::vm)), + ); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -1233,7 +1277,7 @@ fn test_rent_collection_partitions(bank: &Bank) -> Vec { #[test] fn test_rent_eager_across_epoch_without_gap() { - let mut bank = create_simple_test_arc_bank(1); + let mut bank = create_simple_test_arc_bank(1).0; assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); bank = Arc::new(new_from_parent(bank)); @@ -1579,13 +1623,17 @@ impl Bank { } } -#[test] -fn test_rent_eager_collect_rent_in_partition() { +#[test_case(true; "enable rent fees collection")] +#[test_case(false; "disable rent fees collection")] +fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { solana_logger::setup(); let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); for feature_id in FeatureSet::default().inactive { - if feature_id != solana_sdk::feature_set::set_exempt_rent_epoch_max::id() { + if feature_id != solana_sdk::feature_set::set_exempt_rent_epoch_max::id() + && (!should_collect_rent + || feature_id != solana_sdk::feature_set::disable_rent_fees_collection::id()) + { activate_feature(&mut genesis_config, feature_id); } } @@ -1594,12 +1642,19 @@ fn test_rent_eager_collect_rent_in_partition() { let rent_due_pubkey = solana_sdk::pubkey::new_rand(); let rent_exempt_pubkey = solana_sdk::pubkey::new_rand(); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + assert_eq!(should_collect_rent, bank.should_collect_rent()); + let zero_lamports = 0; let little_lamports = 1234; let large_lamports = 123_456_789; // genesis_config.epoch_schedule.slots_per_epoch == 432_000 and is unsuitable for this test let some_slot = MINIMUM_SLOTS_PER_EPOCH; // chosen to cause epoch to be +1 - let rent_collected = 1; // this is a function of 'some_slot' + let rent_collected = if bank.should_collect_rent() { + 1 /* this is a function of 'some_slot' */ + } else { + 0 + }; bank.store_account( &zero_lamport_pubkey, @@ -1649,9 +1704,9 @@ fn test_rent_eager_collect_rent_in_partition() { bank.get_account(&rent_due_pubkey).unwrap().lamports(), little_lamports - rent_collected ); - assert_eq!( - bank.get_account(&rent_due_pubkey).unwrap().rent_epoch(), - current_epoch + 1 + assert!( + bank.get_account(&rent_due_pubkey).unwrap().rent_epoch() == current_epoch + 1 + || !bank.should_collect_rent() ); assert_eq!( bank.get_account(&rent_exempt_pubkey).unwrap().lamports(), @@ -1677,7 +1732,11 @@ fn test_rent_eager_collect_rent_in_partition() { ); } -fn new_from_parent_next_epoch(parent: Arc, epochs: Epoch) -> Bank { +fn new_from_parent_next_epoch( + parent: Arc, + bank_forks: &RwLock, + epochs: Epoch, +) -> Arc { let mut slot = parent.slot(); let mut epoch = parent.epoch(); for _ in 0..epochs { @@ -1685,7 +1744,7 @@ fn new_from_parent_next_epoch(parent: Arc, epochs: Epoch) -> Bank { epoch = parent.epoch_schedule().get_epoch(slot); } - Bank::new_from_parent(parent, &Pubkey::default(), slot) + new_bank_from_parent_with_bank_forks(bank_forks, parent, &Pubkey::default(), slot) } #[test] @@ -1696,17 +1755,21 @@ fn test_collect_rent_from_accounts() { for skip_rewrites in [false, true] { let zero_lamport_pubkey = Pubkey::from([0; 32]); - let 
genesis_bank = create_simple_test_arc_bank(100000); + let (genesis_bank, bank_forks) = create_simple_test_arc_bank(100000); let mut first_bank = new_from_parent(genesis_bank.clone()); if skip_rewrites { first_bank.activate_feature(&feature_set::skip_rent_rewrites::id()); } - let first_bank = Arc::new(first_bank); + let first_bank = bank_forks + .write() + .unwrap() + .insert(first_bank) + .clone_without_scheduler(); let first_slot = 1; assert_eq!(first_slot, first_bank.slot()); let epoch_delta = 4; - let later_bank = Arc::new(new_from_parent_next_epoch(first_bank, epoch_delta)); // a bank a few epochs in the future + let later_bank = new_from_parent_next_epoch(first_bank, bank_forks.as_ref(), epoch_delta); // a bank a few epochs in the future let later_slot = later_bank.slot(); assert!(later_bank.epoch() == genesis_bank.epoch() + epoch_delta); @@ -1806,6 +1869,7 @@ fn test_bank_update_vote_stake_rewards() { bank._load_vote_and_stake_accounts(&thread_pool, null_tracer()) }); } + #[cfg(test)] fn check_bank_update_vote_stake_rewards(load_vote_and_stake_accounts: F) where @@ -2067,11 +2131,9 @@ fn test_purge_empty_accounts() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); - let parent = Arc::new(Bank::new_for_tests_with_config( - &genesis_config, - BankTestConfig::default(), - )); - let mut bank = parent; + let (mut bank, bank_forks) = + Bank::new_for_tests(&genesis_config).wrap_with_bank_forks_for_tests(); + for _ in 0..10 { let blockhash = bank.last_blockhash(); let pubkey = solana_sdk::pubkey::new_rand(); @@ -2079,7 +2141,7 @@ fn test_purge_empty_accounts() { bank.process_transaction(&tx).unwrap(); bank.freeze(); bank.squash(); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } bank.freeze(); @@ -2089,13 +2151,13 @@ fn test_purge_empty_accounts() { bank.clean_accounts_for_tests(); assert_eq!(bank.update_accounts_hash_for_tests(), hash); - let bank0 = Arc::new(new_from_parent(bank.clone())); + let bank0 = new_from_parent_with_fork_next_slot(bank.clone(), bank_forks.as_ref()); let blockhash = bank.last_blockhash(); let keypair = Keypair::new(); let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), amount, blockhash); bank0.process_transaction(&tx).unwrap(); - let bank1 = Arc::new(new_from_parent(bank0.clone())); + let bank1 = new_from_parent_with_fork_next_slot(bank0.clone(), bank_forks.as_ref()); let pubkey = solana_sdk::pubkey::new_rand(); let blockhash = bank.last_blockhash(); let tx = system_transaction::transfer(&keypair, &pubkey, amount, blockhash); @@ -2162,7 +2224,7 @@ fn test_purge_empty_accounts() { fn test_two_payments_to_one_party() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let pubkey = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -2180,7 +2242,7 @@ fn test_one_source_two_tx_one_batch() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); 
assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -2210,7 +2272,7 @@ fn test_one_tx_two_out_atomic_fail() { let (genesis_config, mint_keypair) = create_genesis_config(amount); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let instructions = system_instruction::transfer_many( &mint_keypair.pubkey(), &[(key1, amount), (key2, amount)], @@ -2231,7 +2293,7 @@ fn test_one_tx_two_out_atomic_pass() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); let instructions = system_instruction::transfer_many( &mint_keypair.pubkey(), @@ -2253,7 +2315,7 @@ fn test_one_tx_two_out_atomic_pass() { fn test_detect_failed_duplicate_transactions() { let (mut genesis_config, mint_keypair) = create_genesis_config(10_000); genesis_config.fee_rate_governor = FeeRateGovernor::new(5_000, 0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let dest = Keypair::new(); @@ -2282,7 +2344,7 @@ fn test_detect_failed_duplicate_transactions() { fn test_account_not_found() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair = Keypair::new(); assert_eq!( bank.transfer( @@ -2300,7 +2362,7 @@ fn test_account_not_found() { fn test_insufficient_funds() { let mint_amount = sol_to_lamports(1.); let (genesis_config, mint_keypair) = create_genesis_config(mint_amount); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let amount = genesis_config.rent.minimum_balance(0); bank.transfer(amount, &mint_keypair, &pubkey).unwrap(); @@ -2328,7 +2390,7 @@ fn test_insufficient_funds() { fn test_executed_transaction_count_post_bank_transaction_count_fix() { let mint_amount = sol_to_lamports(1.); let (genesis_config, mint_keypair) = create_genesis_config(mint_amount); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); let amount = genesis_config.rent.minimum_balance(0); bank.transfer(amount, &mint_keypair, &pubkey).unwrap(); @@ -2346,8 +2408,8 @@ fn test_executed_transaction_count_post_bank_transaction_count_fix() { assert_eq!(bank.executed_transaction_count(), 2); assert_eq!(bank.transaction_error_count(), 1); - let bank = Arc::new(bank); - let bank2 = Bank::new_from_parent( + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank, &Pubkey::default(), genesis_config.epoch_schedule.first_normal_slot, @@ -2371,7 +2433,7 @@ fn test_executed_transaction_count_post_bank_transaction_count_fix() { fn test_transfer_to_newb() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); let pubkey 
     let pubkey = solana_sdk::pubkey::new_rand();
     bank.transfer(amount, &mint_keypair, &pubkey).unwrap();
@@ -2382,7 +2444,7 @@ fn test_transfer_to_sysvar() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let amount = genesis_config.rent.minimum_balance(0);
     let normal_pubkey = solana_sdk::pubkey::new_rand();
@@ -2397,7 +2459,7 @@ fn test_transfer_to_sysvar() {
     assert_eq!(bank.get_balance(&normal_pubkey), amount);
     assert_eq!(bank.get_balance(&sysvar_pubkey), 1_169_280);
-    let bank = Arc::new(new_from_parent(bank));
+    let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     assert_eq!(bank.get_balance(&normal_pubkey), amount);
     assert_eq!(bank.get_balance(&sysvar_pubkey), 1_169_280);
 }
@@ -2483,7 +2545,7 @@ fn test_bank_tx_fee() {
     let (expected_fee_collected, expected_fee_burned) =
         genesis_config.fee_rate_governor.burn(expected_fee_paid);
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let capitalization = bank.capitalization();
@@ -2532,7 +2594,7 @@ fn test_bank_tx_fee() {
     );
     // Verify that an InstructionError collects fees, too
-    let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1));
+    let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1);
     let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash());
     // Create a bogus instruction to system_program to cause an instruction error
     tx.message.instructions[0].data[0] = 40;
@@ -2589,14 +2651,12 @@ fn test_bank_tx_compute_unit_fee() {
             .create_fee_calculator()
             .lamports_per_signature,
         &FeeStructure::default(),
-        false,
-        true,
     );
     let (expected_fee_collected, expected_fee_burned) =
         genesis_config.fee_rate_governor.burn(expected_fee_paid);
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let capitalization = bank.capitalization();
@@ -2644,7 +2704,7 @@ fn test_bank_tx_compute_unit_fee() {
     );
     // Verify that an InstructionError collects fees, too
-    let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1));
+    let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1);
     let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash());
     // Create a bogus instruction to system_program to cause an instruction error
     tx.message.instructions[0].data[0] = 40;
@@ -2694,19 +2754,19 @@ fn test_bank_blockhash_fee_structure() {
         .target_lamports_per_signature = 5000;
     genesis_config.fee_rate_governor.target_signatures_per_slot = 0;
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     goto_end_of_slot(bank.clone());
     let cheap_blockhash = bank.last_blockhash();
     let cheap_lamports_per_signature = bank.get_lamports_per_signature();
     assert_eq!(cheap_lamports_per_signature, 0);
-    let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1));
+    let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1);
     goto_end_of_slot(bank.clone());
     let expensive_blockhash = bank.last_blockhash();
     let expensive_lamports_per_signature = bank.get_lamports_per_signature();
     assert!(cheap_lamports_per_signature < expensive_lamports_per_signature);
-    let bank = Bank::new_from_parent(bank, &leader, 2);
+    let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 2);
     // Send a transfer using cheap_blockhash
     let key = solana_sdk::pubkey::new_rand();
@@ -2746,19 +2806,19 @@ fn test_bank_blockhash_compute_unit_fee_structure() {
         .target_lamports_per_signature = 1000;
     genesis_config.fee_rate_governor.target_signatures_per_slot = 1;
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     goto_end_of_slot(bank.clone());
     let cheap_blockhash = bank.last_blockhash();
     let cheap_lamports_per_signature = bank.get_lamports_per_signature();
     assert_eq!(cheap_lamports_per_signature, 0);
-    let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1));
+    let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1);
     goto_end_of_slot(bank.clone());
     let expensive_blockhash = bank.last_blockhash();
     let expensive_lamports_per_signature = bank.get_lamports_per_signature();
     assert!(cheap_lamports_per_signature < expensive_lamports_per_signature);
-    let bank = Bank::new_from_parent(bank, &leader, 2);
+    let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 2);
     // Send a transfer using cheap_blockhash
     let key = solana_sdk::pubkey::new_rand();
@@ -2770,8 +2830,6 @@ fn test_bank_blockhash_compute_unit_fee_structure() {
         &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(),
         cheap_lamports_per_signature,
         &FeeStructure::default(),
-        false,
-        true,
     );
     assert_eq!(
         bank.get_balance(&mint_keypair.pubkey()),
@@ -2788,8 +2846,6 @@ fn test_bank_blockhash_compute_unit_fee_structure() {
         &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(),
         expensive_lamports_per_signature,
         &FeeStructure::default(),
-        false,
-        true,
     );
     assert_eq!(
         bank.get_balance(&mint_keypair.pubkey()),
@@ -2901,8 +2957,6 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() {
                     .create_fee_calculator()
                     .lamports_per_signature,
                 &FeeStructure::default(),
-                false,
-                true,
             ) * 2
         )
         .0
@@ -2914,7 +2968,7 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() {
 #[test]
 fn test_debits_before_credits() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(2.));
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let keypair = Keypair::new();
     let tx0 = system_transaction::transfer(
         &mint_keypair,
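Child banks follow the same scheme: `Arc::new(Bank::new_from_parent(...))` becomes `new_bank_from_parent_with_bank_forks(...)`, so every descendant stays registered in the shared `BankForks`. A hedged sketch of the slot-advancing sequence these fee tests now use, assuming the helper signature shown in the hunks:

// Sketch: two successive children of the shared fork, mirroring the
// replacement calls above (leader and bank_forks come from the test setup).
let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1);
goto_end_of_slot(bank.clone());
let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 2);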
@@ -2944,7 +2998,7 @@ fn test_readonly_accounts() {
     mint_keypair,
     ..
 } = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let vote_pubkey0 = solana_sdk::pubkey::new_rand();
     let vote_pubkey1 = solana_sdk::pubkey::new_rand();
@@ -3016,7 +3070,7 @@ fn test_readonly_accounts() {
 #[test]
 fn test_interleaving_locks() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let alice = Keypair::new();
     let bob = Keypair::new();
     let amount = genesis_config.rent.minimum_balance(0);
@@ -3153,7 +3207,7 @@ fn test_bank_invalid_account_index() {
 fn test_bank_pay_to_self() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
     let key1 = Keypair::new();
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let amount = genesis_config.rent.minimum_balance(0);
     bank.transfer(amount, &mint_keypair, &key1.pubkey())
@@ -3174,6 +3228,11 @@ fn new_from_parent(parent: Arc<Bank>) -> Bank {
     Bank::new_from_parent(parent, &collector_id, slot)
 }
+fn new_from_parent_with_fork_next_slot(parent: Arc<Bank>, fork: &RwLock<BankForks>) -> Arc<Bank> {
+    let slot = parent.slot() + 1;
+    new_bank_from_parent_with_bank_forks(fork, parent, &Pubkey::default(), slot)
+}
+
 /// Verify that the parent's vector is computed correctly
 #[test]
 fn test_bank_parents() {
@@ -3188,7 +3247,7 @@ fn test_bank_parents() {
 #[test]
 fn test_tx_already_processed() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let key1 = Keypair::new();
     let mut tx = system_transaction::transfer(
@@ -3223,13 +3282,13 @@ fn test_tx_already_processed() {
 fn test_bank_parent_already_processed() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
     let key1 = Keypair::new();
-    let parent = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let amount = genesis_config.rent.minimum_balance(0);
     let tx =
         system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash());
     assert_eq!(parent.process_transaction(&tx), Ok(()));
-    let bank = new_from_parent(parent);
+    let bank = new_from_parent_with_fork_next_slot(parent, bank_forks.as_ref());
     assert_eq!(
         bank.process_transaction(&tx),
         Err(TransactionError::AlreadyProcessed)
@@ -3242,13 +3301,13 @@ fn test_bank_parent_account_spend() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0));
     let key1 = Keypair::new();
     let key2 = Keypair::new();
-    let parent = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let amount = genesis_config.rent.minimum_balance(0);
     let tx =
         system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash());
     assert_eq!(parent.process_transaction(&tx), Ok(()));
-    let bank = new_from_parent(parent.clone());
+    let bank = new_from_parent_with_fork_next_slot(parent.clone(), bank_forks.as_ref());
     let tx = system_transaction::transfer(&key1, &key2.pubkey(), amount, genesis_config.hash());
     assert_eq!(bank.process_transaction(&tx), Ok(()));
     assert_eq!(parent.get_signature_status(&tx.signatures[0]), None);
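The hunk above introduces `new_from_parent_with_fork_next_slot`, the fork-aware counterpart of the old `new_from_parent` helper. A usage sketch under the same assumptions (parent and forks handle come from `Bank::new_with_bank_forks_for_tests`):

// Sketch: advance one slot while keeping the child registered in the fork.
let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
let child = new_from_parent_with_fork_next_slot(parent, bank_forks.as_ref());
assert_eq!(child.slot(), 1);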
@@ -3257,8 +3316,8 @@ fn test_bank_parent_account_spend() {
 #[test]
 fn test_bank_hash_internal_state() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let bank0 = Bank::new_for_tests(&genesis_config);
-    let bank1 = Bank::new_for_tests(&genesis_config);
+    let (bank0, _) = Bank::new_with_bank_forks_for_tests(&genesis_config);
+    let (bank1, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let amount = genesis_config.rent.minimum_balance(0);
     let initial_state = bank0.hash_internal_state();
     assert_eq!(bank1.hash_internal_state(), initial_state);
@@ -3270,8 +3329,7 @@ fn test_bank_hash_internal_state() {
     assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
     // Checkpointing should always result in a new state
-    let bank1 = Arc::new(bank1);
-    let bank2 = new_from_parent(bank1.clone());
+    let bank2 = new_from_parent_with_fork_next_slot(bank1.clone(), bank_forks_1.as_ref());
     assert_ne!(bank0.hash_internal_state(), bank2.hash_internal_state());
     let pubkey2 = solana_sdk::pubkey::new_rand();
@@ -3289,7 +3347,7 @@ fn test_bank_hash_internal_state_verify() {
     for pass in 0..3 {
         solana_logger::setup();
         let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-        let bank0 = Bank::new_for_tests(&genesis_config);
+        let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
         let amount = genesis_config.rent.minimum_balance(0);
         let pubkey = solana_sdk::pubkey::new_rand();
@@ -3297,9 +3355,13 @@ fn test_bank_hash_internal_state_verify() {
         bank0.transfer(amount, &mint_keypair, &pubkey).unwrap();
         let bank0_state = bank0.hash_internal_state();
-        let bank0 = Arc::new(bank0);
         // Checkpointing should result in a new state while freezing the parent
-        let bank2 = Bank::new_from_parent(bank0.clone(), &solana_sdk::pubkey::new_rand(), 1);
+        let bank2 = new_bank_from_parent_with_bank_forks(
+            bank_forks.as_ref(),
+            bank0.clone(),
+            &solana_sdk::pubkey::new_rand(),
+            1,
+        );
         assert_ne!(bank0_state, bank2.hash_internal_state());
         // Checkpointing should modify the checkpoint's state when frozen
         assert_ne!(bank0_state, bank0.hash_internal_state());
@@ -3313,7 +3375,12 @@ fn test_bank_hash_internal_state_verify() {
             bank2.update_accounts_hash_for_tests();
             assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test()));
         }
-        let bank3 = Bank::new_from_parent(bank0.clone(), &solana_sdk::pubkey::new_rand(), 2);
+        let bank3 = new_bank_from_parent_with_bank_forks(
+            bank_forks.as_ref(),
+            bank0.clone(),
+            &solana_sdk::pubkey::new_rand(),
+            2,
+        );
         assert_eq!(bank0_state, bank0.hash_internal_state());
         if pass == 0 {
             // this relies on us having set the bank hash in the pass==0 if above
@@ -3354,7 +3421,7 @@ fn test_verify_snapshot_bank() {
     solana_logger::setup();
     let pubkey = solana_sdk::pubkey::new_rand();
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
    bank.transfer(
        genesis_config.rent.minimum_balance(0),
        &mint_keypair,
@@ -3377,9 +3444,14 @@ fn test_bank_hash_internal_state_same_account_different_fork() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
     let amount = genesis_config.rent.minimum_balance(0);
-    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let initial_state = bank0.hash_internal_state();
-    let bank1 = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1);
+    let bank1 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank0.clone(),
+        &Pubkey::default(),
+        1,
+    );
     assert_ne!(bank1.hash_internal_state(), initial_state);
     info!("transfer bank1");
@@ -3389,7 +3461,8 @@ fn test_bank_hash_internal_state_same_account_different_fork() {
     info!("transfer bank2");
     // bank2 should not hash the same as bank1
-    let bank2 = Bank::new_from_parent(bank0, &Pubkey::default(), 2);
+    let bank2 =
+        new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &Pubkey::default(), 2);
     bank2.transfer(amount, &mint_keypair, &pubkey).unwrap();
     assert_ne!(bank2.hash_internal_state(), initial_state);
     assert_ne!(bank1.hash_internal_state(), bank2.hash_internal_state());
@@ -3408,8 +3481,8 @@ fn test_hash_internal_state_genesis() {
 fn test_hash_internal_state_order() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
     let amount = genesis_config.rent.minimum_balance(0);
-    let bank0 = Bank::new_for_tests(&genesis_config);
-    let bank1 = Bank::new_for_tests(&genesis_config);
+    let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
+    let bank1 = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
     let key0 = solana_sdk::pubkey::new_rand();
     let key1 = solana_sdk::pubkey::new_rand();
@@ -3427,7 +3500,7 @@ fn test_hash_internal_state_error() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
     let amount = genesis_config.rent.minimum_balance(0);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let key0 = solana_sdk::pubkey::new_rand();
     bank.transfer(amount, &mint_keypair, &key0).unwrap();
     let orig = bank.hash_internal_state();
@@ -3469,7 +3542,7 @@ fn test_bank_squash() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(2.));
     let key1 = Keypair::new();
     let key2 = Keypair::new();
-    let parent = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let amount = genesis_config.rent.minimum_balance(0);
     let tx_transfer_mint_to_1 =
@@ -3485,7 +3558,7 @@ fn test_bank_squash() {
     );
     trace!("new from parent");
-    let bank = new_from_parent(parent.clone());
+    let bank = new_from_parent_with_fork_next_slot(parent.clone(), bank_forks.as_ref());
     trace!("done new from parent");
     assert_eq!(
         bank.get_signature_status(&tx_transfer_mint_to_1.signatures[0]),
@@ -3538,7 +3611,7 @@ fn test_bank_squash() {
 #[test]
 fn test_bank_get_account_in_parent_after_squash() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let parent = Arc::new(Bank::new_for_tests(&genesis_config));
+    let parent = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let amount = genesis_config.rent.minimum_balance(0);
     let key1 = Keypair::new();
@@ -3556,7 +3629,7 @@ fn test_bank_get_account_in_parent_after_squash() {
 fn test_bank_get_account_in_parent_after_squash2() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
-    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let amount = genesis_config.rent.minimum_balance(0);
     let key1 = Keypair::new();
@@ -3566,15 +3639,31 @@ fn test_bank_get_account_in_parent_after_squash2() {
         .unwrap();
     assert_eq!(bank0.get_balance(&key1.pubkey()), amount);
-    let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1));
+    let bank1 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank0.clone(),
+        &Pubkey::default(),
+        1,
+    );
     bank1
         .transfer(3 * amount, &mint_keypair, &key1.pubkey())
         .unwrap();
-    let bank2 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 2));
+    let bank2 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank0.clone(),
+        &Pubkey::default(),
+        2,
+    );
     bank2
         .transfer(2 * amount, &mint_keypair, &key1.pubkey())
         .unwrap();
-    let bank3 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 3));
+
+    let bank3 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank1.clone(),
+        &Pubkey::default(),
+        3,
+    );
     bank1.squash();
     // This picks up the values from 1 which is the highest root:
@@ -3586,16 +3675,27 @@ fn test_bank_get_account_in_parent_after_squash2() {
     bank3.squash();
     assert_eq!(bank1.get_balance(&key1.pubkey()), 4 * amount);
-    let bank4 = Arc::new(Bank::new_from_parent(bank3.clone(), &Pubkey::default(), 4));
+    let bank4 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank3.clone(),
+        &Pubkey::default(),
+        4,
+    );
     bank4
         .transfer(4 * amount, &mint_keypair, &key1.pubkey())
        .unwrap();
     assert_eq!(bank4.get_balance(&key1.pubkey()), 8 * amount);
     assert_eq!(bank3.get_balance(&key1.pubkey()), 4 * amount);
     bank4.squash();
-    let bank5 = Arc::new(Bank::new_from_parent(bank4.clone(), &Pubkey::default(), 5));
+    let bank5 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank4.clone(),
+        &Pubkey::default(),
+        5,
+    );
     bank5.squash();
-    let bank6 = Arc::new(Bank::new_from_parent(bank5, &Pubkey::default(), 6));
+    let bank6 =
+        new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank5, &Pubkey::default(), 6);
     bank6.squash();
     // This picks up the values from 4 which is the highest root:
@@ -3613,7 +3713,7 @@ fn test_bank_get_account_modified_since_parent_with_fixed_root() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.));
     let amount = genesis_config.rent.minimum_balance(0);
-    let bank1 = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank1, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     bank1.transfer(amount, &mint_keypair, &pubkey).unwrap();
     let result = bank1.get_account_modified_since_parent_with_fixed_root(&pubkey);
     assert!(result.is_some());
@@ -3621,7 +3721,12 @@ fn test_bank_get_account_modified_since_parent_with_fixed_root() {
     assert_eq!(account.lamports(), amount);
     assert_eq!(slot, 0);
-    let bank2 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 1));
+    let bank2 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank1.clone(),
+        &Pubkey::default(),
+        1,
+    );
     assert!(bank2
         .get_account_modified_since_parent_with_fixed_root(&pubkey)
         .is_none());
@@ -3639,7 +3744,8 @@ fn test_bank_get_account_modified_since_parent_with_fixed_root() {
     bank1.squash();
-    let bank3 = Bank::new_from_parent(bank2, &Pubkey::default(), 3);
+    let bank3 =
+        new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &Pubkey::default(), 3);
     assert_eq!(
         None,
         bank3.get_account_modified_since_parent_with_fixed_root(&pubkey)
@@ -3664,10 +3770,7 @@ fn test_bank_update_sysvar_account() {
     for feature_id in FeatureSet::default().inactive {
         activate_feature(&mut genesis_config, feature_id);
     }
-    let bank1 = Arc::new(Bank::new_for_tests_with_config(
-        &genesis_config,
-        BankTestConfig::default(),
-    ));
+    let bank1 = Arc::new(Bank::new_for_tests(&genesis_config));
     if pass == 0 {
         add_root_and_flush_write_cache(&bank1);
         assert_eq!(bank1.calculate_capitalization(true), bank1.capitalization());
@@ -3865,7 +3968,7 @@ fn test_bank_epoch_vote_accounts() {
     // epoch_stakes are a snapshot at the leader_schedule_slot_offset boundary
     // in the prior epoch (0 in this case)
     assert_eq!(
-        leader_stake.stake(0, None, None),
+        leader_stake.stake(0, &StakeHistory::default(), None),
         vote_accounts.unwrap().get(&leader_vote_account).unwrap().0
     );
@@ -3881,7 +3984,7 @@ fn test_bank_epoch_vote_accounts() {
     assert!(child.epoch_vote_accounts(epoch).is_some());
     assert_eq!(
-        leader_stake.stake(child.epoch(), None, None),
+        leader_stake.stake(child.epoch(), &StakeHistory::default(), None),
         child
             .epoch_vote_accounts(epoch)
             .unwrap()
@@ -3899,7 +4002,7 @@ fn test_bank_epoch_vote_accounts() {
     );
     assert!(child.epoch_vote_accounts(epoch).is_some());
     assert_eq!(
-        leader_stake.stake(child.epoch(), None, None),
+        leader_stake.stake(child.epoch(), &StakeHistory::default(), None),
         child
             .epoch_vote_accounts(epoch)
             .unwrap()
@@ -3946,7 +4049,7 @@ fn test_bank_get_slots_in_epoch() {
 #[test]
 fn test_is_delta_true() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0));
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let key1 = Keypair::new();
     let tx_transfer_mint_to_1 = system_transaction::transfer(
         &mint_keypair,
@@ -3970,7 +4073,7 @@ fn test_is_delta_true() {
 #[test]
 fn test_is_empty() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0));
-    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
+    let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let key1 = Keypair::new();
     // The zeroth bank is empty because there are no transactions
@@ -3990,16 +4093,22 @@ fn test_is_empty() {
 #[test]
 fn test_bank_inherit_tx_count() {
     let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0));
-    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     // Bank 1
-    let bank1 = Arc::new(Bank::new_from_parent(
+    let bank1 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
         bank0.clone(),
         &solana_sdk::pubkey::new_rand(),
         1,
-    ));
+    );
     // Bank 2
-    let bank2 = Bank::new_from_parent(bank0.clone(), &solana_sdk::pubkey::new_rand(), 2);
+    let bank2 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank0.clone(),
+        &solana_sdk::pubkey::new_rand(),
+        2,
+    );
     // transfer a token
     assert_eq!(
@@ -4028,7 +4137,12 @@ fn test_bank_inherit_tx_count() {
     assert_eq!(bank1.transaction_count(), 1);
     assert_eq!(bank1.non_vote_transaction_count_since_restart(), 1);
-    let bank6 = Bank::new_from_parent(bank1.clone(), &solana_sdk::pubkey::new_rand(), 3);
+    let bank6 = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
+        bank1.clone(),
+        &solana_sdk::pubkey::new_rand(),
+        3,
+    );
     assert_eq!(bank1.transaction_count(), 1);
     assert_eq!(bank1.non_vote_transaction_count_since_restart(), 1);
     assert_eq!(bank6.transaction_count(), 1);
@@ -4064,7 +4178,7 @@ fn test_bank_vote_accounts() {
     mint_keypair,
     ..
 } = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 1);
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let vote_accounts = bank.vote_accounts();
     assert_eq!(vote_accounts.len(), 1); // bootstrap validator has
@@ -4121,7 +4235,7 @@ fn test_bank_cloned_stake_delegations() {
         123_000_000_000,
     );
     genesis_config.rent = Rent::default();
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let stake_delegations = bank.stakes_cache.stakes().stake_delegations().clone();
     assert_eq!(stake_delegations.len(), 1); // bootstrap validator has
@@ -4198,7 +4312,7 @@ fn test_bank_fees_account() {
 #[test]
 fn test_is_delta_with_no_committables() {
     let (genesis_config, mint_keypair) = create_genesis_config(8000);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     bank.is_delta.store(false, Relaxed);
     let keypair1 = Keypair::new();
@@ -4414,7 +4528,7 @@ fn test_get_filtered_indexed_accounts() {
 #[test]
 fn test_status_cache_ancestors() {
     solana_logger::setup();
-    let parent = create_simple_test_arc_bank(500);
+    let parent = create_simple_test_arc_bank(500).0;
     let bank1 = Arc::new(new_from_parent(parent));
     let mut bank = bank1;
     for _ in 0..MAX_CACHE_ENTRIES * 2 {
@@ -4475,6 +4589,7 @@ fn test_add_builtin() {
         bank.last_blockhash(),
     );
+    let bank = bank.wrap_with_bank_forks_for_tests().0;
     assert_eq!(
         bank.process_transaction(&transaction),
         Err(TransactionError::InstructionError(
@@ -4491,7 +4606,7 @@ fn test_add_duplicate_static_program() {
         mint_keypair,
         ..
     } = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     declare_process_instruction!(MockBuiltin, 1, |_invoke_context| {
         Err(InstructionError::Custom(42))
     });
@@ -4521,10 +4636,15 @@ fn test_add_duplicate_static_program() {
     );
     let slot = bank.slot().saturating_add(1);
-    let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot);
+    let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot);
+    bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
+    let bank = bank_forks
+        .write()
+        .unwrap()
+        .insert(bank)
+        .clone_without_scheduler();
     let vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap();
-    bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
     let new_vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap();
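The hunk above also shows the new insert-after-mutation idiom: build the child bank unwrapped, mutate it (here by adding a mock builtin), then hand it to `BankForks`, which returns the tracked `Arc<Bank>`. The pattern, lifted directly from the diff:

// Sketch of the BankForks registration step; clone_without_scheduler drops
// the scheduler handle that these tests do not need.
let slot = bank.slot().saturating_add(1);
let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot);
bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
let bank = bank_forks
    .write()
    .unwrap()
    .insert(bank)
    .clone_without_scheduler();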
     // Vote loader account should not be updated since it was included in the genesis config.
     assert_eq!(vote_loader_account.data(), new_vote_loader_account.data());
@@ -4633,7 +4753,7 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() {
 #[allow(deprecated)]
 #[test]
 fn test_recent_blockhashes_sysvar() {
-    let mut bank = create_simple_test_arc_bank(500);
+    let mut bank = create_simple_test_arc_bank(500).0;
     for i in 1..5 {
         let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap();
         let recent_blockhashes =
@@ -4651,7 +4771,7 @@ fn test_recent_blockhashes_sysvar() {
 #[allow(deprecated)]
 #[test]
 fn test_blockhash_queue_sysvar_consistency() {
-    let bank = create_simple_test_arc_bank(100_000);
+    let bank = create_simple_test_arc_bank(100_000).0;
     goto_end_of_slot(bank.clone());
     let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap();
@@ -4713,7 +4833,7 @@ fn test_banks_leak() {
     solana_logger::setup();
     let (mut genesis_config, _) = create_genesis_config(100_000_000_000_000);
     add_lotsa_stake_accounts(&mut genesis_config);
-    let mut bank = std::sync::Arc::new(Bank::new_for_tests(&genesis_config));
+    let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let mut num_banks = 0;
     let pid = std::process::id();
     #[cfg(not(target_os = "linux"))]
     );
     loop {
         num_banks += 1;
-        bank = std::sync::Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
         if num_banks % 100 == 0 {
             #[cfg(target_os = "linux")]
             {
@@ -4790,6 +4910,8 @@ fn nonce_setup(
     Ok((custodian_keypair, nonce_keypair))
 }
+type NonceSetup = (Arc<Bank>, Keypair, Keypair, Keypair, Arc<RwLock<BankForks>>);
+
 fn setup_nonce_with_bank(
     supply_lamports: u64,
     mut genesis_cfg_fn: F,
@@ -4797,7 +4919,7 @@
     nonce_lamports: u64,
     nonce_authority: Option<Pubkey>,
     feature_set: FeatureSet,
-) -> Result<(Arc<Bank>, Keypair, Keypair, Keypair)>
+) -> Result<NonceSetup>
 where
     F: FnMut(&mut GenesisConfig),
 {
@@ -4806,13 +4928,13 @@
     genesis_cfg_fn(&mut genesis_config);
     let mut bank = Bank::new_for_tests(&genesis_config);
     bank.feature_set = Arc::new(feature_set);
-    let mut bank = Arc::new(bank);
+    let (mut bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();
     // Banks 0 and 1 have no fees, wait two blocks before
     // initializing our nonce accounts
     for _ in 0..2 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     let (custodian_keypair, nonce_keypair) = nonce_setup(
@@ -4826,9 +4948,15 @@ where
     // The setup nonce is not valid to be used until the next bank
     // so wait one more block
     goto_end_of_slot(bank.clone());
-    bank = Arc::new(new_from_parent(bank));
+    bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
-    Ok((bank, mint_keypair, custodian_keypair, nonce_keypair))
+    Ok((
+        bank,
+        mint_keypair,
+        custodian_keypair,
+        nonce_keypair,
+        bank_forks,
+    ))
 }
 impl Bank {
@@ -4841,7 +4969,7 @@ impl Bank {
 #[test]
 fn test_check_transaction_for_nonce_ok() {
-    let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
+    let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank(
         10_000_000,
         |_| {},
         5_000_000,
         250_000,
         None,
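The `NonceSetup` alias above widens the nonce fixture to carry the `BankForks` handle. A usage sketch, assuming the alias shape reconstructed in the hunk; callers that do not advance slots bind the last field to `_`:

// Sketch: destructuring the widened fixture tuple.
let (bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
    setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None, FeatureSet::all_enabled())
        .unwrap();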
@@ -4875,7 +5003,7 @@ fn test_check_transaction_for_nonce_ok() {
 #[test]
 fn test_check_transaction_for_nonce_not_nonce_fail() {
-    let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
+    let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank(
         10_000_000,
         |_| {},
         5_000_000,
@@ -4907,7 +5035,7 @@ fn test_check_transaction_for_nonce_not_nonce_fail() {
 #[test]
 fn test_check_transaction_for_nonce_missing_ix_pubkey_fail() {
-    let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
+    let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank(
         10_000_000,
         |_| {},
         5_000_000,
@@ -4940,7 +5068,7 @@ fn test_check_transaction_for_nonce_missing_ix_pubkey_fail() {
 #[test]
 fn test_check_transaction_for_nonce_nonce_acc_does_not_exist_fail() {
-    let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
+    let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank(
         10_000_000,
         |_| {},
         5_000_000,
@@ -4974,7 +5102,7 @@ fn test_check_transaction_for_nonce_nonce_acc_does_not_exist_fail() {
 #[test]
 fn test_check_transaction_for_nonce_bad_tx_hash_fail() {
-    let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
+    let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank(
         10_000_000,
         |_| {},
         5_000_000,
@@ -5005,7 +5133,7 @@ fn test_check_transaction_for_nonce_bad_tx_hash_fail() {
 #[test]
 fn test_assign_from_nonce_account_fail() {
-    let bank = create_simple_test_arc_bank(100_000_000);
+    let bank = create_simple_test_arc_bank(100_000_000).0;
     let nonce = Keypair::new();
     let nonce_account = AccountSharedData::new_data(
         42_424_242,
@@ -5031,7 +5159,7 @@ fn test_nonce_must_be_advanceable() {
     let mut bank = create_simple_test_bank(100_000_000);
     bank.feature_set = Arc::new(FeatureSet::all_enabled());
-    let bank = Arc::new(bank);
+    let bank = bank.wrap_with_bank_forks_for_tests().0;
     let nonce_keypair = Keypair::new();
     let nonce_authority = nonce_keypair.pubkey();
     let durable_nonce = DurableNonce::from_blockhash(&bank.last_blockhash());
@@ -5058,15 +5186,16 @@
 #[test]
 fn test_nonce_transaction() {
-    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
-        10_000_000,
-        |_| {},
-        5_000_000,
-        250_000,
-        None,
-        FeatureSet::all_enabled(),
-    )
-    .unwrap();
+    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
+        setup_nonce_with_bank(
+            10_000_000,
+            |_| {},
+            5_000_000,
+            250_000,
+            None,
+            FeatureSet::all_enabled(),
+        )
+        .unwrap();
     let alice_keypair = Keypair::new();
     let alice_pubkey = alice_keypair.pubkey();
     let custodian_pubkey = custodian_keypair.pubkey();
@@ -5081,7 +5210,7 @@ fn test_nonce_transaction() {
     /* Kick nonce hash off the blockhash_queue */
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     /* Expect a non-Nonce transfer to fail */
@@ -5150,7 +5279,7 @@ fn test_nonce_transaction() {
     /* Kick nonce hash off the blockhash_queue */
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     let nonce_tx = Transaction::new_signed_with_payer(
@@ -5192,7 +5321,7 @@ fn test_nonce_transaction() {
 #[test]
 fn test_nonce_transaction_with_tx_wide_caps() {
     let feature_set = FeatureSet::all_enabled();
-    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) =
+    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
         setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None, feature_set).unwrap();
     let alice_keypair = Keypair::new();
     let alice_pubkey = alice_keypair.pubkey();
@@ -5208,7 +5337,7 @@ fn test_nonce_transaction_with_tx_wide_caps() {
     /* Kick nonce hash off the blockhash_queue */
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     /* Expect a non-Nonce transfer to fail */
@@ -5277,7 +5406,7 @@ fn test_nonce_transaction_with_tx_wide_caps() {
     /* Kick nonce hash off the blockhash_queue */
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     let nonce_tx = Transaction::new_signed_with_payer(
@@ -5319,15 +5448,16 @@ fn test_nonce_transaction_with_tx_wide_caps() {
 #[test]
 fn test_nonce_authority() {
     solana_logger::setup();
-    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
-        10_000_000,
-        |_| {},
-        5_000_000,
-        250_000,
-        None,
-        FeatureSet::all_enabled(),
-    )
-    .unwrap();
+    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
+        setup_nonce_with_bank(
+            10_000_000,
+            |_| {},
+            5_000_000,
+            250_000,
+            None,
+            FeatureSet::all_enabled(),
+        )
+        .unwrap();
     let alice_keypair = Keypair::new();
     let alice_pubkey = alice_keypair.pubkey();
     let custodian_pubkey = custodian_keypair.pubkey();
@@ -5345,7 +5475,7 @@ fn test_nonce_authority() {
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     let nonce_tx = Transaction::new_signed_with_payer(
@@ -5380,15 +5510,16 @@ fn test_nonce_authority() {
 fn test_nonce_payer() {
     solana_logger::setup();
     let nonce_starting_balance = 250_000;
-    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
-        10_000_000,
-        |_| {},
-        5_000_000,
-        nonce_starting_balance,
-        None,
-        FeatureSet::all_enabled(),
-    )
-    .unwrap();
+    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
+        setup_nonce_with_bank(
+            10_000_000,
+            |_| {},
+            5_000_000,
+            nonce_starting_balance,
+            None,
+            FeatureSet::all_enabled(),
+        )
+        .unwrap();
     let alice_keypair = Keypair::new();
     let alice_pubkey = alice_keypair.pubkey();
     let custodian_pubkey = custodian_keypair.pubkey();
@@ -5403,7 +5534,7 @@ fn test_nonce_payer() {
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     let nonce_tx = Transaction::new_signed_with_payer(
@@ -5445,15 +5576,16 @@ fn test_nonce_payer_tx_wide_cap() {
     let nonce_starting_balance =
         250_000 + FeeStructure::default().compute_fee_bins.last().unwrap().fee;
     let feature_set = FeatureSet::all_enabled();
-    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
-        10_000_000,
-        |_| {},
-        5_000_000,
-        nonce_starting_balance,
-        None,
-        feature_set,
-    )
-    .unwrap();
+    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
+        setup_nonce_with_bank(
+            10_000_000,
+            |_| {},
+            5_000_000,
+            nonce_starting_balance,
+            None,
+            feature_set,
+        )
+        .unwrap();
     let alice_keypair = Keypair::new();
     let alice_pubkey = alice_keypair.pubkey();
     let custodian_pubkey = custodian_keypair.pubkey();
@@ -5468,7 +5600,7 @@
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     let nonce_tx = Transaction::new_signed_with_payer(
@@ -5511,7 +5643,7 @@ fn test_nonce_fee_calculator_updates() {
     genesis_config.rent.lamports_per_byte_year = 0;
     let mut bank = Bank::new_for_tests(&genesis_config);
     bank.feature_set = Arc::new(FeatureSet::all_enabled());
-    let mut bank = Arc::new(bank);
+    let (mut bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();
     // Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees
     let (custodian_keypair, nonce_keypair) =
@@ -5536,7 +5668,7 @@ fn test_nonce_fee_calculator_updates() {
     // Kick nonce hash off the blockhash_queue
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     // Nonce transfer
@@ -5579,7 +5711,7 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() {
     genesis_config.rent.lamports_per_byte_year = 0;
     let mut bank = Bank::new_for_tests(&genesis_config);
     bank.feature_set = Arc::new(FeatureSet::all_enabled());
-    let mut bank = Arc::new(bank);
+    let (mut bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();
     // Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees
     let (custodian_keypair, nonce_keypair) =
@@ -5604,7 +5736,7 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() {
     // Kick nonce hash off the blockhash_queue
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
     }
     // Nonce transfer
@@ -5643,15 +5775,16 @@
 #[test]
 fn test_check_ro_durable_nonce_fails() {
-    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank(
-        10_000_000,
-        |_| {},
-        5_000_000,
-        250_000,
-        None,
-        FeatureSet::all_enabled(),
-    )
-    .unwrap();
+    let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) =
+        setup_nonce_with_bank(
+            10_000_000,
+            |_| {},
+            5_000_000,
+            250_000,
+            None,
+            FeatureSet::all_enabled(),
+        )
+        .unwrap();
     let custodian_pubkey = custodian_keypair.pubkey();
     let nonce_pubkey = nonce_keypair.pubkey();
@@ -5684,7 +5817,7 @@ fn test_check_ro_durable_nonce_fails() {
     // Kick nonce hash off the blockhash_queue
     for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
         goto_end_of_slot(bank.clone());
-        bank = Arc::new(new_from_parent(bank));
+        bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref())
     }
     // Caught by the runtime because it is a nonce transaction
     assert_eq!(
@@ -5702,7 +5835,7 @@ fn test_check_ro_durable_nonce_fails() {
 #[test]
 fn test_collect_balances() {
-    let parent = create_simple_test_arc_bank(500);
+    let parent = create_simple_test_arc_bank(500).0;
     let bank0 = Arc::new(new_from_parent(parent));
     let keypair = Keypair::new();
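Every nonce test above repeats the same expiry idiom, now routed through the fork-aware helper. The loop, as it appears throughout these hunks:

// Sketch: roll the fork forward until the nonce's stored blockhash ages
// out of the blockhash queue, so only the durable nonce can pay for it.
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
    goto_end_of_slot(bank.clone());
    bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref());
}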
@@ -5752,8 +5885,8 @@ fn test_pre_post_transaction_balances() {
     let (mut genesis_config, _mint_keypair) = create_genesis_config(500_000);
     let fee_rate_governor = FeeRateGovernor::new(5000, 0);
     genesis_config.fee_rate_governor = fee_rate_governor;
-    let parent = Arc::new(Bank::new_for_tests(&genesis_config));
-    let bank0 = Arc::new(new_from_parent(parent));
+    let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
+    let bank0 = new_from_parent_with_fork_next_slot(parent, bank_forks.as_ref());
     let keypair0 = Keypair::new();
     let keypair1 = Keypair::new();
@@ -5837,7 +5970,11 @@ fn test_pre_post_transaction_balances() {
 #[test]
 fn test_transaction_with_duplicate_accounts_in_instruction() {
     let (genesis_config, mint_keypair) = create_genesis_config(500);
-    let mut bank = Bank::new_for_tests(&genesis_config);
+
+    let mock_program_id = Pubkey::from([2u8; 32]);
+    let bank =
+        Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm)
+            .0;
     declare_process_instruction!(MockBuiltin, 1, |invoke_context| {
         let transaction_context = &invoke_context.transaction_context;
@@ -5846,22 +5983,19 @@ fn test_transaction_with_duplicate_accounts_in_instruction() {
         let lamports = u64::from_le_bytes(instruction_data.try_into().unwrap());
         instruction_context
             .try_borrow_instruction_account(transaction_context, 2)?
-            .checked_sub_lamports(lamports)?;
+            .checked_sub_lamports(lamports, &invoke_context.feature_set)?;
         instruction_context
             .try_borrow_instruction_account(transaction_context, 1)?
-            .checked_add_lamports(lamports)?;
+            .checked_add_lamports(lamports, &invoke_context.feature_set)?;
         instruction_context
             .try_borrow_instruction_account(transaction_context, 0)?
-            .checked_sub_lamports(lamports)?;
+            .checked_sub_lamports(lamports, &invoke_context.feature_set)?;
         instruction_context
             .try_borrow_instruction_account(transaction_context, 1)?
-            .checked_add_lamports(lamports)?;
+            .checked_add_lamports(lamports, &invoke_context.feature_set)?;
         Ok(())
     });
-    let mock_program_id = Pubkey::from([2u8; 32]);
-    bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm);
-
     let from_pubkey = solana_sdk::pubkey::new_rand();
     let to_pubkey = solana_sdk::pubkey::new_rand();
     let dup_pubkey = from_pubkey;
@@ -5893,10 +6027,11 @@ fn test_transaction_with_duplicate_accounts_in_instruction() {
 #[test]
 fn test_transaction_with_program_ids_passed_to_programs() {
     let (genesis_config, mint_keypair) = create_genesis_config(500);
-    let mut bank = Bank::new_for_tests(&genesis_config);
     let mock_program_id = Pubkey::from([2u8; 32]);
-    bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm);
+    let bank =
+        Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm)
+            .0;
     let from_pubkey = solana_sdk::pubkey::new_rand();
     let to_pubkey = solana_sdk::pubkey::new_rand();
@@ -5928,7 +6063,7 @@ fn test_transaction_with_program_ids_passed_to_programs() {
 fn test_account_ids_after_program_ids() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(500);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let from_pubkey = solana_sdk::pubkey::new_rand();
     let to_pubkey = solana_sdk::pubkey::new_rand();
@@ -5949,9 +6084,14 @@ fn test_account_ids_after_program_ids() {
     tx.message.account_keys.push(solana_sdk::pubkey::new_rand());
     let slot = bank.slot().saturating_add(1);
-    let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot);
-
+    let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot);
     bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
+    let bank = bank_forks
+        .write()
+        .unwrap()
+        .insert(bank)
+        .clone_without_scheduler();
+
     let result = bank.process_transaction(&tx);
     assert_eq!(result, Ok(()));
     let account = bank.get_account(&solana_vote_program::id()).unwrap();
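A second API shift runs through the builtin above: the lamport mutators on borrowed instruction accounts now also take the active `FeatureSet`. A hedged sketch of the new call shape, using only names that appear in the diff:

// Sketch: feature-set-threaded lamport arithmetic inside a mock builtin.
declare_process_instruction!(MockBuiltin, 1, |invoke_context| {
    let transaction_context = &invoke_context.transaction_context;
    let instruction_context = transaction_context.get_current_instruction_context()?;
    // Move one lamport from account 0 to account 1; both calls carry the
    // invoke context's feature set, per the updated signatures.
    instruction_context
        .try_borrow_instruction_account(transaction_context, 0)?
        .checked_sub_lamports(1, &invoke_context.feature_set)?;
    instruction_context
        .try_borrow_instruction_account(transaction_context, 1)?
        .checked_add_lamports(1, &invoke_context.feature_set)?;
    Ok(())
});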
@@ -5962,10 +6102,11 @@ fn test_account_ids_after_program_ids() {
 #[test]
 fn test_incinerator() {
     let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000_000);
-    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     // Move to the first normal slot so normal rent behaviour applies
-    let bank = Bank::new_from_parent(
+    let bank = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
         bank0,
         &Pubkey::default(),
         genesis_config.epoch_schedule.first_normal_slot,
@@ -5991,7 +6132,12 @@ fn test_duplicate_account_key() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(500);
-    let mut bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_mockup_builtin_for_tests(
+        &genesis_config,
+        solana_vote_program::id(),
+        MockBuiltin::vm,
+    )
+    .0;
     let from_pubkey = solana_sdk::pubkey::new_rand();
     let to_pubkey = solana_sdk::pubkey::new_rand();
@@ -6001,8 +6147,6 @@ fn test_duplicate_account_key() {
         AccountMeta::new(to_pubkey, false),
     ];
-    bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
-
     let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
     let mut tx = Transaction::new_signed_with_payer(
         &[instruction],
@@ -6020,7 +6164,12 @@ fn test_process_transaction_with_too_many_account_locks() {
     solana_logger::setup();
     let (genesis_config, mint_keypair) = create_genesis_config(500);
-    let mut bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_mockup_builtin_for_tests(
+        &genesis_config,
+        solana_vote_program::id(),
+        MockBuiltin::vm,
+    )
+    .0;
     let from_pubkey = solana_sdk::pubkey::new_rand();
     let to_pubkey = solana_sdk::pubkey::new_rand();
@@ -6030,8 +6179,6 @@ fn test_process_transaction_with_too_many_account_locks() {
         AccountMeta::new(to_pubkey, false),
     ];
-    bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
-
     let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
     let mut tx = Transaction::new_signed_with_payer(
         &[instruction],
@@ -6096,7 +6243,7 @@ fn test_program_id_as_payer() {
 #[test]
 fn test_ref_account_key_after_program_id() {
     let (genesis_config, mint_keypair) = create_genesis_config(500);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let from_pubkey = solana_sdk::pubkey::new_rand();
     let to_pubkey = solana_sdk::pubkey::new_rand();
@@ -6107,9 +6254,13 @@ fn test_ref_account_key_after_program_id() {
     ];
     let slot = bank.slot().saturating_add(1);
-    let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot);
-
+    let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot);
     bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm);
+    let bank = bank_forks
+        .write()
+        .unwrap()
+        .insert(bank)
+        .clone_without_scheduler();
     let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
     let mut tx = Transaction::new_signed_with_payer(
@@ -6148,6 +6299,7 @@ fn test_fuzz_instructions() {
             (key, name.as_bytes().to_vec())
         })
        .collect();
+    let bank = bank.wrap_with_bank_forks_for_tests().0;
     let max_keys = 100;
     let keys: Vec<_> = (0..max_keys)
         .enumerate()
@@ -6308,25 +6460,25 @@ fn test_bank_hash_consistency() {
         if bank.slot == 0 {
             assert_eq!(
                 bank.hash().to_string(),
-                "3KE2bigpBiiMLGYNqmWkgbrQGSqMt5ccG6ED87CFCVpt"
+                "trdzvRDTAXAqo1i2GX4JfK9ReixV1NYNG7DRaVq43Do",
            );
        }
         if bank.slot == 32 {
             assert_eq!(
                 bank.hash().to_string(),
-                "FpNDsd21HXznXf6tRpMNiWhFyhZ4aCCECQm3gL4jGV22"
+                "2rdj8QEnDnBSyMv81rCmncss4UERACyXXB3pEvkep8eS",
             );
         }
         if bank.slot == 64 {
             assert_eq!(
                 bank.hash().to_string(),
-                "7gDCoXPfFtKPALi212akhhQHEuLdAqyf7DE3yUN4bR2p"
+                "7g3ofXVQB3reFt9ki8zLA8S4w1GdmEWsWuWrwkPN3SSv"
             );
         }
         if bank.slot == 128 {
             assert_eq!(
                 bank.hash().to_string(),
-                "6FREbeHdTNYnEXg4zobL2mqGfevukg75frkQJqKpYnk4"
+                "4uX1AZFbqwjwWBACWbAW3V8rjbWH4N3ZRTbNysSLAzj2"
             );
             break;
         }
@@ -6342,15 +6494,14 @@ fn test_same_program_id_uses_unique_executable_accounts() {
         let instruction_context = transaction_context.get_current_instruction_context()?;
         instruction_context
             .try_borrow_program_account(transaction_context, 0)?
-            .set_data_length(2)
+            .set_data_length(2, &invoke_context.feature_set)
     });
     let (genesis_config, mint_keypair) = create_genesis_config(50000);
-    let mut bank = Bank::new_for_tests(&genesis_config);
-
-    // Add a new program
     let program1_pubkey = solana_sdk::pubkey::new_rand();
-    bank.add_mockup_builtin(program1_pubkey, MockBuiltin::vm);
+    let bank =
+        Bank::new_with_mockup_builtin_for_tests(&genesis_config, program1_pubkey, MockBuiltin::vm)
            .0;
     // Add a new program owned by the first
     let program2_pubkey = solana_sdk::pubkey::new_rand();
@@ -6559,7 +6710,7 @@ fn test_add_builtin_no_overwrite() {
     let program_id = solana_sdk::pubkey::new_rand();
     let mut bank = Arc::new(Bank::new_from_parent(
-        create_simple_test_arc_bank(100_000),
+        create_simple_test_arc_bank(100_000).0,
         &Pubkey::default(),
         slot,
     ));
@@ -6583,7 +6734,7 @@ fn test_add_builtin_loader_no_overwrite() {
     let loader_id = solana_sdk::pubkey::new_rand();
     let mut bank = Arc::new(Bank::new_from_parent(
-        create_simple_test_arc_bank(100_000),
+        create_simple_test_arc_bank(100_000).0,
         &Pubkey::default(),
         slot,
     ));
@@ -6770,7 +6921,7 @@ fn test_add_builtin_account_after_frozen() {
     let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap();
     let bank = Bank::new_from_parent(
-        create_simple_test_arc_bank(100_000),
+        create_simple_test_arc_bank(100_000).0,
         &Pubkey::default(),
         slot,
     );
@@ -6789,7 +6940,7 @@ fn test_add_builtin_account_replace_none() {
     let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap();
     let bank = Bank::new_from_parent(
-        create_simple_test_arc_bank(100_000),
+        create_simple_test_arc_bank(100_000).0,
         &Pubkey::default(),
         slot,
     );
@@ -6807,10 +6958,7 @@ fn test_add_precompiled_account() {
     let program_id = solana_sdk::pubkey::new_rand();
     let bank = Arc::new(Bank::new_from_parent(
-        Arc::new(Bank::new_for_tests_with_config(
-            &genesis_config,
-            BankTestConfig::default(),
-        )),
+        Arc::new(Bank::new_for_tests(&genesis_config)),
         &Pubkey::default(),
         slot,
     ));
@@ -6852,7 +7000,7 @@ fn test_add_precompiled_account_inherited_cap_while_replacing() {
     // and then want to continue modifying the bank
     for pass in 0..4 {
         let (genesis_config, mint_keypair) = create_genesis_config(100_000);
-        let bank = Bank::new_for_tests_with_config(&genesis_config, BankTestConfig::default());
+        let bank = Bank::new_for_tests(&genesis_config);
         let program_id = solana_sdk::pubkey::new_rand();
         bank.add_precompiled_account(&program_id);
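Several tests here collapse the old two-step setup (construct, then `add_mockup_builtin`) into one constructor. A sketch of the consolidated shape, assuming the tuple return visible in the hunks:

// Sketch: create the bank with the mock builtin pre-registered, wrapped in
// BankForks; take only the bank half when the forks handle is unused.
let program_id = solana_sdk::pubkey::new_rand();
let bank =
    Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0;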
@@ -6886,7 +7034,7 @@ fn test_add_precompiled_account_squatted_while_not_replacing() {
     for pass in 0..3 {
         let (genesis_config, mint_keypair) = create_genesis_config(100_000);
-        let bank = Bank::new_for_tests_with_config(&genesis_config, BankTestConfig::default());
+        let bank = Bank::new_for_tests(&genesis_config);
         let program_id = solana_sdk::pubkey::new_rand();
         // someone managed to squat at program_id!
@@ -6922,7 +7070,7 @@ fn test_add_precompiled_account_after_frozen() {
     let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap();
     let bank = Bank::new_from_parent(
-        create_simple_test_arc_bank(100_000),
+        create_simple_test_arc_bank(100_000).0,
         &Pubkey::default(),
         slot,
     );
@@ -7001,7 +7149,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() {
     let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000);
     let mut bank = Bank::new_for_tests(&genesis_config);
     bank.feature_set = Arc::new(FeatureSet::all_enabled());
-    let bank = Arc::new(bank);
+    let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();
     let mut bank_client = BankClient::new_shared(bank.clone());
     // Setup keypairs and addresses
@@ -7171,7 +7319,9 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() {
     // Test initialized program account
     bank.clear_signatures();
     bank.store_account(&buffer_address, &buffer_account);
-    let bank = bank_client.advance_slot(1, &mint_keypair.pubkey()).unwrap();
+    let bank = bank_client
+        .advance_slot(1, bank_forks.as_ref(), &mint_keypair.pubkey())
+        .unwrap();
     let message = Message::new(
         &[Instruction::new_with_bincode(
             bpf_loader_upgradeable::id(),
@@ -7718,7 +7868,7 @@
 #[test]
 fn test_compute_active_feature_set() {
-    let bank0 = create_simple_test_arc_bank(100_000);
+    let bank0 = create_simple_test_arc_bank(100_000).0;
     let mut bank = Bank::new_from_parent(bank0, &Pubkey::default(), 1);
     let test_feature = "TestFeature11111111111111111111111111111111"
         .parse::<Pubkey>()
         .unwrap();
@@ -7742,25 +7892,26 @@ fn test_compute_active_feature_set() {
     let feature = Feature::default();
     assert_eq!(feature.activated_at, None);
     bank.store_account(&test_feature, &feature::create_account(&feature, 42));
+    let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account"))
+        .expect("from_account");
+    assert_eq!(feature.activated_at, None);
-    // Run `compute_active_feature_set` disallowing new activations
+    // Run `compute_active_feature_set` excluding pending activation
     let (feature_set, new_activations) = bank.compute_active_feature_set(false);
     assert!(new_activations.is_empty());
     assert!(!feature_set.is_active(&test_feature));
-    let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account"))
-        .expect("from_account");
-    assert_eq!(feature.activated_at, None);
-    // Run `compute_active_feature_set` allowing new activations
-    let (feature_set, new_activations) = bank.compute_active_feature_set(true);
+    // Run `compute_active_feature_set` including pending activation
+    let (_feature_set, new_activations) = bank.compute_active_feature_set(true);
     assert_eq!(new_activations.len(), 1);
-    assert!(feature_set.is_active(&test_feature));
+    assert!(new_activations.contains(&test_feature));
+
+    // Actually activate the pending activation
+    bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true);
     let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account"))
         .expect("from_account");
     assert_eq!(feature.activated_at, Some(1));
-    // Running `compute_active_feature_set` will not cause new activations, but
-    // `test_feature` is now be active
     let (feature_set, new_activations) = bank.compute_active_feature_set(true);
     assert!(new_activations.is_empty());
     assert!(feature_set.is_active(&test_feature));
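The rewritten feature test above separates peeking from activating: `compute_active_feature_set(true)` now only reports pending activations, and `apply_feature_activations` performs them. A hedged sketch of that two-phase flow, using only calls that appear in the hunk:

// Sketch: inspect pending activations, then apply them.
let (_feature_set, pending) = bank.compute_active_feature_set(true);
assert!(pending.contains(&test_feature));
bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true);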
@@ -8024,7 +8175,7 @@ fn test_timestamp_fast() {
 #[test]
 fn test_program_is_native_loader() {
     let (genesis_config, mint_keypair) = create_genesis_config(50000);
-    let bank = Bank::new_for_tests(&genesis_config);
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     let tx = Transaction::new_signed_with_payer(
         &[Instruction::new_with_bincode(
@@ -8945,7 +9096,7 @@ fn test_vote_epoch_panic() {
         &Pubkey::new_unique(),
         bootstrap_validator_stake_lamports(),
     );
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let vote_keypair = keypair_from_seed(&[1u8; 32]).unwrap();
     let stake_keypair = keypair_from_seed(&[2u8; 32]).unwrap();
@@ -8993,7 +9144,8 @@ fn test_vote_epoch_panic() {
     ));
     assert!(result.is_ok());
-    let _bank = Bank::new_from_parent(
+    let _bank = new_bank_from_parent_with_bank_forks(
+        bank_forks.as_ref(),
         bank,
         &mint_keypair.pubkey(),
         genesis_config.epoch_schedule.get_first_slot_in_epoch(1),
@@ -9011,7 +9163,7 @@ fn test_tx_log_order() {
         &Pubkey::new_unique(),
         bootstrap_validator_stake_lamports(),
     );
-    let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
     *bank.transaction_log_collector_config.write().unwrap() = TransactionLogCollectorConfig {
         mentioned_addresses: HashSet::new(),
         filter: TransactionLogCollectorFilter::All,
@@ -9098,7 +9250,10 @@ fn test_tx_return_data() {
         &Pubkey::new_unique(),
         bootstrap_validator_stake_lamports(),
     );
-    let mut bank = Bank::new_for_tests(&genesis_config);
+    let mock_program_id = Pubkey::from([2u8; 32]);
+    let bank =
+        Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm)
+            .0;
     declare_process_instruction!(MockBuiltin, 1, |invoke_context| {
         let mock_program_id = Pubkey::from([2u8; 32]);
@@ -9116,9 +9271,7 @@ fn test_tx_return_data() {
         Ok(())
     });
-    let mock_program_id = Pubkey::from([2u8; 32]);
     let blockhash = bank.last_blockhash();
-    bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm);
     for index in [
         None,
@@ -9296,20 +9449,20 @@ fn test_transfer_sysvar() {
         &Pubkey::new_unique(),
         bootstrap_validator_stake_lamports(),
     );
-    let mut bank = Bank::new_for_tests(&genesis_config);
+    let program_id = solana_sdk::pubkey::new_rand();
+
+    let bank =
+        Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0;
     declare_process_instruction!(MockBuiltin, 1, |invoke_context| {
         let transaction_context = &invoke_context.transaction_context;
         let instruction_context = transaction_context.get_current_instruction_context()?;
         instruction_context
             .try_borrow_instruction_account(transaction_context, 1)?
-            .set_data(vec![0; 40])?;
+            .set_data(vec![0; 40], &invoke_context.feature_set)?;
         Ok(())
     });
-    let program_id = solana_sdk::pubkey::new_rand();
-    bank.add_mockup_builtin(program_id, MockBuiltin::vm);
-
     let blockhash = bank.last_blockhash();
     #[allow(deprecated)]
     let blockhash_sysvar = sysvar::clock::id();
@@ -9386,7 +9539,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) {
- In this case, key5's ref-count should be decremented correctly let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let collector = Pubkey::new_unique(); @@ -9403,7 +9556,8 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { bank0.freeze(); let slot = 1; - let bank1 = Bank::new_from_parent(bank0.clone(), &collector, slot); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0.clone(), &collector, slot); add_root_and_flush_write_cache(&bank0); bank1 .transfer(amount, &mint_keypair, &key1.pubkey()) @@ -9416,7 +9570,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { } let slot = slot + 1; - let bank2 = Bank::new_from_parent(bank0, &collector, slot); + let bank2 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank2 .transfer(amount * 2, &mint_keypair, &key2.pubkey()) .unwrap(); @@ -9429,6 +9583,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { bank2.squash(); add_root_and_flush_write_cache(&bank2); + bank_forks.write().unwrap().remove(1); drop(bank1); bank2.clean_accounts_for_tests(); @@ -9505,7 +9660,9 @@ fn test_compute_budget_program_noop() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + let program_id = solana_sdk::pubkey::new_rand(); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); @@ -9521,8 +9678,6 @@ fn test_compute_budget_program_noop() { ); Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); let message = Message::new( &[ @@ -9550,7 +9705,9 @@ fn test_compute_request_instruction() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + let program_id = solana_sdk::pubkey::new_rand(); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); @@ -9566,8 +9723,6 @@ fn test_compute_request_instruction() { ); Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); let message = Message::new( &[ @@ -9595,7 +9750,10 @@ fn test_failed_compute_request_instruction() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + + let program_id = solana_sdk::pubkey::new_rand(); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; let payer0_keypair = Keypair::new(); let payer1_keypair = Keypair::new(); @@ -9618,8 +9776,6 @@ fn test_failed_compute_request_instruction() { ); Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); // This message will not be executed because the compute budget request is invalid let message0 = Message::new( @@ -9662,11 +9818,8 @@ fn test_verify_and_hash_transaction_sig_len() { mut genesis_config, .. 
} = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42); - // activate all features but verify_tx_signatures_len + // activate all features activate_all_features(&mut genesis_config); - genesis_config - .accounts - .remove(&feature_set::verify_tx_signatures_len::id()); let bank = Bank::new_for_tests(&genesis_config); let recent_blockhash = Hash::new_unique(); @@ -9703,18 +9856,16 @@ fn test_verify_and_hash_transaction_sig_len() { { let tx = make_transaction(TestCase::RemoveSignature); assert_eq!( - bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification) - .err(), - Some(TransactionError::SanitizeFailure), + bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification), + Err(TransactionError::SanitizeFailure), ); } // Too many signatures: Sanitization failure { let tx = make_transaction(TestCase::AddSignature); assert_eq!( - bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification) - .err(), - Some(TransactionError::SanitizeFailure), + bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification), + Err(TransactionError::SanitizeFailure), ); } } @@ -9750,9 +9901,8 @@ fn test_verify_transactions_packet_data_size() { let tx = make_transaction(25); assert!(bincode::serialized_size(&tx).unwrap() > PACKET_DATA_SIZE as u64); assert_eq!( - bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification) - .err(), - Some(TransactionError::SanitizeFailure), + bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification), + Err(TransactionError::SanitizeFailure), ); } // Assert that verify fails as soon as serialized @@ -9775,7 +9925,7 @@ fn test_call_precomiled_program() { .. } = create_genesis_config_with_leader(42, &Pubkey::new_unique(), 42); activate_all_features(&mut genesis_config); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; // libsecp256k1 // Since libsecp256k1 is still using the old version of rand, this test @@ -9838,30 +9988,12 @@ fn calculate_test_fee( message: &SanitizedMessage, lamports_per_signature: u64, fee_structure: &FeeStructure, - support_set_accounts_data_size_limit_ix: bool, - remove_congestion_multiplier: bool, ) -> u64 { - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); - - if !support_set_accounts_data_size_limit_ix { - feature_set.deactivate( - &solana_sdk::feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), - ); - } + let budget_limits = process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default() + .into(); - let budget_limits = - process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) - .unwrap_or_default() - .into(); - - fee_structure.calculate_fee( - message, - lamports_per_signature, - &budget_limits, - remove_congestion_multiplier, - false, - ) + fee_structure.calculate_fee(message, lamports_per_signature, &budget_limits, false) } #[test] @@ -9869,38 +10001,30 @@ fn test_calculate_fee() { // Default: no fee. 
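+    // With the feature-flag parameters dropped from calculate_test_fee, a message
+    // that carries no compute-budget instructions is charged the base signature
+    // fee, i.e. fee = num_signatures * lamports_per_signature; that is what the
+    // 0-, 1-, and 4-lamport expectations below encode.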
let message = SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 0, - &FeeStructure { - lamports_per_signature: 0, - ..FeeStructure::default() - }, - support_set_accounts_data_size_limit_ix, - true, - ), - 0 - ); - } + assert_eq!( + calculate_test_fee( + &message, + 0, + &FeeStructure { + lamports_per_signature: 0, + ..FeeStructure::default() + }, + ), + 0 + ); // One signature, a fee. - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &FeeStructure { - lamports_per_signature: 1, - ..FeeStructure::default() - }, - support_set_accounts_data_size_limit_ix, - true, - ), - 1 - ); - } + assert_eq!( + calculate_test_fee( + &message, + 1, + &FeeStructure { + lamports_per_signature: 1, + ..FeeStructure::default() + }, + ), + 1 + ); // Two signatures, double the fee. let key0 = Pubkey::new_unique(); @@ -9908,21 +10032,17 @@ fn test_calculate_fee() { let ix0 = system_instruction::transfer(&key0, &key1, 1); let ix1 = system_instruction::transfer(&key1, &key0, 1); let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&key0))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 2, - &FeeStructure { - lamports_per_signature: 2, - ..FeeStructure::default() - }, - support_set_accounts_data_size_limit_ix, - true, - ), - 4 - ); - } + assert_eq!( + calculate_test_fee( + &message, + 2, + &FeeStructure { + lamports_per_signature: 2, + ..FeeStructure::default() + }, + ), + 4 + ); } #[test] @@ -9938,18 +10058,10 @@ fn test_calculate_fee_compute_units() { let message = SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - max_fee + lamports_per_signature - ); - } + assert_eq!( + calculate_test_fee(&message, 1, &fee_structure,), + max_fee + lamports_per_signature + ); // Three signatures, two instructions, no unit request @@ -9957,18 +10069,10 @@ fn test_calculate_fee_compute_units() { let ix1 = system_instruction::transfer(&Pubkey::new_unique(), &Pubkey::new_unique(), 1); let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&Pubkey::new_unique()))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - max_fee + 3 * lamports_per_signature - ); - } + assert_eq!( + calculate_test_fee(&message, 1, &fee_structure,), + max_fee + 3 * lamports_per_signature + ); // Explicit fee schedule @@ -9999,19 +10103,11 @@ fn test_calculate_fee_compute_units() { Some(&Pubkey::new_unique()), )) .unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - let fee = calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ); - assert_eq!( - fee, - lamports_per_signature + prioritization_fee_details.get_fee() - ); - } + let fee = calculate_test_fee(&message, 1, &fee_structure); + assert_eq!( + fee, + lamports_per_signature + prioritization_fee_details.get_fee() + ); } } @@ -10043,8 +10139,6 @@ fn test_calculate_prioritization_fee() { &message, fee_structure.lamports_per_signature, 
&fee_structure, - true, - true, ); assert_eq!( fee, @@ -10082,18 +10176,7 @@ fn test_calculate_fee_secp256k1() { Some(&key0), )) .unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - 2 - ); - } + assert_eq!(calculate_test_fee(&message, 1, &fee_structure,), 2); secp_instruction1.data = vec![0]; secp_instruction2.data = vec![10]; @@ -10102,18 +10185,7 @@ fn test_calculate_fee_secp256k1() { Some(&key0), )) .unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - 11 - ); - } + assert_eq!(calculate_test_fee(&message, 1, &fee_structure,), 11); } #[test] @@ -10125,7 +10197,7 @@ fn test_an_empty_instruction_without_program() { let message = Message::new(&[ix], Some(&mint_keypair.pubkey())); let tx = Transaction::new(&[&mint_keypair], message, genesis_config.hash()); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; assert_eq!( bank.process_transaction(&tx).unwrap_err(), TransactionError::InstructionError(0, InstructionError::UnsupportedProgramId), @@ -10154,6 +10226,7 @@ fn test_accounts_data_size_with_good_transaction() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000.)); let mut bank = Bank::new_for_tests(&genesis_config); bank.activate_feature(&feature_set::cap_accounts_data_len::id()); + let bank = bank.wrap_with_bank_forks_for_tests().0; let transaction = system_transaction::create_account( &mint_keypair, &Keypair::new(), @@ -10203,6 +10276,8 @@ fn test_accounts_data_size_with_bad_transaction() { &solana_sdk::system_program::id(), ); + let bank = bank.wrap_with_bank_forks_for_tests().0; + let accounts_data_size_before = bank.load_accounts_data_size(); let accounts_data_size_delta_before = bank.load_accounts_data_size_delta(); let accounts_data_size_delta_on_chain_before = bank.load_accounts_data_size_delta_on_chain(); @@ -10237,10 +10312,10 @@ declare_process_instruction!(MockTransferBuiltin, 1, |invoke_context| { MockTransferInstruction::Transfer(amount) => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_sub_lamports(amount)?; + .checked_sub_lamports(amount, &invoke_context.feature_set)?; instruction_context .try_borrow_instruction_account(transaction_context, 2)? 
- .checked_add_lamports(amount)?; + .checked_add_lamports(amount, &invoke_context.feature_set)?; Ok(()) } } @@ -10310,8 +10385,12 @@ fn test_invalid_rent_state_changes_existing_accounts() { ), ); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockTransferBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10393,8 +10472,12 @@ fn test_invalid_rent_state_changes_new_accounts() { let account_data_size = 100; let rent_exempt_minimum = genesis_config.rent.minimum_balance(account_data_size); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockTransferBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10452,8 +10535,12 @@ fn test_drained_created_account() { // Create legacy accounts of various kinds let created_keypair = Keypair::new(); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockTransferBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); // Create and drain a small data size account @@ -10564,7 +10651,7 @@ fn test_rent_state_changes_sysvars() { Account::from(validator_vote_account), ); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; // Ensure transactions with sysvars succeed, even though sysvars appear RentPaying by balance let tx = Transaction::new_signed_with_payer( @@ -10607,7 +10694,7 @@ fn test_invalid_rent_state_changes_fee_payer() { Account::new(rent_exempt_minimum, 0, &system_program::id()), ); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10837,7 +10924,7 @@ fn test_rent_state_incinerator() { genesis_config.rent = Rent::default(); let rent_exempt_minimum = genesis_config.rent.minimum_balance(0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; for amount in [rent_exempt_minimum - 1, rent_exempt_minimum] { bank.transfer(amount, &mint_keypair, &solana_sdk::incinerator::id()) @@ -10865,7 +10952,8 @@ fn test_rent_state_list_len() { let num_accounts = tx.message().account_keys.len(); let sanitized_tx = SanitizedTransaction::try_from_legacy_transaction(tx).unwrap(); let mut error_counters = TransactionErrorMetrics::default(); - let loaded_txs = bank.rc.accounts.load_accounts( + let loaded_txs = load_accounts( + &bank.accounts().accounts_db, &bank.ancestors, &[sanitized_tx.clone()], vec![(Ok(()), None)], @@ -10878,6 +10966,7 @@ fn test_rent_state_list_len() { RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), + true, ); let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| { @@ -10993,7 +11082,7 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { // Set data length instruction_context 
.try_borrow_instruction_account(transaction_context, 1)? - .set_data_length(new_size)?; + .set_data_length(new_size, &invoke_context.feature_set)?; // set balance let current_balance = instruction_context @@ -11004,17 +11093,17 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { if diff_balance.is_positive() { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_sub_lamports(amount)?; + .checked_sub_lamports(amount, &invoke_context.feature_set)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_lamports(new_balance)?; + .set_lamports(new_balance, &invoke_context.feature_set)?; } else { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(amount)?; + .checked_add_lamports(amount, &invoke_context.feature_set)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_lamports(new_balance)?; + .set_lamports(new_balance, &invoke_context.feature_set)?; } Ok(()) } @@ -11060,10 +11149,14 @@ fn test_resize_and_rent() { genesis_config.rent = Rent::default(); activate_all_features(&mut genesis_config); - let mut bank = Bank::new_for_tests(&genesis_config); - let mock_program_id = Pubkey::new_unique(); - bank.add_mockup_builtin(mock_program_id, MockReallocBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockReallocBuiltin::vm, + ) + .0; + let recent_blockhash = bank.last_blockhash(); let account_data_size_small = 1024; @@ -11332,9 +11425,13 @@ fn test_accounts_data_size_and_resize_transactions() { mint_keypair, .. } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); let mock_program_id = Pubkey::new_unique(); - bank.add_mockup_builtin(mock_program_id, MockReallocBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockReallocBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); @@ -11463,15 +11560,22 @@ fn test_get_rent_paying_pubkeys() { } /// Ensure that accounts data size is updated correctly by rent collection -#[test] -fn test_accounts_data_size_and_rent_collection() { +#[test_case(true; "enable rent fees collection")] +#[test_case(false; "disable rent fees collection")] +fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { for set_exempt_rent_epoch_max in [false, true] { let GenesisConfigInfo { mut genesis_config, .. 
} = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); genesis_config.rent = Rent::default(); - activate_all_features(&mut genesis_config); + if should_collect_rent { + genesis_config + .accounts + .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); + } + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let slot = bank.slot() + bank.slot_count_per_normal_epoch(); let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); @@ -11491,24 +11595,24 @@ fn test_accounts_data_size_and_rent_collection() { let info = bank.rent_collector.collect_from_existing_account( &keypair.pubkey(), &mut account, - None, set_exempt_rent_epoch_max, ); assert_eq!(info.account_data_len_reclaimed, data_size as u64); } // Collect rent for real + assert_eq!(should_collect_rent, bank.should_collect_rent()); let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); bank.collect_rent_eagerly(); let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent - accounts_data_size_delta_before_collecting_rent; - assert!(accounts_data_size_delta_delta < 0); + assert!(!should_collect_rent || accounts_data_size_delta_delta < 0); let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize; // Ensure the account is reclaimed by rent collection - assert_eq!(reclaimed_data_size, data_size,); + assert!(!should_collect_rent || reclaimed_data_size == data_size); } } @@ -11535,7 +11639,7 @@ fn test_accounts_data_size_from_genesis() { genesis_config.rent = Rent::default(); genesis_config.ticks_per_slot = 3; - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); assert_eq!( bank.load_accounts_data_size() as usize, bank.get_total_accounts_stats().unwrap().data_len @@ -11544,7 +11648,12 @@ fn test_accounts_data_size_from_genesis() { // Create accounts over a number of banks and ensure the accounts data size remains correct for _ in 0..10 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::default(), + slot, + ); // Store an account into the bank that is rent-exempt and has data let data_size = rand::thread_rng().gen_range(3333..4444); @@ -11576,7 +11685,7 @@ fn test_cap_accounts_data_allocations_per_transaction() { / MAX_PERMITTED_DATA_LENGTH as usize; let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let mut instructions = Vec::new(); let mut keypairs = vec![mint_keypair.insecure_clone()]; @@ -11817,39 +11926,17 @@ fn test_calculate_fee_with_congestion_multiplier() { // assert when lamports_per_signature is less than BASE_LAMPORTS, turnning on/off // congestion_multiplier has no effect on fee. 
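+    // The congestion multiplier has been removed from the fee calculation, so the
+    // cheap and the expensive cases below both expect the undiscounted
+    // signature_fee * signature_count.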
- for remove_congestion_multiplier in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - cheap_lamports_per_signature, - &fee_structure, - true, - remove_congestion_multiplier, - ), - signature_fee * signature_count - ); - } + assert_eq!( + calculate_test_fee(&message, cheap_lamports_per_signature, &fee_structure), + signature_fee * signature_count + ); // assert when lamports_per_signature is more than BASE_LAMPORTS, turnning on/off // congestion_multiplier will change calculated fee. - for remove_congestion_multiplier in [true, false] { - let denominator: u64 = if remove_congestion_multiplier { - 1 - } else { - lamports_scale - }; - - assert_eq!( - calculate_test_fee( - &message, - expensive_lamports_per_signature, - &fee_structure, - true, - remove_congestion_multiplier, - ), - signature_fee * signature_count / denominator - ); - } + assert_eq!( + calculate_test_fee(&message, expensive_lamports_per_signature, &fee_structure,), + signature_fee * signature_count + ); } #[test] @@ -11878,7 +11965,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { // assert when request_heap_frame is presented in tx, prioritization fee will be counted // into transaction fee assert_eq!( - calculate_test_fee(&message, lamports_per_signature, &fee_structure, true, true,), + calculate_test_fee(&message, lamports_per_signature, &fee_structure), signature_fee + request_cu * lamports_per_cu ); } @@ -11887,7 +11974,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { fn test_is_in_slot_hashes_history() { use solana_sdk::slot_hashes::MAX_ENTRIES; - let bank0 = create_simple_test_arc_bank(1); + let bank0 = create_simple_test_arc_bank(1).0; assert!(!bank0.is_in_slot_hashes_history(&0)); assert!(!bank0.is_in_slot_hashes_history(&1)); let mut last_bank = bank0; @@ -11909,8 +11996,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { genesis_config .accounts .remove(&feature_set::reject_callx_r10::id()); - let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); - let root_bank = bank_forks.read().unwrap().root_bank(); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Test a basic transfer let amount = genesis_config.rent.minimum_balance(0); @@ -11940,7 +12026,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { // Advance the bank so the next transaction can be submitted. goto_end_of_slot(root_bank.clone()); - let bank = Arc::new(new_from_parent(root_bank)); + let bank = new_from_parent_with_fork_next_slot(root_bank, bank_forks.as_ref()); // Compose second instruction using the same program with a different block hash let instruction2 = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); @@ -11969,7 +12055,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); // Advance to next epoch, which starts the recompilation phase - let bank = new_from_parent_next_epoch(bank, 1); + let bank = new_from_parent_next_epoch(bank, bank_forks.as_ref(), 1); // Execute after feature is enabled to check it was filtered out and reverified. 
let result_with_feature_enabled = bank.process_transaction(&transaction2); @@ -12017,12 +12103,17 @@ fn test_bank_verify_accounts_hash_with_base() { bank.fill_bank_with_ticks_for_tests(); }; - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // make some banks, do some transactions, ensure there's some zero-lamport accounts for _ in 0..2 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } @@ -12036,7 +12127,12 @@ fn test_bank_verify_accounts_hash_with_base() { // make more banks, do more transactions, ensure there's more zero-lamport accounts for _ in 0..2 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } @@ -12711,13 +12807,17 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { let node_key = &validator_keypairs[0].node_keypair; let stake_key = &validator_keypairs[0].stake_keypair; - let bank0 = Bank::new_for_tests(&genesis_config); - let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); + let (mut previous_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let num_slots_in_epoch = previous_bank.get_slots_in_epoch(previous_bank.epoch()); assert_eq!(num_slots_in_epoch, 32); - let mut previous_bank = Arc::new(bank0); for slot in 1..=num_slots_in_epoch + 2 { - let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + slot, + ); // Fill bank_forks with banks with votes landing in the next slot // So that rewards will be paid out at the epoch boundary, i.e. slot = 32 @@ -12753,7 +12853,7 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { // iteration are different. Otherwise, all those transactions will be the same, and will not be // executed by the bank except the first one. 
bank.register_unique_recent_blockhash_for_test(); - previous_bank = Arc::new(bank); + previous_bank = bank; } } @@ -12906,8 +13006,8 @@ fn test_store_vote_accounts_partitioned_empty() { #[test] fn test_system_instruction_allocate() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); let data_len = 2; let amount = genesis_config.rent.minimum_balance(data_len); @@ -12964,7 +13064,7 @@ where // create initial bank and fund the alice account let (genesis_config, mint_keypair) = create_genesis_config(mint_lamports); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank.clone()); bank_client .transfer_and_confirm(mint_lamports, &mint_keypair, &alice_pubkey) @@ -12973,12 +13073,12 @@ where // create zero-lamports account to be cleaned let account = AccountSharedData::new(0, len1, &program); let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); bank.store_account(&bob_pubkey, &account); // transfer some to bogus pubkey just to make previous bank (=slot) really cleanable let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); let bank_client = BankClient::new_shared(bank.clone()); bank_client .transfer_and_confirm( @@ -12990,13 +13090,13 @@ where // super fun time; callback chooses to .clean_accounts(None) or not let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); callback(&bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); let bank_client = BankClient::new_shared(bank); let ix = system_instruction::create_account( &alice_pubkey, @@ -13033,8 +13133,8 @@ fn test_create_zero_lamport_without_clean() { #[test] fn test_system_instruction_assign_with_seed() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); @@ -13074,8 +13174,8 @@ fn test_system_instruction_unsigned_transaction() { let amount = genesis_config.rent.minimum_balance(0); // Fund to account to bypass AccountNotFound error - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); bank_client .transfer_and_confirm(amount, &alice_keypair, &mallory_pubkey) .unwrap(); @@ -13730,3 +13830,201 @@ fn 
test_filter_executable_program_accounts_invalid_blockhash() { ); assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); } + +/// Test that rehashing works with skipped rewrites +/// +/// Since `bank_to_xxx_snapshot_archive()` calls `Bank::rehash()`, we must ensure that rehashing +/// works properly when also using `test_skip_rewrites_but_include_in_bank_hash`. +#[test] +fn test_rehash_with_skipped_rewrites() { + let accounts_db_config = AccountsDbConfig { + test_skip_rewrites_but_include_in_bank_hash: true, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let bank = Arc::new(Bank::new_with_paths( + &GenesisConfig::default(), + Arc::new(RuntimeConfig::default()), + Vec::default(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config), + None, + Arc::new(AtomicBool::new(false)), + )); + // This test is only meaningful while the bank hash contains rewrites. + // Once this feature is enabled, it may be possible to remove this test entirely. + assert!(!bank.bank_hash_skips_rent_rewrites()); + + // Store an account *in this bank* that will be checked for rent collection *in the next bank* + let pubkey = { + let rent_collection_partition = bank + .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) + .last() + .copied() + .unwrap(); + let pubkey_range = + accounts_partition::pubkey_range_from_partition(rent_collection_partition); + *pubkey_range.end() + }; + let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); + // The account's rent epoch must be set to EXEMPT + // in order for its rewrite to be skipped by rent collection. + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + bank.store_account_and_update_capitalization(&pubkey, &account); + + // Create a new bank that will do rent collection on the account stored in the previous slot + let bank = Arc::new(Bank::new_from_parent( + bank.clone(), + &Pubkey::new_unique(), + bank.slot() + 1, + )); + + // Freeze the bank to trigger rent collection and hash calculation + bank.freeze(); + + // Ensure the bank hash is the same before and after rehashing + let bank_hash = bank.hash(); + bank.rehash(); + let bank_rehash = bank.hash(); + assert_eq!(bank_rehash, bank_hash); +} + +/// Test that skipped_rewrites are properly rebuilt when booting from a snapshot +/// that was generated by a node skipping rewrites. +#[test] +fn test_rebuild_skipped_rewrites() { + let genesis_config = GenesisConfig::default(); + let accounts_db_config = AccountsDbConfig { + test_skip_rewrites_but_include_in_bank_hash: true, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let bank = Arc::new(Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::default(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config.clone()), + None, + Arc::new(AtomicBool::new(false)), + )); + // This test is only meaningful while the bank hash contains rewrites. + // Once this feature is enabled, it may be possible to remove this test entirely. 
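+    // (i.e. the feature gate that would omit rent rewrites from the bank hash is
+    // assumed to be inactive here, which the assert below double-checks)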
+ assert!(!bank.bank_hash_skips_rent_rewrites()); + + // Store an account *in this bank* that will be checked for rent collection *in the next bank* + let pubkey = { + let rent_collection_partition = bank + .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) + .last() + .copied() + .unwrap(); + let pubkey_range = + accounts_partition::pubkey_range_from_partition(rent_collection_partition); + *pubkey_range.end() + }; + let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); + // The account's rent epoch must be set to EXEMPT + // in order for its rewrite to be skipped by rent collection. + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + bank.store_account_and_update_capitalization(&pubkey, &account); + + // Create a new bank that will do rent collection on the account stored in the previous slot + let bank = Arc::new(Bank::new_from_parent( + bank.clone(), + &Pubkey::new_unique(), + bank.slot() + 1, + )); + + // This fn is called within freeze(), but freeze() *consumes* Self::skipped_rewrites! + // For testing, we want to know what's in the skipped rewrites, so we perform + // rent collection manually. + bank.collect_rent_eagerly(); + let actual_skipped_rewrites = bank.skipped_rewrites.lock().unwrap().clone(); + // Ensure skipped rewrites now includes the account we stored above + assert!(actual_skipped_rewrites.contains_key(&pubkey)); + // Ensure the calculated skipped rewrites match the actual ones + let calculated_skipped_rewrites = bank.calculate_skipped_rewrites(); + assert_eq!(calculated_skipped_rewrites, actual_skipped_rewrites); + + // required in order to snapshot the bank + bank.fill_bank_with_ticks_for_tests(); + + // Now take a snapshot! + let (_tmp_dir, accounts_dir) = snapshot_utils::create_tmp_accounts_dir_for_tests(); + let bank_snapshots_dir = TempDir::new().unwrap(); + let full_snapshot_archives_dir = TempDir::new().unwrap(); + let incremental_snapshot_archives_dir = TempDir::new().unwrap(); + let full_snapshot_archive = snapshot_bank_utils::bank_to_full_snapshot_archive( + bank_snapshots_dir.path(), + &bank, + None, + full_snapshot_archives_dir.path(), + incremental_snapshot_archives_dir.path(), + snapshot_utils::ArchiveFormat::Tar, + snapshot_utils::DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, + snapshot_utils::DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, + ) + .unwrap(); + + // Rebuild the bank and ensure it passes verification + let (snapshot_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + &[accounts_dir], + bank_snapshots_dir.path(), + &full_snapshot_archive, + None, + &genesis_config, + &RuntimeConfig::default(), + None, + None, + AccountSecondaryIndexes::default(), + None, + AccountShrinkThreshold::default(), + false, + false, + false, + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + Arc::new(AtomicBool::new(false)), + ) + .unwrap(); + snapshot_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); + assert_eq!(bank.as_ref(), &snapshot_bank); + + // Ensure the snapshot bank's skipped rewrites match the original bank's + let snapshot_skipped_rewrites = snapshot_bank.calculate_skipped_rewrites(); + assert_eq!(snapshot_skipped_rewrites, actual_skipped_rewrites); +} + +/// Test that simulations report the compute units of failed transactions +#[test] +fn test_failed_simulation_compute_units() { + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); + let program_id = Pubkey::new_unique(); + let bank = + 
Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; + + const TEST_UNITS: u64 = 10_000; + declare_process_instruction!(MockBuiltin, 1, |invoke_context| { + invoke_context.consume_checked(TEST_UNITS).unwrap(); + Err(InstructionError::InvalidInstructionData) + }); + + let message = Message::new( + &[Instruction::new_with_bincode(program_id, &0, vec![])], + Some(&mint_keypair.pubkey()), + ); + let transaction = Transaction::new(&[&mint_keypair], message, bank.last_blockhash()); + + bank.freeze(); + let sanitized = SanitizedTransaction::from_transaction_for_tests(transaction); + let simulation = bank.simulate_transaction(&sanitized, false); + assert_eq!(TEST_UNITS, simulation.units_consumed); +} diff --git a/runtime/src/bank/transaction_account_state_info.rs b/runtime/src/bank/transaction_account_state_info.rs index 11e6d540d9f18f..4e5f58d85fffc8 100644 --- a/runtime/src/bank/transaction_account_state_info.rs +++ b/runtime/src/bank/transaction_account_state_info.rs @@ -1,6 +1,8 @@ use { - crate::bank::Bank, - solana_accounts_db::account_rent_state::{check_rent_state, RentState}, + crate::{ + accounts::account_rent_state::{check_rent_state, RentState}, + bank::Bank, + }, solana_sdk::{ account::ReadableAccount, message::SanitizedMessage, diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index 3d7f7e3e244261..7fe6418d4110b2 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -4,7 +4,6 @@ use { solana_sdk::{ account::Account, client::{AsyncClient, Client, SyncClient}, - clock, commitment_config::CommitmentConfig, epoch_info::EpochInfo, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -27,6 +26,8 @@ use { time::{Duration, Instant}, }, }; +#[cfg(feature = "dev-context-only-utils")] +use {crate::bank_forks::BankForks, solana_sdk::clock, std::sync::RwLock}; pub struct BankClient { bank: Arc, @@ -44,7 +45,7 @@ impl AsyncClient for BankClient { &self, transaction: VersionedTransaction, ) -> Result { - let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); + let signature = transaction.signatures.first().cloned().unwrap_or_default(); let transaction_sender = self.transaction_sender.lock().unwrap(); transaction_sender.send(transaction).unwrap(); Ok(signature) @@ -60,7 +61,7 @@ impl SyncClient for BankClient { let blockhash = self.bank.last_blockhash(); let transaction = Transaction::new(keypairs, message, blockhash); self.bank.process_transaction(&transaction)?; - Ok(transaction.signatures.get(0).cloned().unwrap_or_default()) + Ok(transaction.signatures.first().cloned().unwrap_or_default()) } /// Create and process a transaction from a single instruction. 
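For orientation, the test-setup idiom this diff migrates everything toward looks roughly as follows. This is a condensed sketch assembled only from calls that appear in the surrounding hunks (it assumes the dev-context-only-utils feature and the create_genesis_config test helper used throughout these files), not a compilable unit on its own:

    let (genesis_config, mint_keypair) = create_genesis_config(1_000_000);
    // Banks are registered in a BankForks up front, so the loaded-programs
    // fork graph is wired at creation time.
    let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
    let mut bank_client = BankClient::new_shared(bank.clone());
    // Advancing a slot now threads the forks handle through, instead of
    // wrapping Bank::new_from_parent() in a bare Arc.
    let bank = bank_client
        .advance_slot(1, bank_forks.as_ref(), &mint_keypair.pubkey())
        .unwrap();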
@@ -330,12 +331,24 @@ impl BankClient { self.bank.set_sysvar_for_tests(sysvar); } - pub fn advance_slot(&mut self, by: u64, collector_id: &Pubkey) -> Option> { - self.bank = Arc::new(Bank::new_from_parent( + #[cfg(feature = "dev-context-only-utils")] + pub fn advance_slot( + &mut self, + by: u64, + bank_forks: &RwLock, + collector_id: &Pubkey, + ) -> Option> { + let new_bank = Bank::new_from_parent( self.bank.clone(), collector_id, self.bank.slot().checked_add(by)?, - )); + ); + self.bank = bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); + self.set_sysvar_for_tests(&clock::Clock { slot: self.bank.slot(), ..clock::Clock::default() @@ -361,8 +374,8 @@ mod tests { let jane_doe_keypair = Keypair::new(); let jane_pubkey = jane_doe_keypair.pubkey(); let doe_keypairs = vec![&john_doe_keypair, &jane_doe_keypair]; - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); let amount = genesis_config.rent.minimum_balance(0); // Create 2-2 Multisig Transfer instruction. diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index dabd90e4c2c835..d481bf1b43bda8 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -14,7 +14,6 @@ use { solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, solana_sdk::{ clock::{Epoch, Slot}, - feature_set, hash::Hash, timing, }, @@ -85,9 +84,55 @@ impl Index for BankForks { } impl BankForks { - pub fn new_rw_arc(bank: Bank) -> Arc> { - let root = bank.slot(); - Self::new_from_banks(&[Arc::new(bank)], root) + pub fn new_rw_arc(root_bank: Bank) -> Arc> { + let root_bank = Arc::new(root_bank); + let root_slot = root_bank.slot(); + + let mut banks = HashMap::new(); + banks.insert( + root_slot, + BankWithScheduler::new_without_scheduler(root_bank.clone()), + ); + + let parents = root_bank.parents(); + for parent in parents { + if banks + .insert( + parent.slot(), + BankWithScheduler::new_without_scheduler(parent.clone()), + ) + .is_some() + { + // All ancestors have already been inserted by another fork + break; + } + } + + let mut descendants = HashMap::<_, HashSet<_>>::new(); + descendants.entry(root_slot).or_default(); + for parent in root_bank.proper_ancestors() { + descendants.entry(parent).or_default().insert(root_slot); + } + + let bank_forks = Arc::new(RwLock::new(Self { + root: Arc::new(AtomicSlot::new(root_slot)), + banks, + descendants, + snapshot_config: None, + accounts_hash_interval_slots: std::u64::MAX, + last_accounts_hash_slot: root_slot, + in_vote_only_mode: Arc::new(AtomicBool::new(false)), + highest_slot_at_startup: 0, + scheduler_pool: None, + })); + + root_bank + .loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(bank_forks.clone()); + + bank_forks } pub fn banks(&self) -> &HashMap { @@ -167,58 +212,6 @@ impl BankForks { self[self.root()].clone() } - pub fn new_from_banks(initial_forks: &[Arc], root: Slot) -> Arc> { - let mut banks = HashMap::new(); - - // Iterate through the heads of all the different forks - for bank in initial_forks { - banks.insert( - bank.slot(), - BankWithScheduler::new_without_scheduler(bank.clone()), - ); - let parents = bank.parents(); - for parent in parents { - if banks - .insert( - parent.slot(), - BankWithScheduler::new_without_scheduler(parent.clone()), - ) - .is_some() - { - // All ancestors have already been inserted by another fork - break; - } - } - } - let mut descendants 
= HashMap::<_, HashSet<_>>::new(); - for (slot, bank) in &banks { - descendants.entry(*slot).or_default(); - for parent in bank.proper_ancestors() { - descendants.entry(parent).or_default().insert(*slot); - } - } - let bank_forks = Arc::new(RwLock::new(Self { - root: Arc::new(AtomicSlot::new(root)), - banks, - descendants, - snapshot_config: None, - accounts_hash_interval_slots: std::u64::MAX, - last_accounts_hash_slot: root, - in_vote_only_mode: Arc::new(AtomicBool::new(false)), - highest_slot_at_startup: 0, - scheduler_pool: None, - })); - - for bank in bank_forks.read().unwrap().banks.values() { - bank.loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_forks.clone()); - } - - bank_forks - } - pub fn install_scheduler_pool(&mut self, pool: InstalledSchedulerPoolArc) { info!("Installed new scheduler_pool into bank_forks: {:?}", pool); assert!( @@ -678,13 +671,6 @@ impl BankForks { /// Determine if this bank should request an epoch accounts hash #[must_use] fn should_request_epoch_accounts_hash(&self, bank: &Bank) -> bool { - if !bank - .feature_set - .is_active(&feature_set::epoch_accounts_hash::id()) - { - return false; - } - if !epoch_accounts_hash_utils::is_enabled_this_epoch(bank) { return false; } @@ -761,21 +747,6 @@ mod tests { assert_eq!(bank_forks.working_bank().tick_height(), 1); } - #[test] - fn test_bank_forks_new_from_banks() { - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let child_bank = Arc::new(Bank::new_from_parent(bank.clone(), &Pubkey::default(), 1)); - - let bank_forks = BankForks::new_from_banks(&[bank.clone(), child_bank.clone()], 0); - assert_eq!(bank_forks.read().unwrap().root(), 0); - assert_eq!(bank_forks.read().unwrap().working_bank().slot(), 1); - - let bank_forks = BankForks::new_from_banks(&[child_bank, bank], 0); - assert_eq!(bank_forks.read().unwrap().root(), 0); - assert_eq!(bank_forks.read().unwrap().working_bank().slot(), 1); - } - #[test] fn test_bank_forks_descendants() { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000);
diff --git a/runtime/src/bank_utils.rs b/runtime/src/bank_utils.rs index 96844da6351257..d8d6144d89d1d7 100644 --- a/runtime/src/bank_utils.rs +++ b/runtime/src/bank_utils.rs @@ -1,13 +1,18 @@ +#[cfg(feature = "dev-context-only-utils")] use { crate::{ bank::Bank, genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}, }, + solana_sdk::{pubkey::Pubkey, signature::Signer}, +}; +use { solana_accounts_db::transaction_results::TransactionResults, - solana_sdk::{pubkey::Pubkey, signature::Signer, transaction::SanitizedTransaction}, + solana_sdk::transaction::SanitizedTransaction, solana_vote::{vote_parser, vote_sender_types::ReplayVoteSender}, }; +#[cfg(feature = "dev-context-only-utils")] pub fn setup_bank_and_vote_pubkeys_for_tests( num_vote_accounts: usize, stake: u64,
diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index dde82f2a63f890..d39a18d567232a 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -39,7 +39,7 @@ use { use {mockall::automock, qualifier_attr::qualifiers}; pub trait InstalledSchedulerPool: Send + Sync + Debug { - fn take_scheduler(&self, context: SchedulingContext) -> DefaultInstalledSchedulerBox; + fn take_scheduler(&self, context: SchedulingContext) -> InstalledSchedulerBox; } #[cfg_attr(doc, aquamarine::aquamarine)] @@ -107,28 +107,36 @@ pub trait InstalledScheduler: Send + Sync + Debug + 'static { transaction_with_index: &'a (&'a SanitizedTransaction, usize), ); - /// Wait for a scheduler to terminate after it is notified with the given reason. + /// Wait for a scheduler to terminate after processing. /// - /// Firstly, this function blocks the current thread while waiting for the scheduler to - /// complete all of the executions for the scheduled transactions. This means the scheduler has - /// prepared the finalized `ResultWithTimings` at least internally at the time of existing from - /// this function. If no trsanction is scheduled, the result and timing will be `Ok(())` and - /// `ExecuteTimings::default()` respectively. This is done in the same way regardless of - /// `WaitReason`. + /// This function blocks the current thread while waiting for the scheduler to complete all of + /// the executions for the scheduled transactions and to return the finalized + /// `ResultWithTimings`. Along with the result, this function also uninstalls the scheduler + /// itself from the bank by consuming `self`. /// - /// After that, the scheduler may behave differently depending on the reason, regarding the - /// final bookkeeping. Specifically, this function guaranteed to return - /// `Some(finalized_result_with_timings)` unless the reason is `PausedForRecentBlockhash`. In - /// the case of `PausedForRecentBlockhash`, the scheduler is responsible to retain the - /// finalized `ResultWithTimings` until it's `wait_for_termination()`-ed with one of the other - /// two reasons later. - #[must_use] - fn wait_for_termination(&mut self, reason: &WaitReason) -> Option<ResultWithTimings>; + /// If no transaction is scheduled, the result and timing will be `Ok(())` and + /// `ExecuteTimings::default()` respectively. + fn wait_for_termination( + self: Box<Self>, + is_dropped: bool, + ) -> (ResultWithTimings, UninstalledSchedulerBox); + + /// Pause a scheduler after processing to update the bank's recent blockhash. + /// + /// This function blocks the current thread like wait_for_termination(). However, the scheduler + /// won't be consumed. 
This means the scheduler is responsible for retaining the finalized + /// `ResultWithTimings` internally until it's `wait_for_termination()`-ed to collect the result + /// later. + fn pause_for_recent_blockhash(&mut self); +} +#[cfg_attr(feature = "dev-context-only-utils", automock)] +pub trait UninstalledScheduler: Send + Sync + Debug + 'static { fn return_to_pool(self: Box<Self>); } -pub type DefaultInstalledSchedulerBox = Box<dyn InstalledScheduler>; +pub type InstalledSchedulerBox = Box<dyn InstalledScheduler>; +pub type UninstalledSchedulerBox = Box<dyn UninstalledScheduler>; pub type InstalledSchedulerPoolArc = Arc<dyn InstalledSchedulerPool>; @@ -165,9 +173,9 @@ impl SchedulingContext { pub type ResultWithTimings = (Result<()>, ExecuteTimings); -/// A hint from the bank about the reason the caller is waiting on its scheduler termination. +/// A hint from the bank about the reason the caller is waiting on its scheduler. #[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum WaitReason { +enum WaitReason { // The bank wants its scheduler to terminate after the completion of transaction execution, in // order to freeze itself immediately thereafter. This is by far the most normal wait reason. // @@ -178,8 +186,9 @@ pub enum WaitReason { // The bank wants its scheduler to terminate just like `TerminatedToFreeze` and indicate that // Drop::drop() is the caller. DroppedFromBankForks, - // The bank wants its scheduler to pause the scheduler after the completion without being - // returned to the pool to collect scheduler's internally-held `ResultWithTimings` later. + // The bank wants its scheduler to pause after the completion without being returned to the + // pool. This is to update the bank's recent blockhash and to collect the scheduler's + // internally-held `ResultWithTimings` later. PausedForRecentBlockhash, } @@ -192,6 +201,15 @@ impl WaitReason { WaitReason::TerminatedToFreeze | WaitReason::DroppedFromBankForks => false, } } + + pub fn is_dropped(&self) -> bool { + // Exhaustive `match` is preferred here over `matches!()` to trigger an explicit + // decision to be made, should we add new variants like `PausedForFooBar`... 
+ match self { + WaitReason::DroppedFromBankForks => true, + WaitReason::TerminatedToFreeze | WaitReason::PausedForRecentBlockhash => false, + } + } } /// Very thin wrapper around Arc @@ -221,11 +239,11 @@ pub struct BankWithSchedulerInner { bank: Arc, scheduler: InstalledSchedulerRwLock, } -pub type InstalledSchedulerRwLock = RwLock>; +pub type InstalledSchedulerRwLock = RwLock>; impl BankWithScheduler { #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] - pub(crate) fn new(bank: Arc, scheduler: Option) -> Self { + pub(crate) fn new(bank: Arc, scheduler: Option) -> Self { if let Some(bank_in_context) = scheduler .as_ref() .map(|scheduler| scheduler.context().bank()) @@ -259,6 +277,7 @@ impl BankWithScheduler { self.inner.bank.register_tick(hash, &self.inner.scheduler); } + #[cfg(feature = "dev-context-only-utils")] pub fn fill_bank_with_ticks_for_tests(&self) { self.do_fill_bank_with_ticks_for_tests(&self.inner.scheduler); } @@ -340,18 +359,18 @@ impl BankWithSchedulerInner { ); let mut scheduler = scheduler.write().unwrap(); - let result_with_timings = if scheduler.is_some() { - let result_with_timings = scheduler - .as_mut() - .and_then(|scheduler| scheduler.wait_for_termination(&reason)); - if !reason.is_paused() { - let scheduler = scheduler.take().expect("scheduler after waiting"); - scheduler.return_to_pool(); - } - result_with_timings - } else { - None - }; + let result_with_timings = + if let Some(scheduler) = scheduler.as_mut().filter(|_| reason.is_paused()) { + scheduler.pause_for_recent_blockhash(); + None + } else if let Some(scheduler) = scheduler.take() { + let (result_with_timings, uninstalled_scheduler) = + scheduler.wait_for_termination(reason.is_dropped()); + uninstalled_scheduler.return_to_pool(); + Some(result_with_timings) + } else { + None + }; debug!( "wait_for_scheduler_termination(slot: {}, reason: {:?}): finished with: {:?}...", bank.slot(), @@ -410,39 +429,42 @@ mod tests { assert_matches::assert_matches, mockall::Sequence, solana_sdk::system_transaction, + std::sync::Mutex, }; fn setup_mocked_scheduler_with_extra( bank: Arc, - wait_reasons: impl Iterator, + is_dropped_flags: impl Iterator, f: Option, - ) -> DefaultInstalledSchedulerBox { + ) -> InstalledSchedulerBox { let mut mock = MockInstalledScheduler::new(); - let mut seq = Sequence::new(); + let seq = Arc::new(Mutex::new(Sequence::new())); mock.expect_context() .times(1) - .in_sequence(&mut seq) + .in_sequence(&mut seq.lock().unwrap()) .return_const(SchedulingContext::new(bank)); - for wait_reason in wait_reasons { + for wait_reason in is_dropped_flags { + let seq_cloned = seq.clone(); mock.expect_wait_for_termination() .with(mockall::predicate::eq(wait_reason)) .times(1) - .in_sequence(&mut seq) + .in_sequence(&mut seq.lock().unwrap()) .returning(move |_| { - if wait_reason.is_paused() { - None - } else { - Some((Ok(()), ExecuteTimings::default())) - } + let mut mock_uninstalled = MockUninstalledScheduler::new(); + mock_uninstalled + .expect_return_to_pool() + .times(1) + .in_sequence(&mut seq_cloned.lock().unwrap()) + .returning(|| ()); + ( + (Ok(()), ExecuteTimings::default()), + Box::new(mock_uninstalled), + ) }); } - mock.expect_return_to_pool() - .times(1) - .in_sequence(&mut seq) - .returning(|| ()); if let Some(f) = f { f(&mut mock); } @@ -452,11 +474,11 @@ mod tests { fn setup_mocked_scheduler( bank: Arc, - wait_reasons: impl Iterator, - ) -> DefaultInstalledSchedulerBox { + is_dropped_flags: impl Iterator, + ) -> InstalledSchedulerBox { setup_mocked_scheduler_with_extra( bank, - 
wait_reasons, + is_dropped_flags, None:: ()>, ) } @@ -468,10 +490,7 @@ mod tests { let bank = Arc::new(Bank::default_for_tests()); let bank = BankWithScheduler::new( bank.clone(), - Some(setup_mocked_scheduler( - bank, - [WaitReason::TerminatedToFreeze].into_iter(), - )), + Some(setup_mocked_scheduler(bank, [false].into_iter())), ); assert!(bank.has_installed_scheduler()); assert_matches!(bank.wait_for_completed_scheduler(), Some(_)); @@ -501,10 +520,7 @@ mod tests { let bank = Arc::new(Bank::default_for_tests()); let bank = BankWithScheduler::new( bank.clone(), - Some(setup_mocked_scheduler( - bank, - [WaitReason::DroppedFromBankForks].into_iter(), - )), + Some(setup_mocked_scheduler(bank, [true].into_iter())), ); drop(bank); } @@ -516,13 +532,15 @@ mod tests { let bank = Arc::new(crate::bank::tests::create_simple_test_bank(42)); let bank = BankWithScheduler::new( bank.clone(), - Some(setup_mocked_scheduler( + Some(setup_mocked_scheduler_with_extra( bank, - [ - WaitReason::PausedForRecentBlockhash, - WaitReason::TerminatedToFreeze, - ] - .into_iter(), + [false].into_iter(), + Some(|mocked: &mut MockInstalledScheduler| { + mocked + .expect_pause_for_recent_blockhash() + .times(1) + .returning(|| ()); + }), )), ); goto_end_of_slot_with_scheduler(&bank); @@ -547,7 +565,7 @@ mod tests { let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let mocked_scheduler = setup_mocked_scheduler_with_extra( bank.clone(), - [WaitReason::DroppedFromBankForks].into_iter(), + [true].into_iter(), Some(|mocked: &mut MockInstalledScheduler| { mocked .expect_schedule_execution() diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index e6ba2b1bd8969b..b0884a6f185c20 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -4,6 +4,7 @@ #[macro_use] extern crate lazy_static; +pub mod accounts; pub mod accounts_background_service; pub mod bank; pub mod bank_client; diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index c41d5a72bd397f..ece749387a9147 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -142,6 +142,7 @@ type SlotPrioritizationFee = DashMap; /// Stores up to MAX_NUM_RECENT_BLOCKS recent block's prioritization fee, /// A separate internal thread `service_thread` handles additional tasks when a bank is frozen, /// and collecting stats and reporting metrics. 
+#[derive(Debug)] pub struct PrioritizationFeeCache { cache: Arc>>>, service_thread: Option>, diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 078da133979f64..ddcaef833b8275 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -620,7 +620,7 @@ where bank_fields.incremental_snapshot_persistence.as_ref(), )?; - let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot); + let bank_rc = BankRc::new(Accounts::new(Arc::new(accounts_db)), bank_fields.slot); let runtime_config = Arc::new(runtime_config.clone()); // if limit_load_slot_count_from_snapshot is set, then we need to side-step some correctness checks beneath this call @@ -941,8 +941,6 @@ where .set(rent_paying_accounts_by_partition) .unwrap(); - accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule, snapshot_slot); - handle.join().unwrap(); measure_notify.stop(); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 278d6d68da8bc1..f9d45b372f5fc4 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -223,12 +223,8 @@ mod serde_snapshot_tests { fn test_accounts_serialize_style(serde_style: SerdeStyle) { solana_logger::setup(); let (_accounts_dir, paths) = get_temp_accounts_paths(4).unwrap(); - let accounts = Accounts::new_with_config_for_tests( - paths, - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_for_tests(paths, &ClusterType::Development); + let accounts = Accounts::new(Arc::new(accounts_db)); let slot = 0; let mut pubkeys: Vec = vec![]; @@ -260,7 +256,7 @@ mod serde_snapshot_tests { let buf = writer.into_inner(); let mut reader = BufReader::new(&buf[..]); let (_accounts_dir, daccounts_paths) = get_temp_accounts_paths(2).unwrap(); - let daccounts = Accounts::new_empty( + let daccounts = Accounts::new(Arc::new( accountsdb_from_stream( serde_style, &mut reader, @@ -268,7 +264,7 @@ mod serde_snapshot_tests { storage_and_next_append_vec_id, ) .unwrap(), - ); + )); check_accounts_local(&daccounts, &pubkeys, 100); let daccounts_delta_hash = daccounts.accounts_db.calculate_accounts_delta_hash(slot); assert_eq!(accounts_delta_hash, daccounts_delta_hash); @@ -286,7 +282,7 @@ mod serde_snapshot_tests { solana_logger::setup(); let unrooted_slot = 9; let unrooted_bank_id = 9; - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let key = solana_sdk::pubkey::new_rand(); let account0 = AccountSharedData::new(1, 0, &key); db.store_for_tests(unrooted_slot, &[(&key, &account0)]); @@ -446,9 +442,6 @@ mod serde_snapshot_tests { let account2 = AccountSharedData::new(some_lamport + 1, no_data, &owner); let pubkey2 = solana_sdk::pubkey::new_rand(); - let filler_account = AccountSharedData::new(some_lamport, no_data, &owner); - let filler_account_pubkey = solana_sdk::pubkey::new_rand(); - let accounts = AccountsDb::new_single_for_tests(); let mut current_slot = 1; @@ -459,12 +452,6 @@ mod serde_snapshot_tests { accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]); - // Store the account a few times. - // use to be: store enough accounts such that an additional store for slot 2 is created. 
- // but we use the write cache now - for _ in 0..3 { - accounts.store_for_tests(current_slot, &[(&filler_account_pubkey, &filler_account)]); - } accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 62ac8285b1cea9..67464230c2ef77 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -13,16 +13,16 @@ use { snapshot_hash::SnapshotHash, snapshot_package::{AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage}, snapshot_utils::{ - self, archive_snapshot_package, build_storage_from_snapshot_dir, - delete_contents_of_path, deserialize_snapshot_data_file, - deserialize_snapshot_data_files, get_bank_snapshot_dir, get_highest_bank_snapshot_post, - get_highest_full_snapshot_archive_info, get_highest_incremental_snapshot_archive_info, - get_snapshot_file_name, get_storages_to_serialize, hard_link_storages_to_snapshot, - serialize_snapshot_data_file, verify_and_unarchive_snapshots, - verify_unpacked_snapshots_dir_and_version, write_snapshot_version_file, - AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotType, SnapshotError, - SnapshotRootPaths, SnapshotVersion, StorageAndNextAppendVecId, - UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, + self, archive_snapshot_package, delete_contents_of_path, + deserialize_snapshot_data_file, deserialize_snapshot_data_files, get_bank_snapshot_dir, + get_highest_bank_snapshot_post, get_highest_full_snapshot_archive_info, + get_highest_incremental_snapshot_archive_info, get_snapshot_file_name, + get_storages_to_serialize, hard_link_storages_to_snapshot, + rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, + verify_and_unarchive_snapshots, verify_unpacked_snapshots_dir_and_version, + write_snapshot_version_file, AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, + BankSnapshotType, SnapshotError, SnapshotRootPaths, SnapshotVersion, + StorageAndNextAppendVecId, UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, }, status_cache, }, @@ -202,18 +202,18 @@ fn serialize_status_cache( }) } -#[derive(Debug, Default)] -pub struct BankFromArchiveTimings { - pub rebuild_bank_from_snapshots_us: u64, - pub full_snapshot_untar_us: u64, - pub incremental_snapshot_untar_us: u64, - pub verify_snapshot_bank_us: u64, +#[derive(Debug)] +pub struct BankFromArchivesTimings { + pub untar_full_snapshot_archive_us: u64, + pub untar_incremental_snapshot_archive_us: u64, + pub rebuild_bank_us: u64, + pub verify_bank_us: u64, } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct BankFromDirTimings { - pub rebuild_bank_from_snapshot_us: u64, - pub build_storage_us: u64, + pub rebuild_storages_us: u64, + pub rebuild_bank_us: u64, } /// Utility for parsing out bank specific information from a snapshot archive. 
This utility can be used @@ -276,7 +276,7 @@ pub fn bank_from_snapshot_archives( accounts_db_config: Option, accounts_update_notifier: Option, exit: Arc, -) -> snapshot_utils::Result<(Bank, BankFromArchiveTimings)> { +) -> snapshot_utils::Result<(Bank, BankFromArchivesTimings)> { info!( "Loading bank from full snapshot archive: {}, and incremental snapshot archive: {:?}", full_snapshot_archive_info.path().display(), @@ -375,37 +375,29 @@ pub fn bank_from_snapshot_archives( } measure_verify.stop(); - let timings = BankFromArchiveTimings { - rebuild_bank_from_snapshots_us: measure_rebuild.as_us(), - full_snapshot_untar_us: unarchived_full_snapshot.measure_untar.as_us(), - incremental_snapshot_untar_us: unarchived_incremental_snapshot + let timings = BankFromArchivesTimings { + untar_full_snapshot_archive_us: unarchived_full_snapshot.measure_untar.as_us(), + untar_incremental_snapshot_archive_us: unarchived_incremental_snapshot .map_or(0, |unarchive_preparation_result| { unarchive_preparation_result.measure_untar.as_us() }), - verify_snapshot_bank_us: measure_verify.as_us(), + rebuild_bank_us: measure_rebuild.as_us(), + verify_bank_us: measure_verify.as_us(), }; datapoint_info!( "bank_from_snapshot_archives", ( - "full_snapshot_untar_us", - timings.full_snapshot_untar_us, + "untar_full_snapshot_archive_us", + timings.untar_full_snapshot_archive_us, i64 ), ( - "incremental_snapshot_untar_us", - timings.incremental_snapshot_untar_us, - i64 - ), - ( - "rebuild_bank_from_snapshots_us", - timings.rebuild_bank_from_snapshots_us, - i64 - ), - ( - "verify_snapshot_bank_us", - timings.verify_snapshot_bank_us, + "untar_incremental_snapshot_archive_us", + timings.untar_incremental_snapshot_archive_us, i64 ), + ("rebuild_bank_us", timings.rebuild_bank_us, i64), + ("verify_bank_us", timings.verify_bank_us, i64), ); Ok((bank, timings)) } @@ -506,11 +498,15 @@ pub fn bank_from_snapshot_dir( let next_append_vec_id = Arc::new(AtomicAppendVecId::new(0)); - let (storage, measure_build_storage) = measure!( - build_storage_from_snapshot_dir(bank_snapshot, account_paths, next_append_vec_id.clone())?, - "build storage from snapshot dir" + let (storage, measure_rebuild_storages) = measure!( + rebuild_storages_from_snapshot_dir( + bank_snapshot, + account_paths, + next_append_vec_id.clone() + )?, + "rebuild storages from snapshot dir" ); - info!("{}", measure_build_storage); + info!("{}", measure_rebuild_storages); let next_append_vec_id = Arc::try_unwrap(next_append_vec_id).expect("this is the only strong reference"); @@ -518,46 +514,39 @@ pub fn bank_from_snapshot_dir( storage, next_append_vec_id, }; - let mut measure_rebuild = Measure::start("rebuild bank from snapshot"); - let bank = rebuild_bank_from_snapshot( - bank_snapshot, - account_paths, - storage_and_next_append_vec_id, - genesis_config, - runtime_config, - debug_keys, - additional_builtins, - account_secondary_indexes, - limit_load_slot_count_from_snapshot, - shrink_ratio, - verify_index, - accounts_db_config, - accounts_update_notifier, - exit, - )?; - measure_rebuild.stop(); - info!("{}", measure_rebuild); + let (bank, measure_rebuild_bank) = measure!( + rebuild_bank_from_snapshot( + bank_snapshot, + account_paths, + storage_and_next_append_vec_id, + genesis_config, + runtime_config, + debug_keys, + additional_builtins, + account_secondary_indexes, + limit_load_slot_count_from_snapshot, + shrink_ratio, + verify_index, + accounts_db_config, + accounts_update_notifier, + exit, + )?, + "rebuild bank from snapshot" + ); + info!("{}", measure_rebuild_bank); 
     // Skip bank.verify_snapshot_bank. Subsequent snapshot requests/accounts hash verification requests
     // will calculate and check the accounts hash, so we will still have safety/correctness there.
     bank.set_initial_accounts_hash_verification_completed();
 
     let timings = BankFromDirTimings {
-        rebuild_bank_from_snapshot_us: measure_rebuild.as_us(),
-        build_storage_us: measure_build_storage.as_us(),
+        rebuild_storages_us: measure_rebuild_storages.as_us(),
+        rebuild_bank_us: measure_rebuild_bank.as_us(),
     };
     datapoint_info!(
         "bank_from_snapshot_dir",
-        (
-            "build_storage_from_snapshot_dir_us",
-            timings.build_storage_us,
-            i64
-        ),
-        (
-            "rebuild_bank_from_snapshot_us",
-            timings.rebuild_bank_from_snapshot_us,
-            i64
-        ),
+        ("rebuild_storages_us", timings.rebuild_storages_us, i64),
+        ("rebuild_bank_us", timings.rebuild_bank_us, i64),
     );
     Ok((bank, timings))
 }
@@ -1204,6 +1193,7 @@ pub fn package_and_archive_incremental_snapshot(
     ))
 }
 
+#[cfg(feature = "dev-context-only-utils")]
 pub fn create_snapshot_dirs_for_tests(
     genesis_config: &GenesisConfig,
     bank_snapshots_dir: impl AsRef<Path>,
@@ -1260,6 +1250,7 @@ mod tests {
     use {
         super::*,
         crate::{
+            bank_forks::BankForks,
             genesis_utils,
             snapshot_utils::{
                 clean_orphaned_account_snapshot_dirs, create_all_accounts_run_and_snapshot_dirs,
@@ -1283,9 +1274,23 @@ mod tests {
             system_transaction,
             transaction::SanitizedTransaction,
         },
-        std::sync::{atomic::Ordering, Arc},
+        std::sync::{atomic::Ordering, Arc, RwLock},
     };
 
+    fn new_bank_from_parent_with_bank_forks(
+        bank_forks: &RwLock<BankForks>,
+        parent: Arc<Bank>,
+        collector_id: &Pubkey,
+        slot: Slot,
+    ) -> Arc<Bank> {
+        let bank = Bank::new_from_parent(parent, collector_id, slot);
+        bank_forks
+            .write()
+            .unwrap()
+            .insert(bank)
+            .clone_without_scheduler()
+    }
+
     /// Test roundtrip of bank to a full snapshot, then back again. This test creates the simplest
     /// bank possible, so the contents of the snapshot archive will be quite minimal.
#[test] @@ -1353,7 +1358,7 @@ mod tests { let key5 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1368,7 +1373,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); @@ -1383,7 +1389,8 @@ mod tests { } let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1392,7 +1399,8 @@ mod tests { } let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1401,7 +1409,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1471,7 +1480,7 @@ mod tests { let key5 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1486,7 +1495,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); @@ -1520,7 +1530,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1529,7 +1540,8 @@ mod tests { } let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1538,7 +1550,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1593,7 +1606,7 @@ mod tests { let key3 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer(sol_to_lamports(1.), 
&mint_keypair, &key1.pubkey()) .unwrap(); @@ -1608,7 +1621,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1642,7 +1656,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1651,7 +1666,8 @@ mod tests { } let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) .unwrap(); @@ -1660,7 +1676,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); @@ -1717,7 +1734,7 @@ mod tests { /// - take an incremental snapshot /// - ensure deserializing from this snapshot is equal to this bank /// slot 3: - /// - remove Account2's reference back to slot 2 by transfering from the mint to Account2 + /// - remove Account2's reference back to slot 2 by transferring from the mint to Account2 /// slot 4: /// - ensure `clean_accounts()` has run and that Account1 is gone /// - take another incremental snapshot @@ -1743,13 +1760,14 @@ mod tests { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); let lamports_to_transfer = sol_to_lamports(123_456.); - let bank0 = Arc::new(Bank::new_with_paths_for_tests( + let (bank0, bank_forks) = Bank::new_with_paths_for_tests( &genesis_config, Arc::::default(), vec![accounts_dir.clone()], AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), - )); + ) + .wrap_with_bank_forks_for_tests(); bank0 .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey()) .unwrap(); @@ -1758,7 +1776,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(lamports_to_transfer, &key2, &key1.pubkey()) .unwrap(); @@ -1780,7 +1799,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); let blockhash = bank2.last_blockhash(); let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &key1, @@ -1847,7 +1867,8 @@ mod tests { ); let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); // Update Account2 so that it no longer holds a reference to slot2 bank3 .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey()) @@ -1857,7 +1878,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); while 
!bank4.is_complete() { bank4.register_unique_tick(); } @@ -1925,13 +1947,14 @@ mod tests { let key1 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); while !bank0.is_complete() { bank0.register_unique_tick(); } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); while !bank1.is_complete() { bank1.register_unique_tick(); } @@ -1953,7 +1976,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -2192,12 +2216,18 @@ mod tests { bank.fill_bank_with_ticks_for_tests(); }; - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + let (mut bank, bank_forks) = + Bank::new_with_bank_forks_for_tests(&genesis_config_info.genesis_config); // make some banks, do some transactions, ensure there's some zero-lamport accounts for _ in 0..5 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } @@ -2223,7 +2253,12 @@ mod tests { // make more banks, do more transactions, ensure there's more zero-lamport accounts for _ in 0..5 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 37db9eea724f55..4e7d576f0b6c95 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -43,7 +43,7 @@ pub struct SnapshotMinimizer<'a> { impl<'a> SnapshotMinimizer<'a> { /// Removes all accounts not necessary for replaying slots in the range [starting_slot, ending_slot]. /// `transaction_account_set` should contain accounts used in transactions in the slot range [starting_slot, ending_slot]. - /// This function will accumulate other accounts (rent colleciton, builtins, etc) necessary to replay transactions. + /// This function will accumulate other accounts (rent collection, builtins, etc) necessary to replay transactions. /// /// This function will modify accounts_db by removing accounts not needed to replay [starting_slot, ending_slot], /// and update the bank's capitalization. 
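Aside on the installed-scheduler hunks earlier in this patch (runtime/src/installed_scheduler_pool.rs): teardown is now split into two phases, where pausing leaves the scheduler installed and termination consumes it, yielding an UninstalledScheduler that must be recycled into the pool exactly once. The following is a minimal, compilable sketch of that control flow only; the trait and type definitions here (ResultWithTimings as a plain tuple, the Noop* impls, wait_for_scheduler) are simplified stand-ins, not the real solana-runtime API.

    // Stand-in for (transaction::Result<()>, ExecuteTimings); simplified for this sketch.
    type ResultWithTimings = (Result<(), String>, u64);

    trait InstalledScheduler {
        // Pausing leaves the scheduler installed for reuse within the same slot.
        fn pause_for_recent_blockhash(&mut self);
        // Termination consumes the scheduler and yields a handle that must be
        // returned to the pool exactly once.
        fn wait_for_termination(
            self: Box<Self>,
            is_dropped: bool,
        ) -> (ResultWithTimings, Box<dyn UninstalledScheduler>);
    }

    trait UninstalledScheduler {
        fn return_to_pool(self: Box<Self>);
    }

    struct NoopScheduler;

    impl InstalledScheduler for NoopScheduler {
        fn pause_for_recent_blockhash(&mut self) {}
        fn wait_for_termination(
            self: Box<Self>,
            _is_dropped: bool,
        ) -> (ResultWithTimings, Box<dyn UninstalledScheduler>) {
            ((Ok(()), 0), Box::new(NoopUninstalled))
        }
    }

    struct NoopUninstalled;

    impl UninstalledScheduler for NoopUninstalled {
        fn return_to_pool(self: Box<Self>) {}
    }

    // Mirrors the branch structure of the reworked wait_for_scheduler_termination:
    // pause in place, or terminate and recycle, or do nothing when none is installed.
    fn wait_for_scheduler(
        scheduler: &mut Option<Box<dyn InstalledScheduler>>,
        is_paused: bool,
        is_dropped: bool,
    ) -> Option<ResultWithTimings> {
        if let Some(scheduler) = scheduler.as_mut().filter(|_| is_paused) {
            scheduler.pause_for_recent_blockhash();
            None
        } else if let Some(scheduler) = scheduler.take() {
            let (result_with_timings, uninstalled) = scheduler.wait_for_termination(is_dropped);
            uninstalled.return_to_pool();
            Some(result_with_timings)
        } else {
            None
        }
    }

    fn main() {
        let mut slot_scheduler: Option<Box<dyn InstalledScheduler>> = Some(Box::new(NoopScheduler));
        // Pausing leaves the scheduler in place...
        assert!(wait_for_scheduler(&mut slot_scheduler, true, false).is_none());
        assert!(slot_scheduler.is_some());
        // ...while termination uninstalls it and surfaces the result.
        assert!(wait_for_scheduler(&mut slot_scheduler, false, false).is_some());
        assert!(slot_scheduler.is_none());
    }

The boolean argument mirrors reason.is_dropped() in the hunk above; the consuming Box<Self> receivers make the one-way installed-to-uninstalled transition a type-level guarantee rather than a runtime convention.
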
diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 6685cd269daea6..f5623c550a24bf 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -8,7 +8,7 @@ use { log::*, solana_accounts_db::{ accounts::Accounts, - accounts_db::AccountStorageEntry, + accounts_db::{AccountStorageEntry, AccountsDb}, accounts_hash::{AccountsHash, AccountsHashKind}, epoch_accounts_hash::EpochAccountsHash, rent_collector::RentCollector, @@ -148,7 +148,7 @@ impl AccountsPackage { expected_capitalization: bank.capitalization(), accounts_hash_for_testing, accounts: bank.accounts(), - epoch_schedule: *bank.epoch_schedule(), + epoch_schedule: bank.epoch_schedule().clone(), rent_collector: bank.rent_collector().clone(), is_incremental_accounts_hash_feature_enabled, snapshot_info, @@ -159,6 +159,8 @@ impl AccountsPackage { /// Create a new Accounts Package where basically every field is defaulted. /// Only use for tests; many of the fields are invalid! pub fn default_for_tests() -> Self { + let accounts_db = AccountsDb::default_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); Self { package_kind: AccountsPackageKind::AccountsHashVerifier, slot: Slot::default(), @@ -166,7 +168,7 @@ impl AccountsPackage { snapshot_storages: Vec::default(), expected_capitalization: u64::default(), accounts_hash_for_testing: Option::default(), - accounts: Arc::new(Accounts::default_for_tests()), + accounts: Arc::new(accounts), epoch_schedule: EpochSchedule::default(), rent_collector: RentCollector::default(), is_incremental_accounts_hash_feature_enabled: bool::default(), diff --git a/runtime/src/snapshot_package/compare.rs b/runtime/src/snapshot_package/compare.rs index 75e75119cf0a6e..d951d818c37975 100644 --- a/runtime/src/snapshot_package/compare.rs +++ b/runtime/src/snapshot_package/compare.rs @@ -33,22 +33,22 @@ pub fn cmp_accounts_package_kinds_by_priority( a: &AccountsPackageKind, b: &AccountsPackageKind, ) -> Ordering { - use AccountsPackageKind::*; + use AccountsPackageKind as Kind; match (a, b) { // Epoch Accounts Hash packages - (EpochAccountsHash, EpochAccountsHash) => Equal, - (EpochAccountsHash, _) => Greater, - (_, EpochAccountsHash) => Less, + (Kind::EpochAccountsHash, Kind::EpochAccountsHash) => Equal, + (Kind::EpochAccountsHash, _) => Greater, + (_, Kind::EpochAccountsHash) => Less, // Snapshot packages - (Snapshot(snapshot_kind_a), Snapshot(snapshot_kind_b)) => { + (Kind::Snapshot(snapshot_kind_a), Kind::Snapshot(snapshot_kind_b)) => { cmp_snapshot_kinds_by_priority(snapshot_kind_a, snapshot_kind_b) } - (Snapshot(_), _) => Greater, - (_, Snapshot(_)) => Less, + (Kind::Snapshot(_), _) => Greater, + (_, Kind::Snapshot(_)) => Less, // Accounts Hash Verifier packages - (AccountsHashVerifier, AccountsHashVerifier) => Equal, + (Kind::AccountsHashVerifier, Kind::AccountsHashVerifier) => Equal, } } @@ -58,12 +58,12 @@ pub fn cmp_accounts_package_kinds_by_priority( /// If two `IncrementalSnapshot`s are compared, their base slots are the tiebreaker. 
#[must_use] pub fn cmp_snapshot_kinds_by_priority(a: &SnapshotKind, b: &SnapshotKind) -> Ordering { - use SnapshotKind::*; + use SnapshotKind as Kind; match (a, b) { - (FullSnapshot, FullSnapshot) => Equal, - (FullSnapshot, IncrementalSnapshot(_)) => Greater, - (IncrementalSnapshot(_), FullSnapshot) => Less, - (IncrementalSnapshot(base_slot_a), IncrementalSnapshot(base_slot_b)) => { + (Kind::FullSnapshot, Kind::FullSnapshot) => Equal, + (Kind::FullSnapshot, Kind::IncrementalSnapshot(_)) => Greater, + (Kind::IncrementalSnapshot(_), Kind::FullSnapshot) => Less, + (Kind::IncrementalSnapshot(base_slot_a), Kind::IncrementalSnapshot(base_slot_b)) => { base_slot_a.cmp(base_slot_b) } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index c890c3196f936d..da9ee359543a1e 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -715,7 +715,7 @@ pub fn archive_snapshot_package( .map_err(|err| SnapshotError::IoWithSource(err, "create staging snapshots path"))?; let src_snapshot_dir = &snapshot_package.bank_snapshot_dir; - // To be a source for symlinking and archiving, the path need to be an aboslute path + // To be a source for symlinking and archiving, the path need to be an absolute path let src_snapshot_dir = src_snapshot_dir .canonicalize() .map_err(|_e| SnapshotError::InvalidSnapshotDirPath(src_snapshot_dir.clone()))?; @@ -1211,20 +1211,6 @@ pub(crate) fn get_storages_to_serialize( .collect::>() } -#[derive(Debug, Default)] -pub struct BankFromArchiveTimings { - pub rebuild_bank_from_snapshots_us: u64, - pub full_snapshot_untar_us: u64, - pub incremental_snapshot_untar_us: u64, - pub verify_snapshot_bank_us: u64, -} - -#[derive(Debug, Default)] -pub struct BankFromDirTimings { - pub rebuild_bank_from_snapshot_us: u64, - pub build_storage_us: u64, -} - // From testing, 4 seems to be a sweet spot for ranges of 60M-360M accounts and 16-64 cores. This may need to be tuned later. const PARALLEL_UNTAR_READERS_DEFAULT: usize = 4; @@ -1461,9 +1447,11 @@ fn streaming_snapshot_dir_files( Ok(()) } -/// Perform the common tasks when deserialize a snapshot. Handles reading snapshot file, reading the version file, -/// and then returning those fields plus the rebuilt storage -pub fn build_storage_from_snapshot_dir( +/// Performs the common tasks when deserializing a snapshot +/// +/// Handles reading the snapshot file and version file, +/// then returning those fields plus the rebuilt storages. +pub fn rebuild_storages_from_snapshot_dir( snapshot_info: &BankSnapshotInfo, account_paths: &[PathBuf], next_append_vec_id: Arc, @@ -2036,7 +2024,7 @@ pub fn verify_snapshot_archive( // The new the status_cache file is inside the slot directory together with the snapshot file. // When unpacking an archive, the status_cache file from the archive is one-level up outside of - // the slot direcotry. + // the slot directory. // The unpacked status_cache file need to be put back into the slot directory for the directory // comparison to pass. let existing_unpacked_status_cache_file = @@ -3043,7 +3031,7 @@ mod tests { } // Ensure the remaining incremental snapshots are at the right slot - let expected_remaing_incremental_snapshot_archive_slots = + let expected_remaining_incremental_snapshot_archive_slots = (latest_full_snapshot_archive_slot..) 
.step_by(incremental_snapshot_interval) .take(num_incremental_snapshots_per_full_snapshot) @@ -3060,7 +3048,7 @@ mod tests { .collect::>(); assert_eq!( actual_remaining_incremental_snapshot_archive_slots, - expected_remaing_incremental_snapshot_archive_slots + expected_remaining_incremental_snapshot_archive_slots ); } diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 977c25b180564f..45192e919d2495 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -19,7 +19,7 @@ use { solana_vote::vote_account::{VoteAccount, VoteAccounts}, std::{ collections::{HashMap, HashSet}, - ops::{Add, Deref}, + ops::Add, sync::{Arc, RwLock, RwLockReadGuard}, }, thiserror::Error, @@ -301,7 +301,7 @@ impl Stakes { let delegation = stake_account.delegation(); acc + delegation.stake_activating_and_deactivating( self.epoch, - Some(&self.stake_history), + &self.stake_history, new_rate_activation_epoch, ) }) @@ -333,9 +333,7 @@ impl Stakes { .values() .map(StakeAccount::delegation) .filter(|delegation| &delegation.voter_pubkey == voter_pubkey) - .map(|delegation| { - delegation.stake(epoch, Some(stake_history), new_rate_activation_epoch) - }) + .map(|delegation| delegation.stake(epoch, stake_history, new_rate_activation_epoch)) .sum() } @@ -361,7 +359,7 @@ impl Stakes { let removed_delegation = stake_account.delegation(); let removed_stake = removed_delegation.stake( self.epoch, - Some(&self.stake_history), + &self.stake_history, new_rate_activation_epoch, ); self.vote_accounts @@ -402,11 +400,7 @@ impl Stakes { debug_assert_ne!(stake_account.lamports(), 0u64); let delegation = stake_account.delegation(); let voter_pubkey = delegation.voter_pubkey; - let stake = delegation.stake( - self.epoch, - Some(&self.stake_history), - new_rate_activation_epoch, - ); + let stake = delegation.stake(self.epoch, &self.stake_history, new_rate_activation_epoch); match self.stake_delegations.insert(stake_pubkey, stake_account) { None => self.vote_accounts.add_stake(&voter_pubkey, stake), Some(old_stake_account) => { @@ -414,7 +408,7 @@ impl Stakes { let old_voter_pubkey = old_delegation.voter_pubkey; let old_stake = old_delegation.stake( self.epoch, - Some(&self.stake_history), + &self.stake_history, new_rate_activation_epoch, ); if voter_pubkey != old_voter_pubkey || stake != old_stake { @@ -583,7 +577,6 @@ fn refresh_vote_accounts( } stakes } - let stake_history = Some(stake_history.deref()); let delegated_stakes = thread_pool.install(|| { stake_delegations .par_iter() @@ -703,7 +696,7 @@ pub(crate) mod tests { assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey), - stake.stake(i, None, None) + stake.stake(i, &StakeHistory::default(), None) ); } @@ -715,7 +708,7 @@ pub(crate) mod tests { assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey), - stake.stake(i, None, None) + stake.stake(i, &StakeHistory::default(), None) ); // stays old stake, because only 10 is activated } @@ -730,7 +723,7 @@ pub(crate) mod tests { assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey), - stake.stake(i, None, None) + stake.stake(i, &StakeHistory::default(), None) ); // now stake of 42 is activated } @@ -874,7 +867,7 @@ pub(crate) mod tests { assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey), - stake.stake(stakes.epoch, Some(&stakes.stake_history), None) + stake.stake(stakes.epoch, 
&stakes.stake_history, None) ); assert!(vote_accounts.get(&vote_pubkey2).is_some()); assert_eq!(vote_accounts.get_delegated_stake(&vote_pubkey2), 0); @@ -891,7 +884,7 @@ pub(crate) mod tests { assert!(vote_accounts.get(&vote_pubkey2).is_some()); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey2), - stake.stake(stakes.epoch, Some(&stakes.stake_history), None) + stake.stake(stakes.epoch, &stakes.stake_history, None) ); } } @@ -938,7 +931,7 @@ pub(crate) mod tests { let vote_accounts = stakes.vote_accounts(); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey), - stake.stake(stakes.epoch, Some(&stakes.stake_history), None) + stake.stake(stakes.epoch, &stakes.stake_history, None) ); } let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); @@ -948,7 +941,7 @@ pub(crate) mod tests { let vote_accounts = stakes.vote_accounts(); assert_eq!( vote_accounts.get_delegated_stake(&vote_pubkey), - stake.stake(stakes.epoch, Some(&stakes.stake_history), None) + stake.stake(stakes.epoch, &stakes.stake_history, None) ); } } diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs index d7a1ed590894a1..284acb791a2e6a 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/transaction_priority_details.rs @@ -1,7 +1,6 @@ use { solana_program_runtime::compute_budget_processor::process_compute_budget_instructions, solana_sdk::{ - feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, transaction::{SanitizedTransaction, SanitizedVersionedTransaction}, @@ -24,14 +23,7 @@ pub trait GetTransactionPriorityDetails { instructions: impl Iterator, _round_compute_unit_price_enabled: bool, ) -> Option { - let mut feature_set = FeatureSet::default(); - feature_set.activate( - &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), - 0, - ); - - let compute_budget_limits = - process_compute_budget_instructions(instructions, &feature_set).ok()?; + let compute_budget_limits = process_compute_budget_instructions(instructions).ok()?; Some(TransactionPriorityDetails { priority: compute_budget_limits.compute_unit_price, compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), @@ -68,8 +60,7 @@ mod tests { use { super::*, solana_sdk::{ - compute_budget::{self, ComputeBudgetInstruction}, - instruction::Instruction, + compute_budget::ComputeBudgetInstruction, message::Message, pubkey::Pubkey, signature::{Keypair, Signer}, @@ -192,49 +183,4 @@ mod tests { }) ); } - - #[test] - fn test_get_priority_with_deprecated_compute_unit_request() { - let priority = 1_000; - let units = 200_000; - let additional_fee = units * priority / 1_000_000; - let keypair = Keypair::new(); - let transaction = Transaction::new_unsigned(Message::new( - &[ - system_instruction::transfer(&keypair.pubkey(), &Pubkey::new_unique(), 1), - Instruction::new_with_borsh( - compute_budget::id(), - &ComputeBudgetInstruction::RequestUnitsDeprecated { - units, - additional_fee, - }, - vec![], - ), - ], - Some(&keypair.pubkey()), - )); - - // assert for SanitizedVersionedTransaction - let versioned_transaction = VersionedTransaction::from(transaction.clone()); - let sanitized_versioned_transaction = - SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); - assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: priority as u64, - compute_unit_limit: units as u64 - }) - ); - - // assert for SanitizedTransaction - let 
sanitized_transaction = - SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); - assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: priority as u64, - compute_unit_limit: units as u64 - }) - ); - } } diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index 8c07cf2a144bdb..549839be5fca15 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -9,7 +9,6 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, clock::Slot, - genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule, }, @@ -81,7 +80,7 @@ fn test_shrink_and_clean() { #[test] fn test_bad_bank_hash() { solana_logger::setup(); - let db = AccountsDb::new_for_tests(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new_single_for_tests(); let some_slot: Slot = 0; let max_accounts = 200; diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index 7088e6438e1c22..7c53e1e44a3af3 100755 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -5,12 +5,14 @@ use { solana_runtime::{ bank::Bank, bank_client::BankClient, + bank_forks::BankForks, genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, }, solana_sdk::{ account::from_account, account_utils::StateMut, client::SyncClient, + clock::Slot, epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, hash::Hash, message::Message, @@ -28,19 +30,37 @@ use { vote_instruction, vote_state::{Vote, VoteInit, VoteState, VoteStateVersions}, }, - std::sync::Arc, + std::sync::{Arc, RwLock}, }; +fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock, + parent: Arc, + collector_id: &Pubkey, + slot: Slot, +) -> Arc { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() +} + /// get bank at next epoch + `n` slots -fn next_epoch_and_n_slots(bank: Arc, mut n: usize) -> Arc { +fn next_epoch_and_n_slots( + bank: Arc, + bank_forks: &RwLock, + mut n: usize, +) -> Arc { bank.squash(); let slot = bank.get_slots_in_epoch(bank.epoch()) + bank.slot(); - let mut bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + let mut bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); while n > 0 { bank.squash(); let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); n -= 1; } @@ -49,6 +69,7 @@ fn next_epoch_and_n_slots(bank: Arc, mut n: usize) -> Arc { fn fill_epoch_with_votes( mut bank: Arc, + bank_forks: &RwLock, vote_keypair: &Keypair, mint_keypair: &Keypair, ) -> Arc { @@ -58,7 +79,7 @@ fn fill_epoch_with_votes( while bank.epoch() != old_epoch + 1 { bank.squash(); let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); let bank_client = BankClient::new_shared(bank.clone()); let parent = bank.parent().unwrap(); @@ -84,12 +105,10 @@ fn warmed_up(bank: &Bank, stake_pubkey: &Pubkey) -> bool { stake.delegation.stake == stake.stake( bank.epoch(), - Some( - &from_account::( - &bank.get_account(&sysvar::stake_history::id()).unwrap(), - ) - .unwrap(), - ), + &from_account::( + &bank.get_account(&sysvar::stake_history::id()).unwrap(), + ) + .unwrap(), 
bank.new_warmup_cooldown_rate_epoch(), ) } @@ -99,12 +118,10 @@ fn get_staked(bank: &Bank, stake_pubkey: &Pubkey) -> u64 { .unwrap() .stake( bank.epoch(), - Some( - &from_account::( - &bank.get_account(&sysvar::stake_history::id()).unwrap(), - ) - .unwrap(), - ), + &from_account::( + &bank.get_account(&sysvar::stake_history::id()).unwrap(), + ) + .unwrap(), bank.new_warmup_cooldown_rate_epoch(), ) } @@ -125,7 +142,7 @@ fn test_stake_create_and_split_single_signature() { let staker_pubkey = staker_keypair.pubkey(); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let bank_client = BankClient::new_shared(bank.clone()); let stake_address = @@ -201,7 +218,7 @@ fn test_stake_create_and_split_to_existing_system_account() { let staker_pubkey = staker_keypair.pubkey(); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let bank_client = BankClient::new_shared(bank.clone()); let stake_address = @@ -288,9 +305,8 @@ fn test_stake_account_lifetime() { ); genesis_config.epoch_schedule = EpochSchedule::new(MINIMUM_SLOTS_PER_EPOCH); genesis_config.rent = Rent::default(); - let bank = Bank::new_for_tests(&genesis_config); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mint_pubkey = mint_keypair.pubkey(); - let mut bank = Arc::new(bank); // Need to set the EAH to Valid so that `Bank::new_from_parent()` doesn't panic during freeze // when parent is in the EAH calculation window. bank.rc @@ -392,12 +408,12 @@ fn test_stake_account_lifetime() { break; } // Cycle thru banks until we're fully warmed up - bank = next_epoch_and_n_slots(bank, 0); + bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 0); } // Reward redemption // Submit enough votes to generate rewards - bank = fill_epoch_with_votes(bank, &vote_keypair, &mint_keypair); + bank = fill_epoch_with_votes(bank, bank_forks.as_ref(), &vote_keypair, &mint_keypair); // Test that votes and credits are there let account = bank.get_account(&vote_pubkey).expect("account not found"); @@ -410,13 +426,13 @@ fn test_stake_account_lifetime() { // one vote per slot, might be more slots than 32 in the epoch assert!(vote_state.credits() >= 1); - bank = fill_epoch_with_votes(bank, &vote_keypair, &mint_keypair); + bank = fill_epoch_with_votes(bank, bank_forks.as_ref(), &vote_keypair, &mint_keypair); let pre_staked = get_staked(&bank, &stake_pubkey); let pre_balance = bank.get_balance(&stake_pubkey); // next epoch bank plus one additional slot should pay rewards - bank = next_epoch_and_n_slots(bank, 1); + bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); // Test that balance increased, and that the balance got staked let staked = get_staked(&bank, &stake_pubkey); @@ -490,7 +506,7 @@ fn test_stake_account_lifetime() { .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) .is_err()); - let mut bank = next_epoch_and_n_slots(bank, 1); + let mut bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); let bank_client = BankClient::new_shared(bank.clone()); @@ -536,7 +552,7 @@ fn test_stake_account_lifetime() { if get_staked(&bank, &split_stake_pubkey) == 0 { break; } - bank = next_epoch_and_n_slots(bank, 1); + bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); } let bank_client = BankClient::new_shared(bank.clone()); @@ -577,9 +593,8 @@ fn test_create_stake_account_from_seed() { &solana_sdk::pubkey::new_rand(), 1_000_000, ); - let bank 
= Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let mint_pubkey = mint_keypair.pubkey(); - let bank = Arc::new(bank); let bank_client = BankClient::new_shared(bank.clone()); let seed = "test-string"; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8142c3012694b1..7897a24d1a1be7 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.73.0" +channel = "1.75.0" diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index 7702c59d93b517..de2860573ee145 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -9,8 +9,10 @@ source ci/_ source scripts/patch-crates.sh source scripts/read-cargo-variable.sh +anchor_version=$1 solana_ver=$(readCargoVariable version Cargo.toml) solana_dir=$PWD +cargo="$solana_dir"/cargo cargo_build_sbf="$solana_dir"/cargo-build-sbf cargo_test_sbf="$solana_dir"/cargo-test-sbf @@ -43,15 +45,22 @@ anchor() { set -x rm -rf anchor git clone https://github.com/coral-xyz/anchor.git + cd anchor || exit 1 + + # checkout tag + if [[ -n "$anchor_version" ]]; then + git checkout "$anchor_version" + fi + # copy toolchain file to use solana's rust version - cp "$solana_dir"/rust-toolchain.toml anchor/ - cd anchor + cp "$solana_dir"/rust-toolchain.toml . update_solana_dependencies . "$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - cargo build - cargo test + $cargo test + (cd spl && $cargo_build_sbf --features dex metadata stake) + (cd client && $cargo test --all-features) anchor_dir=$PWD anchor_ver=$(readCargoVariable version "$anchor_dir"/lang/Cargo.toml) @@ -73,8 +82,9 @@ mango() { patch_crates_io_solana Cargo.toml "$solana_dir" patch_crates_io_anchor Cargo.toml "$anchor_dir" - cargo build - cargo test + cd program + $cargo build + $cargo test $cargo_build_sbf $cargo_test_sbf ) @@ -83,19 +93,17 @@ mango() { metaplex() { ( set -x - rm -rf metaplex-program-library - git clone https://github.com/metaplex-foundation/metaplex-program-library - # copy toolchain file to use solana's rust version - cp "$solana_dir"/rust-toolchain.toml metaplex-program-library/ - cd metaplex-program-library + rm -rf mpl-token-metadata + git clone https://github.com/metaplex-foundation/mpl-token-metadata + # copy toolchain file to use solana's rust version + cp "$solana_dir"/rust-toolchain.toml mpl-token-metadata/ + cd mpl-token-metadata/programs/token-metadata/program update_solana_dependencies . "$solana_ver" - update_anchor_dependencies . "$anchor_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - patch_crates_io_anchor Cargo.toml "$anchor_dir" - cargo build - cargo test + $cargo build + $cargo test $cargo_build_sbf $cargo_test_sbf ) diff --git a/scripts/cargo-clippy-nightly.sh b/scripts/cargo-clippy-nightly.sh new file mode 100755 index 00000000000000..5393225542e2f6 --- /dev/null +++ b/scripts/cargo-clippy-nightly.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -o errexit + +here="$(dirname "$0")" +cargo="$(readlink -f "${here}/../cargo")" + +if [[ -z $cargo ]]; then + echo >&2 "Failed to find cargo. Mac readlink doesn't support -f. Consider switching + to gnu readlink with 'brew install coreutils' and then symlink greadlink as + /usr/local/bin/readlink." 
+ exit 1 +fi + +# shellcheck source=ci/rust-version.sh +source "$here/../ci/rust-version.sh" nightly + +# Use nightly clippy, as frozen-abi proc-macro generates a lot of code across +# various crates in this whole monorepo (frozen-abi is enabled only under nightly +# due to the use of unstable rust feature). Likewise, frozen-abi(-macro) crates' +# unit tests are only compiled under nightly. +# Similarly, nightly is desired to run clippy over all of bench files because +# the bench itself isn't stabilized yet... +# ref: https://github.com/rust-lang/rust/issues/66287 +"$here/cargo-for-all-lock-files.sh" -- \ + "+${rust_nightly}" clippy \ + --workspace --all-targets --features dummy-for-ci-check -- \ + --deny=warnings \ + --deny=clippy::default_trait_access \ + --deny=clippy::arithmetic_side_effects \ + --deny=clippy::manual_let_else \ + --deny=clippy::used_underscore_binding \ + --allow=clippy::redundant_clone diff --git a/scripts/cargo-clippy-stable.sh b/scripts/cargo-clippy-stable.sh new file mode 100755 index 00000000000000..ed564503e6ae2a --- /dev/null +++ b/scripts/cargo-clippy-stable.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -o errexit + +here="$(dirname "$0")" +cargo="$(readlink -f "${here}/../cargo")" + +if [[ -z $cargo ]]; then + >&2 echo "Failed to find cargo. Mac readlink doesn't support -f. Consider switching + to gnu readlink with 'brew install coreutils' and then symlink greadlink as + /usr/local/bin/readlink." + exit 1 +fi + +# shellcheck source=ci/rust-version.sh +source "$here/../ci/rust-version.sh" stable + +# temporarily run stable clippy as well to scan the codebase for +# `redundant_clone`s, which is disabled as nightly clippy is buggy: +# https://github.com/solana-labs/solana/issues/31834 +# +# can't use --all-targets: +# error[E0554]: `#![feature]` may not be used on the stable release channel +"$here/cargo-for-all-lock-files.sh" -- \ + clippy \ + --workspace --tests --bins --examples --features dummy-for-ci-check -- \ + --deny=warnings \ + --deny=clippy::default_trait_access \ + --deny=clippy::arithmetic_side_effects \ + --deny=clippy::manual_let_else \ + --deny=clippy::used_underscore_binding diff --git a/scripts/cargo-clippy.sh b/scripts/cargo-clippy.sh index 16419cb2cc944d..2be51de54fb324 100755 --- a/scripts/cargo-clippy.sh +++ b/scripts/cargo-clippy.sh @@ -14,48 +14,9 @@ set -o errexit here="$(dirname "$0")" -cargo="$(readlink -f "${here}/../cargo")" -if [[ -z $cargo ]]; then - >&2 echo "Failed to find cargo. Mac readlink doesn't support -f. Consider switching - to gnu readlink with 'brew install coreutils' and then symlink greadlink as - /usr/local/bin/readlink." - exit 1 -fi +# stable +"$here/cargo-clippy-stable.sh" -# shellcheck source=ci/rust-version.sh -source "$here/../ci/rust-version.sh" - -nightly_clippy_allows=(--allow=clippy::redundant_clone) - -# Use nightly clippy, as frozen-abi proc-macro generates a lot of code across -# various crates in this whole monorepo (frozen-abi is enabled only under nightly -# due to the use of unstable rust feature). Likewise, frozen-abi(-macro) crates' -# unit tests are only compiled under nightly. -# Similarly, nightly is desired to run clippy over all of bench files because -# the bench itself isn't stabilized yet... 
-# ref: https://github.com/rust-lang/rust/issues/66287 -"$here/cargo-for-all-lock-files.sh" -- \ - "+${rust_nightly}" clippy \ - --workspace --all-targets --features dummy-for-ci-check -- \ - --deny=warnings \ - --deny=clippy::default_trait_access \ - --deny=clippy::arithmetic_side_effects \ - --deny=clippy::manual_let_else \ - --deny=clippy::used_underscore_binding \ - "${nightly_clippy_allows[@]}" - -# temporarily run stable clippy as well to scan the codebase for -# `redundant_clone`s, which is disabled as nightly clippy is buggy: -# https://github.com/solana-labs/solana/issues/31834 -# -# can't use --all-targets: -# error[E0554]: `#![feature]` may not be used on the stable release channel -"$here/cargo-for-all-lock-files.sh" -- \ - clippy \ - --workspace --tests --bins --examples --features dummy-for-ci-check -- \ - --deny=warnings \ - --deny=clippy::default_trait_access \ - --deny=clippy::arithmetic_side_effects \ - --deny=clippy::manual_let_else \ - --deny=clippy::used_underscore_binding +# nightly +"$here/cargo-clippy-nightly.sh" diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 4aceef69a4fe73..549aa15550b0eb 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -17,7 +17,8 @@ if [[ $OSTYPE == darwin* ]]; then fi fi -cargo="$("${readlink_cmd}" -f "${here}/../cargo")" +SOLANA_ROOT="$("${readlink_cmd}" -f "${here}/..")" +cargo="${SOLANA_ROOT}/cargo" set -e @@ -149,12 +150,15 @@ mkdir -p "$installDir/bin" # Exclude `spl-token` binary for net.sh builds if [[ -z "$validatorOnly" ]]; then + # shellcheck source=scripts/spl-token-cli-version.sh + source "$SOLANA_ROOT"/scripts/spl-token-cli-version.sh + # the patch-related configs are needed for rust 1.69+ on Windows; see Cargo.toml # shellcheck disable=SC2086 # Don't want to double quote $rust_version "$cargo" $maybeRustVersion \ --config 'patch.crates-io.ntapi.git="https://github.com/solana-labs/ntapi"' \ --config 'patch.crates-io.ntapi.rev="97ede981a1777883ff86d142b75024b023f04fad"' \ - install --locked spl-token-cli --root "$installDir" + install --locked spl-token-cli --root "$installDir" $maybeSplTokenCliVersionArg fi ) diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 33bfbd00d8e4a5..cb405d78f77ca1 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -29,6 +29,7 @@ source ci/rust-version.sh nightly # reason to bend dev-context-only-utils's original intention and that listed # package isn't part of released binaries. 
declare tainted_packages=( + solana-accounts-bench solana-banking-bench solana-ledger-tool ) diff --git a/scripts/coverage-in-disk.sh b/scripts/coverage-in-disk.sh index 50d3e0ac98d317..a6d8e34814130f 100755 --- a/scripts/coverage-in-disk.sh +++ b/scripts/coverage-in-disk.sh @@ -34,7 +34,7 @@ fi coverageFlags=() coverageFlags+=(-Zprofile) # Enable coverage -coverageFlags+=("-Aincomplete_features") # Supress warnings due to frozen abi, which is harmless for it +coverageFlags+=("-Aincomplete_features") # Suppress warnings due to frozen abi, which is harmless for it if [[ $(uname) != Darwin ]]; then # macOS skipped due to https://github.com/rust-lang/rust/issues/63047 coverageFlags+=("-Clink-dead-code") # Dead code should appear red in the report fi diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 93a9afbe33eb8f..d1ed8c752cf8a2 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -34,7 +34,7 @@ fi coverageFlags=() coverageFlags+=(-Zprofile) # Enable coverage -coverageFlags+=("-Aincomplete_features") # Supress warnings due to frozen abi, which is harmless for it +coverageFlags+=("-Aincomplete_features") # Suppress warnings due to frozen abi, which is harmless for it if [[ $(uname) != Darwin ]]; then # macOS skipped due to https://github.com/rust-lang/rust/issues/63047 coverageFlags+=("-Clink-dead-code") # Dead code should appear red in the report fi diff --git a/scripts/patch-crates.sh b/scripts/patch-crates.sh index 813a0a32a6175c..91a3010c8a0bd7 100644 --- a/scripts/patch-crates.sh +++ b/scripts/patch-crates.sh @@ -7,11 +7,15 @@ update_solana_dependencies() { while IFS='' read -r line; do tomls+=("$line"); done < <(find "$project_root" -name Cargo.toml) sed -i -e "s#\(solana-program = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-program = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-program-test = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-program-test = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-sdk = \"\).*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-sdk = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-client = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-client = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-cli-config = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-cli-config = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-clap-utils = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-clap-utils = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-account-decoder = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? 
@@ -30,6 +34,7 @@ patch_crates_io_solana() { solana-account-decoder = { path = "$solana_dir/account-decoder" } solana-clap-utils = { path = "$solana_dir/clap-utils" } solana-client = { path = "$solana_dir/client" } +solana-cli-config = { path = "$solana_dir/cli-config" } solana-program = { path = "$solana_dir/sdk/program" } solana-program-test = { path = "$solana_dir/program-test" } solana-sdk = { path = "$solana_dir/sdk" } diff --git a/scripts/sed-i-all-rs-files-for-rust-analyzer.sh b/scripts/sed-i-all-rs-files-for-rust-analyzer.sh index 4c14819a64d846..c8b4d7d173b3a9 100755 --- a/scripts/sed-i-all-rs-files-for-rust-analyzer.sh +++ b/scripts/sed-i-all-rs-files-for-rust-analyzer.sh @@ -6,7 +6,7 @@ set -e # so, here's some wild hack from ryoqun! if [[ $1 = "doit" ]]; then - # it's true that we put true just for truely-aligned lines + # it's true that we put true just for truly-aligned lines # shellcheck disable=SC2046 # our rust files are sanely named with no need to escape true && sed -i -e 's/#\[cfg(test)\]/#[cfg(escaped_cfg_test)]/g' $(git ls-files :**.rs :^**/build.rs) && diff --git a/scripts/spl-token-cli-version.sh b/scripts/spl-token-cli-version.sh new file mode 100644 index 00000000000000..82559c5bc4bbb6 --- /dev/null +++ b/scripts/spl-token-cli-version.sh @@ -0,0 +1,8 @@ +# populate this on the stable branch +splTokenCliVersion= + +maybeSplTokenCliVersionArg= +if [[ -n "$splTokenCliVersion" ]]; then + # shellcheck disable=SC2034 + maybeSplTokenCliVersionArg="--version $splTokenCliVersion" +fi diff --git a/scripts/system-stats.sh b/scripts/system-stats.sh index 08e27506b26697..12c72ee1e4db24 100755 --- a/scripts/system-stats.sh +++ b/scripts/system-stats.sh @@ -12,11 +12,11 @@ source scripts/configure-metrics.sh while true; do # collect top twice because the first time is inaccurate - top_ouput="$(top -bn2 -d1)" + top_output="$(top -bn2 -d1)" # collect the total cpu usage by subtracting idle usage from 100% - cpu_usage=$(echo "${top_ouput}" | grep '%Cpu(s):' | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | tail -1 | awk '{print 100 - $1}') + cpu_usage=$(echo "${top_output}" | grep '%Cpu(s):' | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | tail -1 | awk '{print 100 - $1}') # collect the total ram usage by dividing used memory / total memory - ram_total_and_usage=$(echo "${top_ouput}" | grep '.*B Mem'| tail -1 | sed "s/.*: *\([0-9.]*\)%* total.*, *\([0-9.]*\)%* used.*/\1 \2/") + ram_total_and_usage=$(echo "${top_output}" | grep '.*B Mem'| tail -1 | sed "s/.*: *\([0-9.]*\)%* total.*, *\([0-9.]*\)%* used.*/\1 \2/") read -r total used <<< "$ram_total_and_usage" ram_usage=$(awk "BEGIN {print $used / $total * 100}") cpu_report="cpu_usage=$cpu_usage,ram_usage=$ram_usage" diff --git a/sdk/README.md b/sdk/README.md index f12b3bfc19186b..e43ee613a6b9f6 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -8,7 +8,7 @@ Use the Solana SDK Crate to write client side applications in Rust. If writing on-chain programs, use the [Solana Program Crate](https://crates.io/crates/solana-program) instead. -More information about Solana is available in the [Solana documentation](https://docs.solana.com/). +More information about Solana is available in the [Solana documentation](https://solana.com/docs). The [Solana Program Library](https://github.com/solana-labs/solana-program-library) provides examples of how to use this crate. 
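As the README hunk above says, solana-sdk is for client-side code. A minimal offline sketch using only APIs that already appear in this patch's tests (Keypair, Signer, system_transaction::transfer); note that Hash::default() is a placeholder for the recent blockhash a real client would fetch over RPC, so a live cluster would reject this transaction as expired.

    use solana_sdk::{
        hash::Hash,
        signature::{Keypair, Signer},
        system_transaction,
    };

    fn main() {
        let payer = Keypair::new();
        let recipient = Keypair::new().pubkey();
        // Build and sign a 1-lamport transfer entirely offline.
        let tx = system_transaction::transfer(&payer, &recipient, 1, Hash::default());
        assert_eq!(tx.signatures.len(), 1);
        println!("signed transfer from {} to {}", payer.pubkey(), recipient);
    }
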
diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index a003a0b91cab67..3635901eeceb4d 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -16,7 +16,7 @@ fn main() { s.replace("--bpf", "--sbf") }) .collect::<Vec<_>>(); - let program = if let Some(arg0) = args.get(0) { + let program = if let Some(arg0) = args.first() { let arg0 = arg0.replace("build-bpf", "build-sbf"); args.remove(0); PathBuf::from(arg0) @@ -25,7 +25,7 @@ fn main() { }; // When run as a cargo subcommand, the first program argument is the subcommand name. // Remove it - if let Some(arg0) = args.get(0) { + if let Some(arg0) = args.first() { if arg0 == "build-bpf" { args.remove(0); } diff --git a/sdk/cargo-build-sbf/Cargo.toml b/sdk/cargo-build-sbf/Cargo.toml index 0d96c4f94ebc53..36ce44d0d5c7d3 100644 --- a/sdk/cargo-build-sbf/Cargo.toml +++ b/sdk/cargo-build-sbf/Cargo.toml @@ -27,6 +27,7 @@ tar = { workspace = true } assert_cmd = { workspace = true } predicates = { workspace = true } serial_test = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } [features] program = [] diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index 3befa78779318e..21b8735787de19 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -16,7 +16,7 @@ fn main() { let cargo_build_sbf = cargo_build_bpf.replace("build-bpf", "build-sbf"); env::set_var("CARGO_BUILD_SBF", cargo_build_sbf); } - let program = if let Some(arg0) = args.get(0) { + let program = if let Some(arg0) = args.first() { let cargo_test_sbf = arg0.replace("test-bpf", "test-sbf"); let cargo_build_sbf = cargo_test_sbf.replace("test-sbf", "build-sbf"); env::set_var("CARGO_BUILD_SBF", cargo_build_sbf); @@ -27,7 +27,7 @@ fn main() { }; // When run as a cargo subcommand, the first program argument is the subcommand name. // Remove it - if let Some(arg0) = args.get(0) { + if let Some(arg0) = args.first() { if arg0 == "test-bpf" { args.remove(0); } diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index f72dcdfcf8eb2f..157592dc37bcaa 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -430,7 +430,6 @@ pub fn derive_clone_zeroed(input: proc_macro::TokenStream) -> proc_macro::TokenS // implementations on `Copy` types are simply wrappers of `Copy`. // This is not the case here, and intentionally so because we want to // guarantee zeroed padding. - #[allow(clippy::incorrect_clone_impl_on_copy_type)] fn clone(&self) -> Self { let mut value = std::mem::MaybeUninit::<Self>::uninit(); unsafe { diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index f608ed61943826..ccd18701eefcc4 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -15,6 +15,7 @@ rust-version = "1.72.0" # solana platform-tools rust version bincode = { workspace = true } blake3 = { workspace = true, features = ["digest", "traits-preview"] } borsh = { workspace = true } +borsh0-10 = { package = "borsh", version = "0.10.3" } borsh0-9 = { package = "borsh", version = "0.9.3" } bs58 = { workspace = true } bv = { workspace = true, features = ["serde"] } diff --git a/sdk/program/README.md b/sdk/program/README.md index 226a3318adc0a8..c93d4a1431eda1 100644 --- a/sdk/program/README.md +++ b/sdk/program/README.md @@ -8,7 +8,7 @@ Use the Solana Program Crate to write on-chain programs in Rust. If writing client-side applications, use the [Solana SDK Crate](https://crates.io/crates/solana-sdk) instead.
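Note (not part of the diff): the args.get(0) to args.first() edits above come from clippy's get_first lint and do not change behavior. A small sketch of the equivalence:

fn main() {
    let args = vec!["build-sbf".to_string(), "--help".to_string()];
    // Both expressions return Option<&String>; `first()` simply names the intent.
    assert_eq!(args.get(0), args.first());
    if let Some(arg0) = args.first() {
        println!("subcommand: {arg0}");
    }
}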
-More information about Solana is available in the [Solana documentation](https://docs.solana.com/). +More information about Solana is available in the [Solana documentation](https://solana.com/docs). [Solana Program Library](https://github.com/solana-labs/solana-program-library) provides examples of how to use this crate. diff --git a/sdk/program/src/address_lookup_table/mod.rs b/sdk/program/src/address_lookup_table/mod.rs index c7a712e4592df3..05a9f8cd1e96e1 100644 --- a/sdk/program/src/address_lookup_table/mod.rs +++ b/sdk/program/src/address_lookup_table/mod.rs @@ -1,6 +1,6 @@ //! The [address lookup table program][np]. //! -//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#address-lookup-table-program +//! [np]: https://docs.solanalabs.com/runtime/programs#address-lookup-table-program pub mod error; pub mod instruction; diff --git a/sdk/program/src/alt_bn128/mod.rs b/sdk/program/src/alt_bn128/mod.rs index 9e60048e4c8889..f214157152c114 100644 --- a/sdk/program/src/alt_bn128/mod.rs +++ b/sdk/program/src/alt_bn128/mod.rs @@ -181,9 +181,7 @@ mod target_arch { let result_point = p + q; let mut result_point_data = [0u8; ALT_BN128_ADDITION_OUTPUT_LEN]; - let result_point_affine: G1 = result_point - .try_into() - .map_err(|_| AltBn128Error::ProjectiveToG1Failed)?; + let result_point_affine: G1 = result_point.into(); result_point_affine .x .serialize_with_mode(&mut result_point_data[..32], Compress::No) diff --git a/sdk/program/src/blake3.rs b/sdk/program/src/blake3.rs index d8351b06c6ad0d..cc50318e336c41 100644 --- a/sdk/program/src/blake3.rs +++ b/sdk/program/src/blake3.rs @@ -31,6 +31,7 @@ const MAX_BASE58_LEN: usize = 44; Hash, AbiExample, )] +#[borsh(crate = "borsh")] #[repr(transparent)] pub struct Hash(pub [u8; HASH_BYTES]); diff --git a/sdk/program/src/borsh.rs b/sdk/program/src/borsh.rs index 90ce42f661f82f..0041aa80602946 100644 --- a/sdk/program/src/borsh.rs +++ b/sdk/program/src/borsh.rs @@ -8,7 +8,7 @@ //! be removed in a future release //! //! [borsh]: https://borsh.io/ -use borsh::{maybestd::io::Error, BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh0_10::{maybestd::io::Error, BorshDeserialize, BorshSchema, BorshSerialize}; /// Get the worst-case packed length for the given BorshSchema /// @@ -19,6 +19,7 @@ use borsh::{maybestd::io::Error, BorshDeserialize, BorshSchema, BorshSerialize}; note = "Please use `borsh0_10::get_packed_len` instead" )] pub fn get_packed_len() -> usize { + #[allow(deprecated)] crate::borsh0_10::get_packed_len::() } @@ -36,6 +37,7 @@ pub fn get_packed_len() -> usize { note = "Please use `borsh0_10::try_from_slice_unchecked` instead" )] pub fn try_from_slice_unchecked(data: &[u8]) -> Result { + #[allow(deprecated)] crate::borsh0_10::try_from_slice_unchecked::(data) } @@ -50,10 +52,11 @@ pub fn try_from_slice_unchecked(data: &[u8]) -> Result(instance: &T) -> Result { + #[allow(deprecated)] crate::borsh0_10::get_instance_packed_len(instance) } -macro_rules! impl_get_packed_len { +macro_rules! impl_get_packed_len_v0 { ($borsh:ident $(,#[$meta:meta])?) => { /// Get the worst-case packed length for the given BorshSchema /// @@ -113,10 +116,72 @@ macro_rules! impl_get_packed_len { } } } -pub(crate) use impl_get_packed_len; +pub(crate) use impl_get_packed_len_v0; -macro_rules! impl_try_from_slice_unchecked { +macro_rules! impl_get_packed_len_v1 { ($borsh:ident $(,#[$meta:meta])?) 
=> { + /// Get the worst-case packed length for the given BorshSchema + /// + /// Note: due to the serializer currently used by Borsh, this function cannot + /// be used on-chain in the Solana SBF execution environment. + $(#[$meta])? + pub fn get_packed_len() -> usize { + let container = $borsh::schema_container_of::(); + get_declaration_packed_len(container.declaration(), &container) + } + + /// Get packed length for the given BorshSchema Declaration + fn get_declaration_packed_len( + declaration: &str, + container: &$borsh::schema::BorshSchemaContainer, + ) -> usize { + match container.get_definition(declaration) { + Some($borsh::schema::Definition::Sequence { length_width, length_range, elements }) if *length_width == 0 => { + *length_range.end() as usize * get_declaration_packed_len(elements, container) + } + Some($borsh::schema::Definition::Enum { tag_width, variants }) => { + (*tag_width as usize) + variants + .iter() + .map(|(_, _, declaration)| get_declaration_packed_len(declaration, container)) + .max() + .unwrap_or(0) + } + Some($borsh::schema::Definition::Struct { fields }) => match fields { + $borsh::schema::Fields::NamedFields(named_fields) => named_fields + .iter() + .map(|(_, declaration)| get_declaration_packed_len(declaration, container)) + .sum(), + $borsh::schema::Fields::UnnamedFields(declarations) => declarations + .iter() + .map(|declaration| get_declaration_packed_len(declaration, container)) + .sum(), + $borsh::schema::Fields::Empty => 0, + }, + Some($borsh::schema::Definition::Sequence { + .. + }) => panic!("Missing support for Definition::Sequence"), + Some($borsh::schema::Definition::Tuple { elements }) => elements + .iter() + .map(|element| get_declaration_packed_len(element, container)) + .sum(), + Some($borsh::schema::Definition::Primitive(size)) => *size as usize, + None => match declaration { + "bool" | "u8" | "i8" => 1, + "u16" | "i16" => 2, + "u32" | "i32" => 4, + "u64" | "i64" => 8, + "u128" | "i128" => 16, + "nil" => 0, + _ => panic!("Missing primitive type: {declaration}"), + }, + } + } + } +} +pub(crate) use impl_get_packed_len_v1; + +macro_rules! impl_try_from_slice_unchecked { + ($borsh:ident, $borsh_io:ident $(,#[$meta:meta])?) => { /// Deserializes without checking that the entire slice has been consumed /// /// Normally, `try_from_slice` checks the length of the final slice to ensure @@ -127,7 +192,7 @@ macro_rules! impl_try_from_slice_unchecked { /// user passes a buffer destined for a different type, the error won't get caught /// as easily. $(#[$meta])? - pub fn try_from_slice_unchecked(data: &[u8]) -> Result { + pub fn try_from_slice_unchecked(data: &[u8]) -> Result { let mut data_mut = data; let result = T::deserialize(&mut data_mut)?; Ok(result) @@ -137,21 +202,21 @@ macro_rules! impl_try_from_slice_unchecked { pub(crate) use impl_try_from_slice_unchecked; macro_rules! impl_get_instance_packed_len { - ($borsh:ident $(,#[$meta:meta])?) => { + ($borsh:ident, $borsh_io:ident $(,#[$meta:meta])?) 
=> { /// Helper struct which to count how much data would be written during serialization #[derive(Default)] struct WriteCounter { count: usize, } - impl $borsh::maybestd::io::Write for WriteCounter { - fn write(&mut self, data: &[u8]) -> Result { + impl $borsh_io::Write for WriteCounter { + fn write(&mut self, data: &[u8]) -> Result { let amount = data.len(); self.count += amount; Ok(amount) } - fn flush(&mut self) -> Result<(), $borsh::maybestd::io::Error> { + fn flush(&mut self) -> Result<(), $borsh_io::Error> { Ok(()) } } @@ -163,7 +228,7 @@ macro_rules! impl_get_instance_packed_len { /// length only from the type's schema, this can be used when an instance already /// exists, to figure out how much space to allocate in an account. $(#[$meta])? - pub fn get_instance_packed_len(instance: &T) -> Result { + pub fn get_instance_packed_len(instance: &T) -> Result { let mut counter = WriteCounter::default(); instance.serialize(&mut counter)?; Ok(counter.count) @@ -174,11 +239,13 @@ pub(crate) use impl_get_instance_packed_len; #[cfg(test)] macro_rules! impl_tests { - ($borsh:ident) => { + ($borsh:ident, $borsh_io:ident) => { + extern crate alloc; use { super::*, std::{collections::HashMap, mem::size_of}, - $borsh::{maybestd::io::ErrorKind, BorshDeserialize, BorshSerialize}, + $borsh::{BorshDeserialize, BorshSerialize}, + $borsh_io::ErrorKind, }; type Child = [u8; 64]; diff --git a/sdk/program/src/borsh0_10.rs b/sdk/program/src/borsh0_10.rs index f29640885e14d6..c7d190f820b366 100644 --- a/sdk/program/src/borsh0_10.rs +++ b/sdk/program/src/borsh0_10.rs @@ -2,16 +2,40 @@ //! Utilities for the [borsh] serialization format, version 0.10. //! //! [borsh]: https://borsh.io/ -use crate::borsh::{ - impl_get_instance_packed_len, impl_get_packed_len, impl_try_from_slice_unchecked, +use { + crate::borsh::{ + impl_get_instance_packed_len, impl_get_packed_len_v0, impl_try_from_slice_unchecked, + }, + borsh0_10::maybestd::io, }; -impl_get_packed_len!(borsh); -impl_try_from_slice_unchecked!(borsh); -impl_get_instance_packed_len!(borsh); +impl_get_packed_len_v0!( + borsh0_10, + #[deprecated( + since = "1.18.0", + note = "Please upgrade to Borsh 1.X and use `borsh1::get_packed_len` instead" + )] +); +impl_try_from_slice_unchecked!( + borsh0_10, + io, + #[deprecated( + since = "1.18.0", + note = "Please upgrade to Borsh 1.X and use `borsh1::try_from_slice_unchecked` instead" + )] +); +impl_get_instance_packed_len!( + borsh0_10, + io, + #[deprecated( + since = "1.18.0", + note = "Please upgrade to Borsh 1.X and use `borsh1::get_instance_packed_len` instead" + )] +); #[cfg(test)] +#[allow(deprecated)] mod tests { - use crate::borsh::impl_tests; - impl_tests!(borsh); + use {crate::borsh::impl_tests, borsh0_10::maybestd::io}; + impl_tests!(borsh0_10, io); } diff --git a/sdk/program/src/borsh0_9.rs b/sdk/program/src/borsh0_9.rs index dd9e401db189c9..d7d1e97013f898 100644 --- a/sdk/program/src/borsh0_9.rs +++ b/sdk/program/src/borsh0_9.rs @@ -5,35 +5,40 @@ //! borsh 0.9, even though this crate canonically uses borsh 0.10. //! //! 
[borsh]: https://borsh.io/ -use crate::borsh::{ - impl_get_instance_packed_len, impl_get_packed_len, impl_try_from_slice_unchecked, +use { + crate::borsh::{ + impl_get_instance_packed_len, impl_get_packed_len_v0, impl_try_from_slice_unchecked, + }, + borsh0_9::maybestd::io, }; -impl_get_packed_len!( +impl_get_packed_len_v0!( borsh0_9, #[deprecated( since = "1.17.0", - note = "Please upgrade to Borsh 0.10 and use `borsh0_10::get_packed_len` instead" + note = "Please upgrade to Borsh 1.X and use `borsh1::get_packed_len` instead" )] ); impl_try_from_slice_unchecked!( borsh0_9, + io, #[deprecated( since = "1.17.0", - note = "Please upgrade to Borsh 0.10 and use `borsh0_10::try_from_slice_unchecked` instead" + note = "Please upgrade to Borsh 1.X and use `borsh1::try_from_slice_unchecked` instead" )] ); impl_get_instance_packed_len!( borsh0_9, + io, #[deprecated( since = "1.17.0", - note = "Please upgrade to Borsh 0.10 and use `borsh0_10::get_instance_packed_len` instead" + note = "Please upgrade to Borsh 1.X and use `borsh1::get_instance_packed_len` instead" )] ); #[cfg(test)] #[allow(deprecated)] mod tests { - use crate::borsh::impl_tests; - impl_tests!(borsh0_9); + use {crate::borsh::impl_tests, borsh0_9::maybestd::io}; + impl_tests!(borsh0_9, io); } diff --git a/sdk/program/src/borsh1.rs b/sdk/program/src/borsh1.rs new file mode 100644 index 00000000000000..a44ea522494232 --- /dev/null +++ b/sdk/program/src/borsh1.rs @@ -0,0 +1,20 @@ +#![allow(clippy::arithmetic_side_effects)] +//! Utilities for the [borsh] serialization format, version 1. +//! +//! [borsh]: https://borsh.io/ +use { + crate::borsh::{ + impl_get_instance_packed_len, impl_get_packed_len_v1, impl_try_from_slice_unchecked, + }, + borsh::io, +}; + +impl_get_packed_len_v1!(borsh); +impl_try_from_slice_unchecked!(borsh, io); +impl_get_instance_packed_len!(borsh, io); + +#[cfg(test)] +mod tests { + use {crate::borsh::impl_tests, borsh::io}; + impl_tests!(borsh, io); +} diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index e988bafb21d354..e19c4c84486ced 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -18,7 +18,7 @@ //! [`Clock::unix_timestamp`], which is produced by an [oracle derived from the //! validator set][oracle]. //! -//! [oracle]: https://docs.solana.com/implemented-proposals/validator-timestamp-oracle +//! [oracle]: https://docs.solanalabs.com/implemented-proposals/validator-timestamp-oracle use solana_sdk_macro::CloneZeroed; @@ -194,8 +194,8 @@ pub struct Clock { /// the [`timestamp_correction` and `timestamp_bounding`][tsc] features it /// is calculated using a [validator timestamp oracle][oracle]. /// - /// [tsc]: https://docs.solana.com/implemented-proposals/bank-timestamp-correction - /// [oracle]: https://docs.solana.com/implemented-proposals/validator-timestamp-oracle + /// [tsc]: https://docs.solanalabs.com/implemented-proposals/bank-timestamp-correction + /// [oracle]: https://docs.solanalabs.com/implemented-proposals/validator-timestamp-oracle pub unix_timestamp: UnixTimestamp, } diff --git a/sdk/program/src/ed25519_program.rs b/sdk/program/src/ed25519_program.rs index e104a794033cb5..651761c869be56 100644 --- a/sdk/program/src/ed25519_program.rs +++ b/sdk/program/src/ed25519_program.rs @@ -1,5 +1,5 @@ //! The [ed25519 native program][np]. //! -//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#ed25519-program +//! 
[np]: https://docs.solanalabs.com/runtime/programs#ed25519-program crate::declare_id!("Ed25519SigVerify111111111111111111111111111"); diff --git a/sdk/program/src/entrypoint.rs b/sdk/program/src/entrypoint.rs index a2fba66da2dfe4..d0d579411dcfe8 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program/src/entrypoint.rs @@ -146,7 +146,7 @@ macro_rules! entrypoint { /// for [BPF] targets. /// /// [Cargo features]: https://doc.rust-lang.org/cargo/reference/features.html -/// [BPF]: https://docs.solana.com/developing/on-chain-programs/overview#berkeley-packet-filter-bpf +/// [BPF]: https://solana.com/docs/programs/faq#berkeley-packet-filter-bpf /// /// # Cargo features /// @@ -181,7 +181,7 @@ macro_rules! custom_heap_default { /// for [BPF] targets. /// /// [Cargo features]: https://doc.rust-lang.org/cargo/reference/features.html -/// [BPF]: https://docs.solana.com/developing/on-chain-programs/overview#berkeley-packet-filter-bpf +/// [BPF]: https://solana.com/docs/programs/faq#berkeley-packet-filter-bpf /// /// # Cargo features /// diff --git a/sdk/program/src/epoch_rewards.rs b/sdk/program/src/epoch_rewards.rs index 24bb596637b2af..525caf5f1f9c68 100644 --- a/sdk/program/src/epoch_rewards.rs +++ b/sdk/program/src/epoch_rewards.rs @@ -1,6 +1,6 @@ //! A type to hold data for the [`EpochRewards` sysvar][sv]. //! -//! [sv]: https://docs.solana.com/developing/runtime-facilities/sysvars#EpochRewards +//! [sv]: https://docs.solanalabs.com/runtime/sysvars#epochrewards //! //! The sysvar ID is declared in [`sysvar::epoch_rewards`]. //! diff --git a/sdk/program/src/epoch_schedule.rs b/sdk/program/src/epoch_schedule.rs index 672b0f15359f6e..cd3fa59c6d9cd3 100644 --- a/sdk/program/src/epoch_schedule.rs +++ b/sdk/program/src/epoch_schedule.rs @@ -4,7 +4,7 @@ //! [leader schedule][ls] is in effect. The epoch schedule determines the length //! of epochs, and the timing of the next leader-schedule selection. //! -//! [ls]: https://docs.solana.com/cluster/leader-rotation#leader-schedule-rotation +//! [ls]: https://docs.solanalabs.com/consensus/leader-rotation#leader-schedule-rotation //! //! The epoch schedule does not change during the life of a blockchain, //! though the length of an epoch does — during the initial launch of @@ -29,7 +29,7 @@ pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3; pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32; #[repr(C)] -#[derive(Debug, CloneZeroed, Copy, PartialEq, Eq, Deserialize, Serialize, AbiExample)] +#[derive(Debug, CloneZeroed, PartialEq, Eq, Deserialize, Serialize, AbiExample)] #[serde(rename_all = "camelCase")] pub struct EpochSchedule { /// The maximum number of slots in each epoch. diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index 27d481b62b5441..288f696df31b93 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -46,6 +46,7 @@ const MAX_BASE58_LEN: usize = 44; Pod, Zeroable, )] +#[borsh(crate = "borsh")] #[repr(transparent)] pub struct Hash(pub(crate) [u8; HASH_BYTES]); diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index e68fc198a36642..db26af5ad04fde 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -274,7 +274,7 @@ pub enum InstructionError { /// clients. Instructions are also used to describe [cross-program /// invocations][cpi]. 
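Note (not part of the diff): the #[borsh(crate = "borsh")] attributes appearing above and throughout the rest of this diff exist because solana-program now depends on three borsh majors at once (borsh 1.x plus the renamed borsh0-10 and borsh0-9 packages), and the borsh 1.x derive has to be told which crate name its generated code should resolve against. A minimal sketch, assuming a Cargo.toml that depends on borsh 1.x:

use borsh::{BorshDeserialize, BorshSerialize};

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
#[borsh(crate = "borsh")] // resolve the derive's generated paths against borsh 1.x
struct Example {
    value: u64,
}

fn main() {
    // borsh 1.x replaces `value.try_to_vec()` with the free function `borsh::to_vec`.
    let bytes = borsh::to_vec(&Example { value: 7 }).unwrap();
    assert_eq!(Example::try_from_slice(&bytes).unwrap(), Example { value: 7 });
}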
/// -/// [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs +/// [cpi]: https://solana.com/docs/core/cpi /// /// During execution, a program will receive a list of account data as one of /// its arguments, in the same order as specified during `Instruction` @@ -347,7 +347,7 @@ impl Instruction { /// `program_id` is the address of the program that will execute the instruction. /// `accounts` contains a description of all accounts that may be accessed by the program. /// - /// Borsh serialization is often prefered over bincode as it has a stable + /// Borsh serialization is often preferred over bincode as it has a stable /// [specification] and an [implementation in JavaScript][jsb], neither of /// which are true of bincode. /// @@ -364,6 +364,7 @@ impl Instruction { /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// pub struct MyInstruction { /// pub lamports: u64, /// } @@ -391,7 +392,7 @@ impl Instruction { data: &T, accounts: Vec, ) -> Self { - let data = data.try_to_vec().unwrap(); + let data = borsh::to_vec(data).unwrap(); Self { program_id, accounts, @@ -466,10 +467,10 @@ impl Instruction { /// # pubkey::Pubkey, /// # instruction::{AccountMeta, Instruction}, /// # }; - /// # use borsh::{BorshSerialize, BorshDeserialize}; - /// # use anyhow::Result; + /// # use borsh::{io::Error, BorshSerialize, BorshDeserialize}; /// # /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// pub struct MyInstruction { /// pub lamports: u64, /// } @@ -479,7 +480,7 @@ impl Instruction { /// from: &Pubkey, /// to: &Pubkey, /// lamports: u64, - /// ) -> Result { + /// ) -> Result { /// let instr = MyInstruction { lamports }; /// /// let mut instr_in_bytes: Vec = Vec::new(); @@ -558,6 +559,7 @@ impl AccountMeta { /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// # pub struct MyInstruction; /// # /// # let instruction = MyInstruction; @@ -593,6 +595,7 @@ impl AccountMeta { /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// # pub struct MyInstruction; /// # /// # let instruction = MyInstruction; diff --git a/sdk/program/src/keccak.rs b/sdk/program/src/keccak.rs index 17829485c2bdac..6a1cfaf1113b7b 100644 --- a/sdk/program/src/keccak.rs +++ b/sdk/program/src/keccak.rs @@ -29,6 +29,7 @@ const MAX_BASE58_LEN: usize = 44; Hash, AbiExample, )] +#[borsh(crate = "borsh")] #[repr(transparent)] pub struct Hash(pub [u8; HASH_BYTES]); diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index f9b731e6384c61..c6d5a680ff52a8 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -8,7 +8,7 @@ //! [`solana-sdk`] crate, which reexports all modules from `solana-program`. //! //! [std]: https://doc.rust-lang.org/stable/std/ -//! [sstd]: https://docs.solana.com/developing/on-chain-programs/developing-rust#restrictions +//! [sstd]: https://solana.com/docs/programs/lang-rust#restrictions //! [`solana-sdk`]: https://docs.rs/solana-sdk/latest/solana_sdk/ //! //! This library defines @@ -148,7 +148,7 @@ //! For a more complete description of Solana's implementation of eBPF and its //! limitations, see the main Solana documentation for [on-chain programs][ocp]. //! -//! [ocp]: https://docs.solana.com/developing/on-chain-programs/overview +//! 
[ocp]: https://solana.com/docs/programs //! //! # Core data types //! @@ -173,7 +173,7 @@ //! [_lamports_], the smallest fractional unit of SOL, in the [`native_token`] //! module. //! -//! [acc]: https://docs.solana.com/developing/programming-model/accounts +//! [acc]: https://solana.com/docs/core/accounts //! [`Pubkey`]: pubkey::Pubkey //! [`Hash`]: hash::Hash //! [`Instruction`]: instruction::Instruction @@ -184,7 +184,7 @@ //! [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html //! [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 //! [`Sol`]: native_token::Sol -//! [_lamports_]: https://docs.solana.com/introduction#what-are-sols +//! [_lamports_]: https://solana.com/docs/intro#what-are-sols //! //! # Serialization //! @@ -272,7 +272,7 @@ //! //! [`invoke`]: program::invoke //! [`invoke_signed`]: program::invoke_signed -//! [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs +//! [cpi]: https://solana.com/docs/core/cpi //! //! A simple example of transferring lamports via CPI: //! @@ -319,7 +319,7 @@ //! `invoke_signed` to call another program while virtually "signing" for the //! PDA. //! -//! [pdas]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses +//! [pdas]: https://solana.com/docs/core/cpi#program-derived-addresses //! [`Pubkey::find_program_address`]: pubkey::Pubkey::find_program_address //! //! A simple example of creating an account for a PDA: @@ -391,7 +391,7 @@ //! Some solana programs are [_native programs_][np2], running native machine //! code that is distributed with the runtime, with well-known program IDs. //! -//! [np2]: https://docs.solana.com/developing/runtime-facilities/programs +//! [np2]: https://docs.solanalabs.com/runtime/programs //! //! Some native programs can be [invoked][cpi] by other programs, but some can //! only be executed as "top-level" instructions included by off-chain clients @@ -416,7 +416,7 @@ //! active on any particular network. The `solana feature status` CLI command //! can help in determining active features. //! -//! [slot]: https://docs.solana.com/terminology#slot +//! [slot]: https://solana.com/docs/terminology#slot //! //! Native programs important to Solana program authors include: //! @@ -461,7 +461,7 @@ //! - Instruction: [`solana_program::loader_instruction`] //! - Invokable by programs? yes //! -//! [lut]: https://docs.solana.com/proposals/versioned-transactions +//! [lut]: https://docs.solanalabs.com/proposals/versioned-transactions #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] @@ -479,6 +479,7 @@ pub mod blake3; pub mod borsh; pub mod borsh0_10; pub mod borsh0_9; +pub mod borsh1; pub mod bpf_loader; pub mod bpf_loader_deprecated; pub mod bpf_loader_upgradeable; @@ -554,7 +555,7 @@ pub use wasm_bindgen::prelude::wasm_bindgen; /// The [config native program][np]. 
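Note (not part of the diff): alongside the doc-link updates above, this diff registers the new solana_program::borsh1 module (see the new sdk/program/src/borsh1.rs earlier) while deprecating the 0.9/0.10 helpers. A hedged usage sketch, assuming solana-program 1.18+ and a direct borsh 1.x dependency:

use borsh::{BorshDeserialize, BorshSerialize};
use solana_program::borsh1::{get_instance_packed_len, try_from_slice_unchecked};

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct Counter {
    count: u64,
}

fn main() -> std::io::Result<()> {
    let value = Counter { count: 7 };
    // Serialized size of a concrete instance, useful for sizing accounts.
    let len = get_instance_packed_len(&value)?;
    let bytes = borsh::to_vec(&value)?;
    assert_eq!(len, bytes.len());
    // Deserialize without insisting that the whole buffer be consumed.
    let decoded: Counter = try_from_slice_unchecked(&bytes)?;
    assert_eq!(decoded, value);
    Ok(())
}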
/// -/// [np]: https://docs.solana.com/developing/runtime-facilities/programs#config-program +/// [np]: https://docs.solanalabs.com/runtime/programs#config-program pub mod config { pub mod program { crate::declare_id!("Config1111111111111111111111111111111111111"); } } diff --git a/sdk/program/src/loader_v4.rs b/sdk/program/src/loader_v4.rs index e5706f51f22721..6ca2b659b46530 100644 --- a/sdk/program/src/loader_v4.rs +++ b/sdk/program/src/loader_v4.rs @@ -17,7 +17,7 @@ pub const DEPLOYMENT_COOLDOWN_IN_SLOTS: u64 = 750; #[repr(u64)] #[derive(Debug, PartialEq, Eq, Clone, Copy, AbiExample)] pub enum LoaderV4Status { - /// Program is in maintanance + /// Program is in maintenance Retracted, /// Program is ready to be executed Deployed, diff --git a/sdk/program/src/loader_v4_instruction.rs b/sdk/program/src/loader_v4_instruction.rs index 66d868fe44e5c9..d2e0e041c6fcbc 100644 --- a/sdk/program/src/loader_v4_instruction.rs +++ b/sdk/program/src/loader_v4_instruction.rs @@ -24,7 +24,7 @@ pub enum LoaderV4Instruction { /// Decreasing to size zero closes the program account and resets it /// into an uninitialized state. /// Providing additional lamports upfront might be necessary to reach rent exemption. - /// Superflous funds are transfered to the recipient account. + /// Superfluous funds are transferred to the recipient account. /// /// # Account references /// 0. `[(signer), writable]` The program account to change the size of. @@ -51,7 +51,7 @@ pub enum LoaderV4Instruction { /// Undo the deployment of a program account. /// - /// The program is no longer executable and goes into maintainance. + /// The program is no longer executable and goes into maintenance. /// Necessary for writing data and truncating. /// /// # Account references diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index e81c7c485ff5f6..1a6a9239f4e0aa 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -7,7 +7,7 @@ //! //! [`legacy`]: crate::message::legacy //! [`v0`]: crate::message::v0 -//! [future message format]: https://docs.solana.com/proposals/versioned-transactions +//! [future message format]: https://docs.solanalabs.com/proposals/versioned-transactions #![allow(clippy::arithmetic_side_effects)] @@ -26,7 +26,7 @@ use { }; lazy_static! { - // Copied keys over since direct references create cyclical dependency. + // This will be deprecated and so this list shouldn't be modified pub static ref BUILTIN_PROGRAMS_KEYS: [Pubkey; 10] = { let parse = |s| Pubkey::from_str(s).unwrap(); [ @@ -193,6 +193,7 @@ impl Message { /// // another crate so it can be shared between the on-chain program and /// // the client. /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// enum BankInstruction { /// Initialize, /// Deposit { lamports: u64 }, @@ -264,6 +265,7 @@ impl Message { /// // another crate so it can be shared between the on-chain program and /// // the client. /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// enum BankInstruction { /// Initialize, /// Deposit { lamports: u64 }, @@ -328,7 +330,7 @@ impl Message { /// Create a new message for a [nonced transaction].
/// - /// [nonced transaction]: https://docs.solana.com/implemented-proposals/durable-tx-nonces + /// [nonced transaction]: https://docs.solanalabs.com/implemented-proposals/durable-tx-nonces /// /// In this type of transaction, the blockhash is replaced with a _durable /// transaction nonce_, allowing for extended time to pass between the @@ -363,6 +365,7 @@ impl Message { /// // another crate so it can be shared between the on-chain program and /// // the client. /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// enum BankInstruction { /// Initialize, /// Deposit { lamports: u64 }, diff --git a/sdk/program/src/message/mod.rs b/sdk/program/src/message/mod.rs index 8d073f8e78509c..4e763ab2d9416e 100644 --- a/sdk/program/src/message/mod.rs +++ b/sdk/program/src/message/mod.rs @@ -14,7 +14,7 @@ //! of that account array, a [recent blockhash], and a compact encoding of the //! message's instructions. //! -//! [recent blockhash]: https://docs.solana.com/developing/programming-model/transactions#recent-blockhash +//! [recent blockhash]: https://solana.com/docs/core/transactions#recent-blockhash //! //! Clients most often deal with `Instruction`s and `Transaction`s, with //! `Message`s being created by `Transaction` constructors. @@ -30,7 +30,7 @@ //! more account keys into a transaction than the legacy format. The //! [`VersionedMessage`] type is a thin wrapper around either message version. //! -//! [future message format]: https://docs.solana.com/proposals/versioned-transactions +//! [future message format]: https://docs.solanalabs.com/proposals/versioned-transactions //! //! Despite living in the `solana-program` crate, there is no way to access the //! runtime's messages from within a Solana program, and only the legacy message @@ -90,7 +90,7 @@ pub const MESSAGE_HEADER_LENGTH: usize = 3; /// may process them in parallel, in a single [PoH] entry. Transactions that /// access the same read-write accounts are processed sequentially. /// -/// [PoH]: https://docs.solana.com/cluster/synchronization +/// [PoH]: https://docs.solanalabs.com/consensus/synchronization #[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, Copy, AbiExample)] #[serde(rename_all = "camelCase")] pub struct MessageHeader { diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs index eb4b4590b5be22..df001bb19ce0bc 100644 --- a/sdk/program/src/message/versions/v0/mod.rs +++ b/sdk/program/src/message/versions/v0/mod.rs @@ -7,7 +7,7 @@ //! //! [`legacy`]: crate::message::legacy //! [`v0`]: crate::message::v0 -//! [future message format]: https://docs.solana.com/proposals/versioned-transactions +//! [future message format]: https://docs.solanalabs.com/proposals/versioned-transactions use crate::{ address_lookup_table_account::AddressLookupTableAccount, diff --git a/sdk/program/src/program.rs b/sdk/program/src/program.rs index ac7ffb05f3c5ea..27a4a2a8cca957 100644 --- a/sdk/program/src/program.rs +++ b/sdk/program/src/program.rs @@ -6,7 +6,7 @@ //! //! [`invoke`]: invoke //! [`invoke_signed`]: invoke_signed -//! [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs +//! 
[cpi]: https://solana.com/docs/core/cpi use crate::{ account_info::AccountInfo, entrypoint::ProgramResult, instruction::Instruction, pubkey::Pubkey, @@ -174,7 +174,7 @@ pub fn invoke_unchecked(instruction: &Instruction, account_infos: &[AccountInfo] /// PDA from the seeds and the calling program's ID, and if it matches one of /// the accounts in `account_info`, will consider that account "signed". /// -/// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses +/// [pda]: https://solana.com/docs/core/cpi#program-derived-addresses /// /// See the documentation for [`Pubkey::find_program_address`] for more /// about program derived addresses. @@ -361,7 +361,7 @@ pub fn set_return_data(data: &[u8]) { /// /// For more about return data see the [documentation for the return data proposal][rdp]. /// -/// [rdp]: https://docs.solana.com/proposals/return-data +/// [rdp]: https://docs.solanalabs.com/proposals/return-data pub fn get_return_data() -> Option<(Pubkey, Vec<u8>)> { #[cfg(target_os = "solana")] { diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index 6eb7e9ecd71981..0840ee16b901d7 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -3,7 +3,7 @@ #![allow(clippy::arithmetic_side_effects)] use { crate::{decode_error::DecodeError, instruction::InstructionError, msg, pubkey::PubkeyError}, - borsh::maybestd::io::Error as BorshIoError, + borsh::io::Error as BorshIoError, num_traits::{FromPrimitive, ToPrimitive}, std::convert::TryFrom, thiserror::Error, diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index ebbe5295036fc0..04fcc69dc9185a 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -60,9 +60,9 @@ impl From<u64> for PubkeyError { /// can not safely create or manage secret keys, the full [`Keypair`] is not /// defined in `solana-program` but in `solana-sdk`. /// -/// [acc]: https://docs.solana.com/developing/programming-model/accounts +/// [acc]: https://solana.com/docs/core/accounts /// [ed25519]: https://ed25519.cr.yp.to/ -/// [pdas]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses +/// [pdas]: https://solana.com/docs/core/cpi#program-derived-addresses /// [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html #[wasm_bindgen] #[repr(transparent)] @@ -84,6 +84,7 @@ impl From<u64> for PubkeyError { Serialize, Zeroable, )] +#[borsh(crate = "borsh")] pub struct Pubkey(pub(crate) [u8; 32]); impl crate::sanitize::Sanitize for Pubkey {} @@ -225,7 +226,7 @@ impl Pubkey { /// Find a valid [program derived address][pda] and its corresponding bump seed. /// - /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses + /// [pda]: https://solana.com/docs/core/cpi#program-derived-addresses /// /// Program derived addresses (PDAs) are account keys that only the program, /// `program_id`, has the authority to sign. The address is of the same form @@ -328,6 +329,7 @@ impl Pubkey { /// // The computed address of the PDA will be passed to this program via /// // the `accounts` vector of the `Instruction` type.
/// #[derive(BorshSerialize, BorshDeserialize, Debug)] + /// # #[borsh(crate = "borsh")] /// pub struct InstructionData { /// pub vault_bump_seed: u8, /// pub lamports: u64, @@ -409,6 +411,7 @@ impl Pubkey { /// # use anyhow::Result; /// # /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] + /// # #[borsh(crate = "borsh")] /// # struct InstructionData { /// # pub vault_bump_seed: u8, /// # pub lamports: u64, @@ -481,7 +484,7 @@ impl Pubkey { /// Find a valid [program derived address][pda] and its corresponding bump seed. /// - /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses + /// [pda]: https://solana.com/docs/core/cpi#program-derived-addresses /// /// The only difference between this method and [`find_program_address`] /// is that this one returns `None` in the statistically improbable event @@ -535,7 +538,7 @@ impl Pubkey { /// Create a valid [program derived address][pda] without searching for a bump seed. /// - /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses + /// [pda]: https://solana.com/docs/core/cpi#program-derived-addresses /// /// Because this function does not create a bump seed, it may unpredictably /// return an error for any given set of seeds and is not generally suitable @@ -668,47 +671,70 @@ impl fmt::Display for Pubkey { } } +impl borsh0_10::de::BorshDeserialize for Pubkey { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self(borsh0_10::BorshDeserialize::deserialize_reader( + reader, + )?)) + } +} impl borsh0_9::de::BorshDeserialize for Pubkey { fn deserialize(buf: &mut &[u8]) -> ::core::result::Result { Ok(Self(borsh0_9::BorshDeserialize::deserialize(buf)?)) } } -impl borsh0_9::BorshSchema for Pubkey -where - [u8; 32]: borsh0_9::BorshSchema, -{ - fn declaration() -> borsh0_9::schema::Declaration { - "Pubkey".to_string() - } - fn add_definitions_recursively( - definitions: &mut borsh0_9::maybestd::collections::HashMap< - borsh0_9::schema::Declaration, - borsh0_9::schema::Definition, - >, - ) { - let fields = borsh0_9::schema::Fields::UnnamedFields(<[_]>::into_vec( - borsh0_9::maybestd::boxed::Box::new([ - <[u8; 32] as borsh0_9::BorshSchema>::declaration(), - ]), - )); - let definition = borsh0_9::schema::Definition::Struct { fields }; - ::add_definition( - ::declaration(), - definition, - definitions, - ); - <[u8; 32] as borsh0_9::BorshSchema>::add_definitions_recursively(definitions); - } + +macro_rules! 
impl_borsh_schema { + ($borsh:ident) => { + impl $borsh::BorshSchema for Pubkey + where + [u8; 32]: $borsh::BorshSchema, + { + fn declaration() -> $borsh::schema::Declaration { + "Pubkey".to_string() + } + fn add_definitions_recursively( + definitions: &mut $borsh::maybestd::collections::HashMap< + $borsh::schema::Declaration, + $borsh::schema::Definition, + >, + ) { + let fields = $borsh::schema::Fields::UnnamedFields(<[_]>::into_vec( + $borsh::maybestd::boxed::Box::new([ + <[u8; 32] as $borsh::BorshSchema>::declaration(), + ]), + )); + let definition = $borsh::schema::Definition::Struct { fields }; + ::add_definition( + ::declaration(), + definition, + definitions, + ); + <[u8; 32] as $borsh::BorshSchema>::add_definitions_recursively(definitions); + } + } + }; } -impl borsh0_9::ser::BorshSerialize for Pubkey { - fn serialize( - &self, - writer: &mut W, - ) -> ::core::result::Result<(), borsh0_9::maybestd::io::Error> { - borsh0_9::BorshSerialize::serialize(&self.0, writer)?; - Ok(()) - } +impl_borsh_schema!(borsh0_10); +impl_borsh_schema!(borsh0_9); + +macro_rules! impl_borsh_serialize { + ($borsh:ident) => { + impl $borsh::ser::BorshSerialize for Pubkey { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), $borsh::maybestd::io::Error> { + $borsh::BorshSerialize::serialize(&self.0, writer)?; + Ok(()) + } + } + }; } +impl_borsh_serialize!(borsh0_10); +impl_borsh_serialize!(borsh0_9); #[cfg(test)] mod tests { diff --git a/sdk/program/src/rent.rs b/sdk/program/src/rent.rs index 7257b9a2073ec7..6d670542e2b6b5 100644 --- a/sdk/program/src/rent.rs +++ b/sdk/program/src/rent.rs @@ -1,6 +1,6 @@ //! Configuration for network [rent]. //! -//! [rent]: https://docs.solana.com/implemented-proposals/rent +//! [rent]: https://docs.solanalabs.com/implemented-proposals/rent #![allow(clippy::arithmetic_side_effects)] @@ -8,7 +8,7 @@ use {crate::clock::DEFAULT_SLOTS_PER_EPOCH, solana_sdk_macro::CloneZeroed}; /// Configuration of network rent. #[repr(C)] -#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Copy, Debug, AbiExample)] +#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Debug, AbiExample)] pub struct Rent { /// Rental rate in lamports/byte-year. pub lamports_per_byte_year: u64, diff --git a/sdk/program/src/secp256k1_program.rs b/sdk/program/src/secp256k1_program.rs index 4bc3de2c71596e..fb09e7195a37ef 100644 --- a/sdk/program/src/secp256k1_program.rs +++ b/sdk/program/src/secp256k1_program.rs @@ -1,6 +1,6 @@ //! The [secp256k1 native program][np]. //! -//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#secp256k1-program +//! [np]: https://docs.solanalabs.com/runtime/programs#secp256k1-program //! //! Constructors for secp256k1 program instructions, and documentation on the //! program's usage can be found in [`solana_sdk::secp256k1_instruction`]. diff --git a/sdk/program/src/secp256k1_recover.rs b/sdk/program/src/secp256k1_recover.rs index 5bca285c2f8849..f688e7d485497e 100644 --- a/sdk/program/src/secp256k1_recover.rs +++ b/sdk/program/src/secp256k1_recover.rs @@ -78,6 +78,7 @@ pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64; Hash, AbiExample, )] +#[borsh(crate = "borsh")] pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]); impl Secp256k1Pubkey { @@ -108,7 +109,7 @@ impl Secp256k1Pubkey { /// arbitrary message, signed by some public key. /// /// The recovery ID is a value in the range [0, 3] that is generated during -/// signing, and allows the recovery process to be more efficent. 
Note that the +/// signing, and allows the recovery process to be more efficient. Note that the /// `recovery_id` here does not directly correspond to an Ethereum recovery ID /// as used in `ecrecover`. This function accepts recovery IDs in the range of /// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert @@ -254,6 +255,7 @@ impl Secp256k1Pubkey { /// use borsh::{BorshDeserialize, BorshSerialize}; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct DemoSecp256k1RecoverInstruction { /// pub message: Vec, /// pub signature: [u8; 64], @@ -348,6 +350,7 @@ impl Secp256k1Pubkey { /// }; /// # use borsh::{BorshDeserialize, BorshSerialize}; /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec, /// # pub signature: [u8; 64], diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index 9de8638195f56f..f17512ac9fb124 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -1,6 +1,6 @@ //! A type to hold data for the [`SlotHashes` sysvar][sv]. //! -//! [sv]: https://docs.solana.com/developing/runtime-facilities/sysvars#slothashes +//! [sv]: https://docs.solanalabs.com/runtime/sysvars#slothashes //! //! The sysvar ID is declared in [`sysvar::slot_hashes`]. //! diff --git a/sdk/program/src/slot_history.rs b/sdk/program/src/slot_history.rs index 725916da2604db..5638407210aef9 100644 --- a/sdk/program/src/slot_history.rs +++ b/sdk/program/src/slot_history.rs @@ -1,6 +1,6 @@ //! A type to hold data for the [`SlotHistory` sysvar][sv]. //! -//! [sv]: https://docs.solana.com/developing/runtime-facilities/sysvars#slothistory +//! [sv]: https://docs.solanalabs.com/runtime/sysvars#slothistory //! //! The sysvar ID is declared in [`sysvar::slot_history`]. //! diff --git a/sdk/program/src/stake/mod.rs b/sdk/program/src/stake/mod.rs index 837a2c538bbc2e..1227a50c2c6474 100644 --- a/sdk/program/src/stake/mod.rs +++ b/sdk/program/src/stake/mod.rs @@ -1,6 +1,6 @@ //! The [stake native program][np]. //! -//! [np]: https://docs.solana.com/developing/runtime-facilities/sysvars#stakehistory +//! [np]: https://docs.solanalabs.com/runtime/sysvars#stakehistory #[allow(deprecated)] pub mod config; diff --git a/sdk/program/src/stake/stake_flags.rs b/sdk/program/src/stake/stake_flags.rs index a7d9d828fe2814..aa044ff928acb7 100644 --- a/sdk/program/src/stake/stake_flags.rs +++ b/sdk/program/src/stake/stake_flags.rs @@ -1,7 +1,6 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; /// Additional flags for stake state. 
-#[allow(dead_code)] #[derive( Serialize, Deserialize, @@ -18,12 +17,55 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; Hash, Debug, )] +#[borsh(crate = "borsh")] pub struct StakeFlags { bits: u8, } +impl borsh0_10::de::BorshDeserialize for StakeFlags { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + bits: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for StakeFlags { + fn declaration() -> borsh0_10::schema::Declaration { + "StakeFlags".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([( + "bits".to_string(), + ::declaration(), + )]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for StakeFlags { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.bits, writer)?; + Ok(()) + } +} /// Currently, only bit 1 is used. The other 7 bits are reserved for future usage. -#[allow(dead_code)] impl StakeFlags { /// Stake must be fully activated before deactivation is allowed (bit 1). pub const MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED: Self = @@ -52,7 +94,6 @@ impl StakeFlags { } } -#[allow(dead_code)] impl Default for StakeFlags { fn default() -> Self { StakeFlags::empty() diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index 4f94f73b3f2dd5..7bf9ea696c91cb 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -14,7 +14,7 @@ use { }, stake_history::{StakeHistory, StakeHistoryEntry}, }, - borsh::{maybestd::io, BorshDeserialize, BorshSchema, BorshSerialize}, + borsh::{io, BorshDeserialize, BorshSchema, BorshSerialize}, std::collections::HashSet, }; @@ -34,6 +34,49 @@ pub fn warmup_cooldown_rate(current_epoch: Epoch, new_rate_activation_epoch: Opt } } +macro_rules! 
impl_borsh_stake_state { + ($borsh:ident) => { + impl $borsh::BorshDeserialize for StakeState { + fn deserialize_reader(reader: &mut R) -> io::Result { + let enum_value: u32 = $borsh::BorshDeserialize::deserialize_reader(reader)?; + match enum_value { + 0 => Ok(StakeState::Uninitialized), + 1 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeState::Initialized(meta)) + } + 2 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + let stake: Stake = $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeState::Stake(meta, stake)) + } + 3 => Ok(StakeState::RewardsPool), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + "Invalid enum value", + )), + } + } + } + impl $borsh::BorshSerialize for StakeState { + fn serialize(&self, writer: &mut W) -> io::Result<()> { + match self { + StakeState::Uninitialized => writer.write_all(&0u32.to_le_bytes()), + StakeState::Initialized(meta) => { + writer.write_all(&1u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer) + } + StakeState::Stake(meta, stake) => { + writer.write_all(&2u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer)?; + $borsh::BorshSerialize::serialize(&stake, writer) + } + StakeState::RewardsPool => writer.write_all(&3u32.to_le_bytes()), + } + } + } + }; +} #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Clone, Copy, AbiExample)] #[allow(clippy::large_enum_variant)] #[deprecated( @@ -47,45 +90,8 @@ pub enum StakeState { Stake(Meta, Stake), RewardsPool, } -impl BorshDeserialize for StakeState { - fn deserialize_reader(reader: &mut R) -> io::Result { - let enum_value = u32::deserialize_reader(reader)?; - match enum_value { - 0 => Ok(StakeState::Uninitialized), - 1 => { - let meta = Meta::deserialize_reader(reader)?; - Ok(StakeState::Initialized(meta)) - } - 2 => { - let meta: Meta = BorshDeserialize::deserialize_reader(reader)?; - let stake: Stake = BorshDeserialize::deserialize_reader(reader)?; - Ok(StakeState::Stake(meta, stake)) - } - 3 => Ok(StakeState::RewardsPool), - _ => Err(io::Error::new( - io::ErrorKind::InvalidData, - "Invalid enum value", - )), - } - } -} -impl BorshSerialize for StakeState { - fn serialize(&self, writer: &mut W) -> io::Result<()> { - match self { - StakeState::Uninitialized => writer.write_all(&0u32.to_le_bytes()), - StakeState::Initialized(meta) => { - writer.write_all(&1u32.to_le_bytes())?; - meta.serialize(writer) - } - StakeState::Stake(meta, stake) => { - writer.write_all(&2u32.to_le_bytes())?; - meta.serialize(writer)?; - stake.serialize(writer) - } - StakeState::RewardsPool => writer.write_all(&3u32.to_le_bytes()), - } - } -} +impl_borsh_stake_state!(borsh); +impl_borsh_stake_state!(borsh0_10); impl StakeState { /// The fixed number of bytes used to serialize each stake account pub const fn size_of() -> usize { @@ -136,49 +142,54 @@ pub enum StakeStateV2 { Stake(Meta, Stake, StakeFlags), RewardsPool, } - -impl BorshDeserialize for StakeStateV2 { - fn deserialize_reader(reader: &mut R) -> io::Result { - let enum_value = u32::deserialize_reader(reader)?; - match enum_value { - 0 => Ok(StakeStateV2::Uninitialized), - 1 => { - let meta = Meta::deserialize_reader(reader)?; - Ok(StakeStateV2::Initialized(meta)) - } - 2 => { - let meta: Meta = BorshDeserialize::deserialize_reader(reader)?; - let stake: Stake = BorshDeserialize::deserialize_reader(reader)?; - let stake_flags: StakeFlags = BorshDeserialize::deserialize_reader(reader)?; - Ok(StakeStateV2::Stake(meta, stake, 
stake_flags)) +macro_rules! impl_borsh_stake_state_v2 { + ($borsh:ident) => { + impl $borsh::BorshDeserialize for StakeStateV2 { + fn deserialize_reader(reader: &mut R) -> io::Result { + let enum_value: u32 = $borsh::BorshDeserialize::deserialize_reader(reader)?; + match enum_value { + 0 => Ok(StakeStateV2::Uninitialized), + 1 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeStateV2::Initialized(meta)) + } + 2 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + let stake: Stake = $borsh::BorshDeserialize::deserialize_reader(reader)?; + let stake_flags: StakeFlags = + $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeStateV2::Stake(meta, stake, stake_flags)) + } + 3 => Ok(StakeStateV2::RewardsPool), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + "Invalid enum value", + )), + } } - 3 => Ok(StakeStateV2::RewardsPool), - _ => Err(io::Error::new( - io::ErrorKind::InvalidData, - "Invalid enum value", - )), } - } -} - -impl BorshSerialize for StakeStateV2 { - fn serialize(&self, writer: &mut W) -> io::Result<()> { - match self { - StakeStateV2::Uninitialized => writer.write_all(&0u32.to_le_bytes()), - StakeStateV2::Initialized(meta) => { - writer.write_all(&1u32.to_le_bytes())?; - meta.serialize(writer) - } - StakeStateV2::Stake(meta, stake, stake_flags) => { - writer.write_all(&2u32.to_le_bytes())?; - meta.serialize(writer)?; - stake.serialize(writer)?; - stake_flags.serialize(writer) + impl $borsh::BorshSerialize for StakeStateV2 { + fn serialize(&self, writer: &mut W) -> io::Result<()> { + match self { + StakeStateV2::Uninitialized => writer.write_all(&0u32.to_le_bytes()), + StakeStateV2::Initialized(meta) => { + writer.write_all(&1u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer) + } + StakeStateV2::Stake(meta, stake, stake_flags) => { + writer.write_all(&2u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer)?; + $borsh::BorshSerialize::serialize(&stake, writer)?; + $borsh::BorshSerialize::serialize(&stake_flags, writer) + } + StakeStateV2::RewardsPool => writer.write_all(&3u32.to_le_bytes()), + } } - StakeStateV2::RewardsPool => writer.write_all(&3u32.to_le_bytes()), } - } + }; } +impl_borsh_stake_state_v2!(borsh); +impl_borsh_stake_state_v2!(borsh0_10); impl StakeStateV2 { /// The fixed number of bytes used to serialize each stake account @@ -241,6 +252,7 @@ pub enum StakeAuthorize { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Lockup { /// UnixTimestamp at which this stake will allow withdrawal, unless the /// transaction is signed by the custodian @@ -252,7 +264,6 @@ pub struct Lockup { /// lockup constraints pub custodian: Pubkey, } - impl Lockup { pub fn is_in_force(&self, clock: &Clock, custodian: Option<&Pubkey>) -> bool { if custodian == Some(&self.custodian) { @@ -261,6 +272,65 @@ impl Lockup { self.unix_timestamp > clock.unix_timestamp || self.epoch > clock.epoch } } +impl borsh0_10::de::BorshDeserialize for Lockup { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + unix_timestamp: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + epoch: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + custodian: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Lockup { + fn declaration() -> borsh0_10::schema::Declaration { + "Lockup".to_string() + } + fn add_definitions_recursively( + definitions: &mut 
borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "unix_timestamp".to_string(), + ::declaration(), + ), + ( + "epoch".to_string(), + ::declaration(), + ), + ( + "custodian".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Lockup { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.unix_timestamp, writer)?; + borsh0_10::BorshSerialize::serialize(&self.epoch, writer)?; + borsh0_10::BorshSerialize::serialize(&self.custodian, writer)?; + Ok(()) + } +} #[derive( Default, @@ -276,6 +346,7 @@ impl Lockup { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Authorized { pub staker: Pubkey, pub withdrawer: Pubkey, @@ -341,6 +412,58 @@ impl Authorized { Ok(()) } } +impl borsh0_10::de::BorshDeserialize for Authorized { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + staker: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + withdrawer: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Authorized { + fn declaration() -> borsh0_10::schema::Declaration { + "Authorized".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "staker".to_string(), + ::declaration(), + ), + ( + "withdrawer".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Authorized { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.staker, writer)?; + borsh0_10::BorshSerialize::serialize(&self.withdrawer, writer)?; + Ok(()) + } +} #[derive( Default, @@ -356,6 +479,7 @@ impl Authorized { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Meta { pub rent_exempt_reserve: u64, pub authorized: Authorized, @@ -398,6 +522,65 @@ impl Meta { } } } +impl borsh0_10::de::BorshDeserialize for Meta { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + rent_exempt_reserve: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + authorized: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + lockup: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Meta { + fn declaration() -> borsh0_10::schema::Declaration { + "Meta".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + 
borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "rent_exempt_reserve".to_string(), + ::declaration(), + ), + ( + "authorized".to_string(), + ::declaration(), + ), + ( + "lockup".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Meta { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.rent_exempt_reserve, writer)?; + borsh0_10::BorshSerialize::serialize(&self.authorized, writer)?; + borsh0_10::BorshSerialize::serialize(&self.lockup, writer)?; + Ok(()) + } +} #[derive( Debug, @@ -411,6 +594,7 @@ impl Meta { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Delegation { /// to whom the stake is delegated pub voter_pubkey: Pubkey, @@ -457,7 +641,7 @@ impl Delegation { pub fn stake( &self, epoch: Epoch, - history: Option<&StakeHistory>, + history: &StakeHistory, new_rate_activation_epoch: Option, ) -> u64 { self.stake_activating_and_deactivating(epoch, history, new_rate_activation_epoch) @@ -468,7 +652,7 @@ impl Delegation { pub fn stake_activating_and_deactivating( &self, target_epoch: Epoch, - history: Option<&StakeHistory>, + history: &StakeHistory, new_rate_activation_epoch: Option, ) -> StakeActivationStatus { // first, calculate an effective and activating stake @@ -489,17 +673,14 @@ impl Delegation { } else if target_epoch == self.deactivation_epoch { // can only deactivate what's activated StakeActivationStatus::with_deactivating(effective_stake) - } else if let Some((history, mut prev_epoch, mut prev_cluster_stake)) = - history.and_then(|history| { - history - .get(self.deactivation_epoch) - .map(|cluster_stake_at_deactivation_epoch| { - ( - history, - self.deactivation_epoch, - cluster_stake_at_deactivation_epoch, - ) - }) + } else if let Some((history, mut prev_epoch, mut prev_cluster_stake)) = history + .get(self.deactivation_epoch) + .map(|cluster_stake_at_deactivation_epoch| { + ( + history, + self.deactivation_epoch, + cluster_stake_at_deactivation_epoch, + ) }) { // target_epoch > self.deactivation_epoch @@ -558,7 +739,7 @@ impl Delegation { fn stake_and_activating( &self, target_epoch: Epoch, - history: Option<&StakeHistory>, + history: &StakeHistory, new_rate_activation_epoch: Option, ) -> (u64, u64) { let delegated_stake = self.stake; @@ -576,17 +757,14 @@ impl Delegation { } else if target_epoch < self.activation_epoch { // not yet enabled (0, 0) - } else if let Some((history, mut prev_epoch, mut prev_cluster_stake)) = - history.and_then(|history| { - history - .get(self.activation_epoch) - .map(|cluster_stake_at_activation_epoch| { - ( - history, - self.activation_epoch, - cluster_stake_at_activation_epoch, - ) - }) + } else if let Some((history, mut prev_epoch, mut prev_cluster_stake)) = history + .get(self.activation_epoch) + .map(|cluster_stake_at_activation_epoch| { + ( + history, + self.activation_epoch, + cluster_stake_at_activation_epoch, + ) }) { // target_epoch > self.activation_epoch @@ -644,6 +822,79 @@ impl Delegation { } } } +impl borsh0_10::de::BorshDeserialize for Delegation { + fn deserialize_reader( 
+ reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + voter_pubkey: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + stake: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + activation_epoch: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + deactivation_epoch: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + warmup_cooldown_rate: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Delegation { + fn declaration() -> borsh0_10::schema::Declaration { + "Delegation".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "voter_pubkey".to_string(), + ::declaration(), + ), + ( + "stake".to_string(), + ::declaration(), + ), + ( + "activation_epoch".to_string(), + ::declaration(), + ), + ( + "deactivation_epoch".to_string(), + ::declaration(), + ), + ( + "warmup_cooldown_rate".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Delegation { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.voter_pubkey, writer)?; + borsh0_10::BorshSerialize::serialize(&self.stake, writer)?; + borsh0_10::BorshSerialize::serialize(&self.activation_epoch, writer)?; + borsh0_10::BorshSerialize::serialize(&self.deactivation_epoch, writer)?; + borsh0_10::BorshSerialize::serialize(&self.warmup_cooldown_rate, writer)?; + Ok(()) + } +} #[derive( Debug, @@ -658,6 +909,7 @@ impl Delegation { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Stake { pub delegation: Delegation, /// credits observed is credits from vote account state when delegated or redeemed @@ -668,7 +920,7 @@ impl Stake { pub fn stake( &self, epoch: Epoch, - history: Option<&StakeHistory>, + history: &StakeHistory, new_rate_activation_epoch: Option, ) -> u64 { self.delegation @@ -703,11 +955,63 @@ impl Stake { } } } +impl borsh0_10::de::BorshDeserialize for Stake { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + delegation: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + credits_observed: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Stake { + fn declaration() -> borsh0_10::schema::Declaration { + "Stake".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "delegation".to_string(), + ::declaration(), + ), + ( + "credits_observed".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + 
definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Stake { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.delegation, writer)?; + borsh0_10::BorshSerialize::serialize(&self.credits_observed, writer)?; + Ok(()) + } +} #[cfg(test)] mod test { use { - super::*, crate::borsh0_10::try_from_slice_unchecked, assert_matches::assert_matches, + super::*, crate::borsh1::try_from_slice_unchecked, assert_matches::assert_matches, bincode::serialize, }; @@ -719,7 +1023,7 @@ mod test { fn check_borsh_serialization(stake: StakeStateV2) { let bincode_serialized = serialize(&stake).unwrap(); - let borsh_serialized = StakeStateV2::try_to_vec(&stake).unwrap(); + let borsh_serialized = borsh::to_vec(&stake).unwrap(); assert_eq!(bincode_serialized, borsh_serialized); } @@ -850,7 +1154,7 @@ mod test { ); let bincode_serialized = serialize(&stake).unwrap(); - let borsh_serialized = StakeStateV2::try_to_vec(&stake).unwrap(); + let borsh_serialized = borsh::to_vec(&stake).unwrap(); assert_eq!(bincode_serialized[FLAG_OFFSET], expected); assert_eq!(borsh_serialized[FLAG_OFFSET], expected); @@ -872,7 +1176,7 @@ mod test { fn check_borsh_serialization(stake: StakeState) { let bincode_serialized = serialize(&stake).unwrap(); - let borsh_serialized = StakeState::try_to_vec(&stake).unwrap(); + let borsh_serialized = borsh::to_vec(&stake).unwrap(); assert_eq!(bincode_serialized, borsh_serialized); } diff --git a/sdk/program/src/stake_history.rs b/sdk/program/src/stake_history.rs index ab248461254a0d..331803e5f7572e 100644 --- a/sdk/program/src/stake_history.rs +++ b/sdk/program/src/stake_history.rs @@ -1,6 +1,6 @@ //! A type to hold data for the [`StakeHistory` sysvar][sv]. //! -//! [sv]: https://docs.solana.com/developing/runtime-facilities/sysvars#stakehistory +//! [sv]: https://docs.solanalabs.com/runtime/sysvars#stakehistory //! //! The sysvar ID is declared in [`sysvar::stake_history`]. //! diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 74646f7fb7d331..bb66b4fbce6b67 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -4,7 +4,7 @@ //! accounts][na]. It is responsible for transferring lamports from accounts //! owned by the system program, including typical user wallet accounts. //! -//! [na]: https://docs.solana.com/implemented-proposals/durable-tx-nonces +//! [na]: https://docs.solanalabs.com/implemented-proposals/durable-tx-nonces //! //! Account creation typically involves three steps: [`allocate`] space, //! [`transfer`] lamports for rent, [`assign`] to its owning program. The @@ -12,7 +12,7 @@ //! contain enough lamports to be [rent exempt], or else the creation //! instruction will fail. //! -//! [rent exempt]: https://docs.solana.com/developing/programming-model/accounts#rent-exemption +//! [rent exempt]: https://solana.com/docs/core/accounts#rent-exemption //! //! The accounts created by the system program can either be user-controlled, //! 
where the secret keys are held outside the blockchain,
@@ -378,6 +378,7 @@ pub enum SystemInstruction {
 /// };
 ///
 /// #[derive(BorshSerialize, BorshDeserialize, Debug)]
+/// # #[borsh(crate = "borsh")]
 /// pub struct CreateAccountInstruction {
 ///     /// The PDA seed used to distinguish the new account from other PDAs
 ///     pub new_account_seed: [u8; 16],
@@ -594,6 +595,7 @@ pub fn create_account_with_seed(
 /// };
 ///
 /// #[derive(BorshSerialize, BorshDeserialize, Debug)]
+/// # #[borsh(crate = "borsh")]
 /// pub struct CreateAccountInstruction {
 ///     /// The PDA seed used to distinguish the new account from other PDAs
 ///     pub new_account_seed: [u8; 16],
@@ -804,6 +806,7 @@ pub fn assign_with_seed(
 /// };
 ///
 /// #[derive(BorshSerialize, BorshDeserialize, Debug)]
+/// # #[borsh(crate = "borsh")]
 /// pub struct CreateAccountInstruction {
 ///     /// The PDA seed used to distinguish the new account from other PDAs
 ///     pub new_account_seed: [u8; 16],
@@ -1023,6 +1026,7 @@ pub fn transfer_with_seed(
 /// };
 ///
 /// #[derive(BorshSerialize, BorshDeserialize, Debug)]
+/// # #[borsh(crate = "borsh")]
 /// pub struct CreateAccountInstruction {
 ///     /// The PDA seed used to distinguish the new account from other PDAs
 ///     pub new_account_seed: [u8; 16],
@@ -1220,6 +1224,7 @@ pub fn allocate_with_seed(
 /// /// - 1: system_program - executable
 /// /// - *: to - writable
 /// #[derive(BorshSerialize, BorshDeserialize, Debug)]
+/// # #[borsh(crate = "borsh")]
 /// pub struct TransferLamportsToManyInstruction {
 ///     pub bank_pda_bump_seed: u8,
 ///     pub amount_list: Vec<u64>,
@@ -1325,7 +1330,7 @@ pub fn create_nonce_account_with_seed(
 /// Consequently, it is not possible to sign a transaction, wait more than two
 /// minutes, then successfully execute that transaction.
 ///
-/// [dtn]: https://docs.solana.com/implemented-proposals/durable-tx-nonces
+/// [dtn]: https://docs.solanalabs.com/implemented-proposals/durable-tx-nonces
 /// [rbh]: crate::message::Message::recent_blockhash
 /// [nonce]: https://en.wikipedia.org/wiki/Cryptographic_nonce
 ///
@@ -1596,7 +1601,7 @@ pub fn advance_nonce_account(nonce_pubkey: &Pubkey, authorized_pubkey: &Pubkey)
 /// would leave the nonce account with a balance less than required for rent
 /// exemption, but also greater than zero, then the transaction will fail.
 ///
-/// [rent exemption]: https://docs.solana.com/developing/programming-model/accounts#rent-exemption
+/// [rent exemption]: https://solana.com/docs/core/accounts#rent-exemption
 ///
 /// This constructor creates a [`SystemInstruction::WithdrawNonceAccount`]
 /// instruction.
diff --git a/sdk/program/src/system_program.rs b/sdk/program/src/system_program.rs
index 14f614fa46367e..e04e930540db73 100644
--- a/sdk/program/src/system_program.rs
+++ b/sdk/program/src/system_program.rs
@@ -1,5 +1,5 @@
 //! The [system native program][np].
 //!
-//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#system-program
+//! [np]: https://docs.solanalabs.com/runtime/programs#system-program
 
 crate::declare_id!("11111111111111111111111111111111");
diff --git a/sdk/program/src/sysvar/clock.rs b/sdk/program/src/sysvar/clock.rs
index 198ac08842eae6..c9f31e8fa9efcd 100644
--- a/sdk/program/src/sysvar/clock.rs
+++ b/sdk/program/src/sysvar/clock.rs
@@ -9,7 +9,7 @@
 //!
 //! See also the Solana [documentation on the clock sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#clock
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#clock
 //!
 //! # Examples
 //!
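The `# #[borsh(crate = "borsh")]` lines added to the doc examples above pin the derive macros to the borsh 1.x crate that the SDK now re-exports. A minimal round-trip sketch of the same attribute outside a doctest, assuming a direct dependency on borsh 1.x (the struct and helper names are illustrative, not from this diff):

use borsh::{to_vec, BorshDeserialize, BorshSerialize};

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
#[borsh(crate = "borsh")] // tell the derive which `borsh` crate path to reference
struct ExampleInstruction {
    seed: [u8; 16],
    lamports: u64,
}

fn round_trip() -> std::io::Result<()> {
    let ix = ExampleInstruction { seed: [42; 16], lamports: 1_000 };
    let bytes = to_vec(&ix)?; // borsh 1.x free function; replaces the old `try_to_vec`
    assert_eq!(ExampleInstruction::try_from_slice(&bytes)?, ix);
    Ok(())
}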
diff --git a/sdk/program/src/sysvar/epoch_rewards.rs b/sdk/program/src/sysvar/epoch_rewards.rs
index 1c588d052ba45b..1d43fbad8a9b08 100755
--- a/sdk/program/src/sysvar/epoch_rewards.rs
+++ b/sdk/program/src/sysvar/epoch_rewards.rs
@@ -12,7 +12,7 @@
 //!
 //! See also the Solana [documentation on the epoch rewards sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#epochrewards
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#epochrewards
 //!
 //! # Examples
 //!
diff --git a/sdk/program/src/sysvar/epoch_schedule.rs b/sdk/program/src/sysvar/epoch_schedule.rs
index 1221b49d96fd41..0f3a0aa68896f8 100644
--- a/sdk/program/src/sysvar/epoch_schedule.rs
+++ b/sdk/program/src/sysvar/epoch_schedule.rs
@@ -9,7 +9,7 @@
 //!
 //! See also the Solana [documentation on the epoch schedule sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#epochschedule
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#epochschedule
 //!
 //! # Examples
 //!
diff --git a/sdk/program/src/sysvar/fees.rs b/sdk/program/src/sysvar/fees.rs
index 82bc145efa7b95..003a87a64f4f0b 100644
--- a/sdk/program/src/sysvar/fees.rs
+++ b/sdk/program/src/sysvar/fees.rs
@@ -11,12 +11,12 @@
 //! method. For additional context see the [Comprehensive Compute Fees
 //! proposal][ccf].
 //!
-//! [`getFeeForMessage`]: https://docs.solana.com/developing/clients/jsonrpc-api#getfeeformessage
-//! [ccf]: https://docs.solana.com/proposals/comprehensive-compute-fees
+//! [`getFeeForMessage`]: https://solana.com/docs/rpc/http/getfeeformessage
+//! [ccf]: https://docs.solanalabs.com/proposals/comprehensive-compute-fees
 //!
 //! See also the Solana [documentation on the fees sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#fees
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#fees
 
 #![allow(deprecated)]
 
diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs
index ce4283d0c35ae7..a5a31735795832 100644
--- a/sdk/program/src/sysvar/instructions.rs
+++ b/sdk/program/src/sysvar/instructions.rs
@@ -5,7 +5,7 @@
 //! introspection][in], which is required for correctly interoperating with
 //! native programs like the [secp256k1] and [ed25519] programs.
 //!
-//! [in]: https://docs.solana.com/implemented-proposals/instruction_introspection
+//! [in]: https://docs.solanalabs.com/implemented-proposals/instruction_introspection
 //! [secp256k1]: crate::secp256k1_program
 //! [ed25519]: crate::ed25519_program
 //!
@@ -18,7 +18,7 @@
 //!
 //! See also the Solana [documentation on the instructions sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#instructions
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#instructions
 //!
 //! # Examples
 //!
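The sysvar docs above all point to the same on-chain access pattern; here is a minimal sketch of it for the clock sysvar, assuming the `solana_program` crate (the function name is illustrative):

use solana_program::{clock::Clock, entrypoint::ProgramResult, msg, sysvar::Sysvar};

pub fn log_clock() -> ProgramResult {
    // `Sysvar::get()` reads the sysvar via syscall, so no sysvar account
    // has to be passed to the instruction for sysvars that support it.
    let clock = Clock::get()?;
    msg!("slot {} in epoch {}", clock.slot, clock.epoch);
    Ok(())
}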
diff --git a/sdk/program/src/sysvar/mod.rs b/sdk/program/src/sysvar/mod.rs
index 1bb7c12b33a728..4aabbce336619a 100644
--- a/sdk/program/src/sysvar/mod.rs
+++ b/sdk/program/src/sysvar/mod.rs
@@ -79,7 +79,7 @@
 //!
 //! For more details see the Solana [documentation on sysvars][sysvardoc].
 //!
-//! [sysvardoc]: https://docs.solana.com/developing/runtime-facilities/sysvars
+//! [sysvardoc]: https://docs.solanalabs.com/runtime/sysvars
 
 use {
     crate::{account_info::AccountInfo, program_error::ProgramError, pubkey::Pubkey},
@@ -100,6 +100,7 @@ pub mod slot_history;
 pub mod stake_history;
 
 lazy_static! {
+    // This will be deprecated and so this list shouldn't be modified
     pub static ref ALL_IDS: Vec<Pubkey> = vec![
         clock::id(),
         epoch_schedule::id(),
@@ -113,8 +114,6 @@ lazy_static! {
         slot_history::id(),
         stake_history::id(),
         instructions::id(),
-        epoch_rewards::id(),
-        last_restart_slot::id(),
     ];
 }
 
@@ -138,12 +137,6 @@ macro_rules! declare_sysvar_id(
                 check_id(pubkey)
             }
         }
-
-        #[cfg(test)]
-        #[test]
-        fn test_sysvar_id() {
-            assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name);
-        }
     )
 );
 
@@ -164,12 +157,6 @@ macro_rules! declare_deprecated_sysvar_id(
                 check_id(pubkey)
             }
         }
-
-        #[cfg(test)]
-        #[test]
-        fn test_sysvar_id() {
-            assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name);
-        }
     )
 );
 
diff --git a/sdk/program/src/sysvar/recent_blockhashes.rs b/sdk/program/src/sysvar/recent_blockhashes.rs
index 91be1b8276633a..ec3a69baf7adb9 100644
--- a/sdk/program/src/sysvar/recent_blockhashes.rs
+++ b/sdk/program/src/sysvar/recent_blockhashes.rs
@@ -9,12 +9,12 @@
 //! determined with the [`getFeeForMessage`] RPC method. For additional context
 //! see the [Comprehensive Compute Fees proposal][ccf].
 //!
-//! [`getFeeForMessage`]: https://docs.solana.com/developing/clients/jsonrpc-api#getfeeformessage
-//! [ccf]: https://docs.solana.com/proposals/comprehensive-compute-fees
+//! [`getFeeForMessage`]: https://solana.com/docs/rpc/http/getfeeformessage
+//! [ccf]: https://docs.solanalabs.com/proposals/comprehensive-compute-fees
 //!
 //! See also the Solana [documentation on the recent blockhashes sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#recentblockhashes
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#recentblockhashes
 
 #![allow(deprecated)]
 #![allow(clippy::arithmetic_side_effects)]
diff --git a/sdk/program/src/sysvar/rent.rs b/sdk/program/src/sysvar/rent.rs
index 0e0e518e151ca4..4767838383b22f 100644
--- a/sdk/program/src/sysvar/rent.rs
+++ b/sdk/program/src/sysvar/rent.rs
@@ -1,6 +1,6 @@
 //! Configuration for network [rent].
 //!
-//! [rent]: https://docs.solana.com/implemented-proposals/rent
+//! [rent]: https://docs.solanalabs.com/implemented-proposals/rent
 //!
 //! The _rent sysvar_ provides access to the [`Rent`] type, which defines
 //! storage rent fees.
@@ -10,7 +10,7 @@
 //!
 //! See also the Solana [documentation on the rent sysvar][sdoc].
 //!
-//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#rent
+//! [sdoc]: https://docs.solanalabs.com/runtime/sysvars#rent
 //!
 //! # Examples
 //!
diff --git a/sdk/program/src/vote/mod.rs b/sdk/program/src/vote/mod.rs
index 64edd3222660d7..c26134c1a42450 100644
--- a/sdk/program/src/vote/mod.rs
+++ b/sdk/program/src/vote/mod.rs
@@ -1,6 +1,6 @@
 //! The [vote native program][np].
 //!
-//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#vote-program
+//!
[np]: https://docs.solanalabs.com/runtime/programs#vote-program pub mod authorized_voters; pub mod error; diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 327560f075ac5c..a67f49ded47f1d 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -4,8 +4,11 @@ use qualifier_attr::qualifiers; use { crate::{ + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::{Epoch, INITIAL_RENT_EPOCH}, + feature_set::{deprecate_executable_meta_update_in_bpf_loader, FeatureSet}, lamports::LamportsError, + loader_v4, native_loader, pubkey::Pubkey, }, serde::{ @@ -37,6 +40,9 @@ pub struct Account { /// the program that owns this account. If executable, the program that loads this account. pub owner: Pubkey, /// this account's data contains a loaded program (and is now read-only) + /// + /// When feature `deprecate_executable_meta_update_in_bpf_loader` is active, + /// `executable` is deprecated, please use `fn is_executable(&account)` instead. pub executable: bool, /// the epoch at which this account will next owe rent pub rent_epoch: Epoch, @@ -754,6 +760,99 @@ pub fn create_is_signer_account_infos<'a>( .collect() } +/// Replacement for the executable flag: An account being owned by one of these contains a program. +pub const PROGRAM_OWNERS: &[Pubkey] = &[ + bpf_loader_upgradeable::id(), + bpf_loader::id(), + bpf_loader_deprecated::id(), + loader_v4::id(), +]; + +const LOADER_V4_STATUS_BYTE_OFFSET: usize = 40; + +/// Create executable account meta data based on account's `owner`. +/// +/// This function is only used for testing and an optimization during +/// transaction loading. +/// +/// When the program account is already present in the program cache, we don't +/// need to load the full account data during transaction loading. Instead, all +/// we need is a minimal executable account meta data, which is what this +/// function returns. +pub fn create_executable_meta(owner: &Pubkey) -> &[u8] { + // For upgradable program account, only `UpgradeableLoaderState::Program` + // variant (i.e. discriminant = 2) should *executable*, which means the + // discriminant for the enum at byte offset 0 in account data is 2. + const EXECUTABLE_META_FOR_BPF_LOADER_UPGRADABLE: [u8; 1] = [2]; + + // For loader v4 program, when LoaderV4Status (byte_offset = 40 in account + // data) is set, the program is executable. + const fn get_executable_meta_for_loader_v4() -> [u8; 41] { + let mut v = [0; LOADER_V4_STATUS_BYTE_OFFSET + 1]; + v[LOADER_V4_STATUS_BYTE_OFFSET] = 1; + v + } + const EXECUTABLE_META_FOR_LOADER_V4: [u8; LOADER_V4_STATUS_BYTE_OFFSET + 1] = + get_executable_meta_for_loader_v4(); + + // For other owners, simple returns a 1 byte array would make it executable. + const DEFAULT_EXECUTABLE_META: [u8; 1] = [1]; + + if bpf_loader_upgradeable::check_id(owner) { + &EXECUTABLE_META_FOR_BPF_LOADER_UPGRADABLE + } else if loader_v4::check_id(owner) { + &EXECUTABLE_META_FOR_LOADER_V4 + } else { + &DEFAULT_EXECUTABLE_META + } +} + +/// Return true if the account program is executable. +pub fn is_executable(account: &impl ReadableAccount, feature_set: &FeatureSet) -> bool { + if !feature_set.is_active(&deprecate_executable_meta_update_in_bpf_loader::id()) { + account.executable() + } else { + // First, check if the account is empty. Empty accounts are not executable. + if account.data().is_empty() { + return false; + } + + // bpf_loader/bpf_loader_deprecated still relies on `executable` on the + // program account. 
When the program account is finalized, the loader will + // mark `executable` flag on the account. We can't emulate `executable` for + // these two loaders. However, when `deprecate_executable` is true, we + // should have already disabled the deployment of bpf_loader and + // bpf_loader_deprecated. Therefore, we can safely assume that all those + // programs are `executable`. + if bpf_loader::check_id(account.owner()) || bpf_loader_deprecated::check_id(account.owner()) + { + return true; + } + + if bpf_loader_upgradeable::check_id(account.owner()) { + // For upgradable program account, only + // `UpgradeableLoaderState::Program` variant (i.e. discriminant = 2) is + // *executable*. + return account.data()[0] == 2; + } + + if loader_v4::check_id(account.owner()) { + // LoaderV4Status (byte_offset = 40) + // return account.data()[LOADER_V4_STATUS_BYTE_OFFSET] != 0; + return false; // TODO: return false for now + } + + false + } +} + +/// Return true if the account program is a builtin program. Note that for +/// builtin program, even when its account data is empty, it is still be +/// executable, such as vote program etc. +pub fn is_builtin(account: &impl ReadableAccount) -> bool { + native_loader::check_id(account.owner()) && !account.data().is_empty() +} + #[cfg(test)] pub mod tests { use super::*; diff --git a/sdk/src/compute_budget.rs b/sdk/src/compute_budget.rs index 84d0c3766023c6..c903be13c21446 100644 --- a/sdk/src/compute_budget.rs +++ b/sdk/src/compute_budget.rs @@ -23,14 +23,7 @@ crate::declare_id!("ComputeBudget111111111111111111111111111111"); Serialize, )] pub enum ComputeBudgetInstruction { - /// Deprecated - // TODO: after feature remove_deprecated_request_unit_ix::id() is activated, replace it with 'unused' - RequestUnitsDeprecated { - /// Units to request - units: u32, - /// Additional fee to add - additional_fee: u32, - }, + Unused, // deprecated variant, reserved value. /// Request a specific transaction-wide program heap region size in bytes. /// The value requested must be a multiple of 1024. This new heap region /// size applies to each program executed in the transaction, including all @@ -63,9 +56,9 @@ impl ComputeBudgetInstruction { /// Serialize Instruction using borsh, this is only used in runtime::cost_model::tests but compilation /// can't be restricted as it's used across packages - // #[cfg(test)] - pub fn pack(self) -> Result, std::io::Error> { - self.try_to_vec() + #[cfg(feature = "dev-context-only-utils")] + pub fn pack(self) -> Result, borsh::io::Error> { + borsh::to_vec(&self) } /// Create a `ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit` `Instruction` diff --git a/sdk/src/ed25519_instruction.rs b/sdk/src/ed25519_instruction.rs index 2def2c087ad761..6400d1ac74b2e0 100644 --- a/sdk/src/ed25519_instruction.rs +++ b/sdk/src/ed25519_instruction.rs @@ -1,6 +1,6 @@ //! Instructions for the [ed25519 native program][np]. //! -//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#ed25519-program +//! [np]: https://docs.solanalabs.com/runtime/programs#ed25519-program #![cfg(feature = "full")] diff --git a/sdk/src/epoch_info.rs b/sdk/src/epoch_info.rs index 24106e2a678cc5..c324c38d8a1aa7 100644 --- a/sdk/src/epoch_info.rs +++ b/sdk/src/epoch_info.rs @@ -2,7 +2,7 @@ //! //! As returned by the [`getEpochInfo`] RPC method. //! -//! [`getEpochInfo`]: https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo +//! 
[`getEpochInfo`]: https://solana.com/docs/rpc/http/getepochinfo
 
 use crate::clock::{Epoch, Slot};
 
diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index 2692969e7904df..27d4a14783f6cc 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -370,7 +370,7 @@ pub mod update_rewards_from_cached_accounts {
     solana_sdk::declare_id!("28s7i3htzhahXQKqmS2ExzbEoUypg9krwvtK2M9UWXh9");
 }
 pub mod enable_partitioned_epoch_reward {
-    solana_sdk::declare_id!("HCnE3xQoZtDz9dSVm3jKwJXioTb6zMRbgwCmGg3PHHk8");
+    solana_sdk::declare_id!("41tVp5qR1XwWRt5WifvtSQyuxtqQWJgEK8w91AtBqSwP");
 }
 
 pub mod spl_token_v3_4_0 {
@@ -399,7 +399,7 @@ pub mod stake_raise_minimum_delegation_to_1_sol {
 }
 
 pub mod stake_minimum_delegation_for_rewards {
-    solana_sdk::declare_id!("ELjxSXwNsyXGfAh8TqX8ih22xeT8huF6UngQirbLKYKH");
+    solana_sdk::declare_id!("G6ANXD6ptCSyNd9znZm7j4dEczAJCfx7Cy43oBx3rKHJ");
 }
 
 pub mod add_set_compute_unit_price_ix {
@@ -727,6 +727,42 @@ pub mod validate_fee_collector_account {
     solana_sdk::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm");
 }
 
+pub mod disable_rent_fees_collection {
+    solana_sdk::declare_id!("CJzY83ggJHqPGDq8VisV3U91jDJLuEaALZooBrXtnnLU");
+}
+
+pub mod enable_zk_transfer_with_fee {
+    solana_sdk::declare_id!("zkNLP7EQALfC1TYeB3biDU7akDckj8iPkvh9y2Mt2K3");
+}
+
+pub mod drop_legacy_shreds {
+    solana_sdk::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy");
+}
+
+pub mod allow_commission_decrease_at_any_time {
+    solana_sdk::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb");
+}
+
+pub mod consume_blockstore_duplicate_proofs {
+    solana_sdk::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ");
+}
+
+pub mod index_erasure_conflict_duplicate_proofs {
+    solana_sdk::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa");
+}
+
+pub mod merkle_conflict_duplicate_proofs {
+    solana_sdk::declare_id!("mrkPjRg79B2oK2ZLgd7S3AfEJaX9B6gAF3H9aEykRUS");
+}
+
+pub mod disable_bpf_loader_instructions {
+    solana_sdk::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn");
+}
+
+pub mod deprecate_executable_meta_update_in_bpf_loader {
+    solana_sdk::declare_id!("k6uR1J9VtKJnTukBV2Eo15BEy434MBg8bT6hHQgmU8v");
+}
+
 lazy_static! {
     /// Map of feature identifiers to user-visible description
     pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [
@@ -904,6 +940,15 @@ lazy_static! {
         (update_hashes_per_tick5::id(), "Update desired hashes per tick to 9.2M"),
         (update_hashes_per_tick6::id(), "Update desired hashes per tick to 10M"),
         (validate_fee_collector_account::id(), "validate fee collector account #33888"),
+        (disable_rent_fees_collection::id(), "Disable rent fees collection #33945"),
+        (enable_zk_transfer_with_fee::id(), "enable Zk Token proof program transfer with fee"),
+        (drop_legacy_shreds::id(), "drops legacy shreds #34328"),
+        (allow_commission_decrease_at_any_time::id(), "Allow commission decrease at any time in epoch #33843"),
+        (consume_blockstore_duplicate_proofs::id(), "consume duplicate proofs from blockstore in consensus #34372"),
+        (index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"),
+        (merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for merkle root conflicts #34270"),
+        (disable_bpf_loader_instructions::id(), "disable bpf loader management instructions #34194"),
+        (deprecate_executable_meta_update_in_bpf_loader::id(), "deprecate executable meta flag update in bpf loader #34194"),
         /*************** ADD NEW FEATURES HERE ***************/
     ]
     .iter()
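Runtime code typically gates behavior on these declarations through `FeatureSet::is_active`, the same pattern `is_executable` uses earlier in this diff. A hedged sketch (the function is illustrative, not from this PR):

use solana_sdk::feature_set::{disable_rent_fees_collection, FeatureSet};

fn should_collect_rent(feature_set: &FeatureSet) -> bool {
    // Rent collection remains enabled until the feature is activated on-cluster.
    !feature_set.is_active(&disable_rent_fees_collection::id())
}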
diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs
index b9fb7329c34cbe..f3377b5254f0a6 100644
--- a/sdk/src/fee.rs
+++ b/sdk/src/fee.rs
@@ -82,18 +82,13 @@ impl FeeStructure {
         message: &SanitizedMessage,
         lamports_per_signature: u64,
         budget_limits: &FeeBudgetLimits,
-        remove_congestion_multiplier: bool,
         include_loaded_account_data_size_in_fee: bool,
     ) -> u64 {
         // Fee based on compute units and signatures
         let congestion_multiplier = if lamports_per_signature == 0 {
             0.0 // test only
-        } else if remove_congestion_multiplier {
-            1.0 // multiplier that has no effect
         } else {
-            const BASE_CONGESTION: f64 = 5_000.0;
-            let current_congestion = BASE_CONGESTION.max(lamports_per_signature as f64);
-            BASE_CONGESTION / current_congestion
+            1.0 // multiplier that has no effect
         };
 
         let signature_fee = message
@@ -149,3 +144,40 @@ impl ::solana_frozen_abi::abi_example::AbiExample for FeeStructure {
         FeeStructure::default()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_calculate_memory_usage_cost() {
+        let heap_cost = 99;
+        const K: usize = 1024;
+
+        // account data size is priced in blocks of 32K, ...
+
+        // ... requesting less than 32K should still be charged as one block
+        assert_eq!(
+            heap_cost,
+            FeeStructure::calculate_memory_usage_cost(31 * K, heap_cost)
+        );
+
+        // ... requesting exactly 32K should be charged as one block
+        assert_eq!(
+            heap_cost,
+            FeeStructure::calculate_memory_usage_cost(32 * K, heap_cost)
+        );
+
+        // ... requesting slightly above 32K should be charged as two blocks
+        assert_eq!(
+            heap_cost * 2,
+            FeeStructure::calculate_memory_usage_cost(33 * K, heap_cost)
+        );
+
+        // ... requesting exactly 64K should be charged as two blocks
+        assert_eq!(
+            heap_cost * 2,
+            FeeStructure::calculate_memory_usage_cost(64 * K, heap_cost)
+        );
+    }
+}
diff --git a/sdk/src/inner_instruction.rs b/sdk/src/inner_instruction.rs
new file mode 100644
index 00000000000000..1a715979ebf1c5
--- /dev/null
+++ b/sdk/src/inner_instruction.rs
@@ -0,0 +1,21 @@
+use {
+    crate::instruction::CompiledInstruction,
+    serde::{Deserialize, Serialize},
+};
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct InnerInstruction {
+    pub instruction: CompiledInstruction,
+    /// Invocation stack height of this instruction. Instruction stack height
+    /// starts at 1 for transaction instructions.
+    pub stack_height: u8,
+}
+
+/// An ordered list of compiled instructions that were invoked during a
+/// transaction instruction
+pub type InnerInstructions = Vec<InnerInstruction>;
+
+/// A list of compiled instructions that were invoked during each instruction of
+/// a transaction
+pub type InnerInstructionsList = Vec<InnerInstructions>;
diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs
index b56d9c2fdb3166..11121debd8eb1d 100644
--- a/sdk/src/lib.rs
+++ b/sdk/src/lib.rs
@@ -26,7 +26,7 @@
 //! [`solana-client`]: https://docs.rs/solana-client
 //! [`solana-cli-config`]: https://docs.rs/solana-cli-config
 //! [`solana-clap-utils`]: https://docs.rs/solana-clap-utils
-//! [json]: https://docs.solana.com/developing/clients/jsonrpc-api
+//! [json]: https://solana.com/docs/rpc
 //! [`clap`]: https://docs.rs/clap
 
 #![allow(incomplete_features)]
@@ -46,15 +46,16 @@ pub use solana_program::address_lookup_table_account;
 pub use solana_program::program_stubs;
 pub use solana_program::{
     account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, borsh, borsh0_10, borsh0_9,
-    bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default,
-    custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, declare_sysvar_id,
-    decode_error, ed25519_program, epoch_rewards, epoch_schedule, fee_calculator, impl_sysvar_get,
-    incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction,
-    loader_v4, loader_v4_instruction, message, msg, native_token, nonce, poseidon, program,
-    program_error, program_memory, program_option, program_pack, rent, sanitize, sdk_ids,
-    secp256k1_program, secp256k1_recover, secp256r1_program, serde_varint, serialize_utils, short_vec, slot_hashes,
-    slot_history, stable_layout, stake, stake_history, syscalls, system_instruction,
-    system_program, sysvar, unchecked_div_by_const, vote, wasm_bindgen,
+    borsh1, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config,
+    custom_heap_default, custom_panic_default, debug_account_data, declare_deprecated_sysvar_id,
+    declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_schedule,
+    fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports,
+    loader_instruction, loader_upgradeable_instruction, loader_v4, loader_v4_instruction, message,
+    msg, native_token, nonce, poseidon, program, program_error, program_memory, program_option,
+    program_pack, rent, sanitize, sdk_ids, secp256k1_program, secp256k1_recover, secp256r1_program, serde_varint,
+    serialize_utils, short_vec, slot_hashes, slot_history, stable_layout, stake, stake_history,
+    syscalls, system_instruction, system_program, sysvar, unchecked_div_by_const, vote,
+    wasm_bindgen,
 };
 
 pub mod account;
@@ -77,6 +78,7 @@ pub mod genesis_config;
 pub mod hard_forks;
 pub mod hash;
 pub mod inflation;
+pub mod inner_instruction;
 pub mod log;
 pub mod native_loader;
 pub mod net;
@@ -96,6 +98,7 @@ pub mod secp256r1_instruction;
 pub mod shred_version;
 pub mod signature;
 pub mod signer;
+pub mod simple_vote_transaction_checker;
 pub mod system_transaction;
 pub mod timing;
 pub mod transaction;
diff --git a/sdk/src/quic.rs b/sdk/src/quic.rs
index dd75efccf70fc0..6e9e0a656c3396 100644
--- a/sdk/src/quic.rs
+++ b/sdk/src/quic.rs
@@ -1,5 +1,6 @@
+#![cfg(feature = "full")]
 //! Definitions related to Solana over QUIC.
-use std::time::Duration; +use {crate::signer::keypair::Keypair, std::time::Duration}; pub const QUIC_PORT_OFFSET: u16 = 6; // Empirically found max number of concurrent streams @@ -26,12 +27,16 @@ pub const QUIC_CONNECTION_HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(60); /// The receive window for QUIC connection from unstaked nodes is /// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] -pub const QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO: u64 = 1; +pub const QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO: u64 = 128; /// The receive window for QUIC connection from minimum staked nodes is /// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] -pub const QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO: u64 = 2; +pub const QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO: u64 = 128; /// The receive window for QUIC connection from maximum staked nodes is /// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] -pub const QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO: u64 = 10; +pub const QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO: u64 = 512; + +pub trait NotifyKeyUpdate { + fn update_key(&self, key: &Keypair) -> Result<(), Box>; +} diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index fc7cfaf7456116..a7bdff0b6dfb22 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -1,6 +1,6 @@ //! Instructions for the [secp256k1 native program][np]. //! -//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#secp256k1-program +//! [np]: https://docs.solanalabs.com/runtime/programs#secp256k1-program //! //! _This module provides low-level cryptographic building blocks that must be //! used carefully to ensure proper security. Read this documentation and @@ -790,8 +790,7 @@ use { crate::{ feature_set::{ - libsecp256k1_0_5_upgrade_enabled, libsecp256k1_fail_on_bad_count, - libsecp256k1_fail_on_bad_count2, FeatureSet, + libsecp256k1_fail_on_bad_count, libsecp256k1_fail_on_bad_count2, FeatureSet, }, instruction::Instruction, precompiles::PrecompileError, @@ -973,17 +972,10 @@ pub fn verify( return Err(PrecompileError::InvalidSignature); } - let sig_parse_result = if feature_set.is_active(&libsecp256k1_0_5_upgrade_enabled::id()) { - libsecp256k1::Signature::parse_standard_slice( - &signature_instruction[sig_start..sig_end], - ) - } else { - libsecp256k1::Signature::parse_overflowing_slice( - &signature_instruction[sig_start..sig_end], - ) - }; - - let signature = sig_parse_result.map_err(|_| PrecompileError::InvalidSignature)?; + let signature = libsecp256k1::Signature::parse_standard_slice( + &signature_instruction[sig_start..sig_end], + ) + .map_err(|_| PrecompileError::InvalidSignature)?; let recovery_id = libsecp256k1::RecoveryId::parse(signature_instruction[sig_end]) .map_err(|_| PrecompileError::InvalidRecoveryId)?; @@ -1068,14 +1060,7 @@ pub mod test { instruction_data[0] = num_signatures; let writer = std::io::Cursor::new(&mut instruction_data[1..]); bincode::serialize_into(writer, &offsets).unwrap(); - let mut feature_set = FeatureSet::all_enabled(); - feature_set - .active - .remove(&libsecp256k1_0_5_upgrade_enabled::id()); - feature_set - .inactive - .insert(libsecp256k1_0_5_upgrade_enabled::id()); - + let feature_set = FeatureSet::all_enabled(); verify(&instruction_data, &[&[0u8; 100]], &feature_set) } @@ -1089,13 +1074,7 @@ pub mod test { let writer = std::io::Cursor::new(&mut instruction_data[1..]); bincode::serialize_into(writer, &offsets).unwrap(); instruction_data.truncate(instruction_data.len() - 1); - let mut 
feature_set = FeatureSet::all_enabled(); - feature_set - .active - .remove(&libsecp256k1_0_5_upgrade_enabled::id()); - feature_set - .inactive - .insert(libsecp256k1_0_5_upgrade_enabled::id()); + let feature_set = FeatureSet::all_enabled(); assert_eq!( verify(&instruction_data, &[&[0u8; 100]], &feature_set), @@ -1224,13 +1203,7 @@ pub mod test { instruction_data[0] = 0; let writer = std::io::Cursor::new(&mut instruction_data[1..]); bincode::serialize_into(writer, &offsets).unwrap(); - let mut feature_set = FeatureSet::all_enabled(); - feature_set - .active - .remove(&libsecp256k1_0_5_upgrade_enabled::id()); - feature_set - .inactive - .insert(libsecp256k1_0_5_upgrade_enabled::id()); + let feature_set = FeatureSet::all_enabled(); assert_eq!( verify(&instruction_data, &[&[0u8; 100]], &feature_set), @@ -1251,14 +1224,7 @@ pub mod test { let message_arr = b"hello"; let mut secp_instruction = new_secp256k1_instruction(&secp_privkey, message_arr); let mint_keypair = Keypair::new(); - let mut feature_set = feature_set::FeatureSet::all_enabled(); - feature_set - .active - .remove(&feature_set::libsecp256k1_0_5_upgrade_enabled::id()); - feature_set - .inactive - .insert(feature_set::libsecp256k1_0_5_upgrade_enabled::id()); - let feature_set = feature_set; + let feature_set = feature_set::FeatureSet::all_enabled(); let tx = Transaction::new_signed_with_payer( &[secp_instruction.clone()], diff --git a/sdk/src/shred_version.rs b/sdk/src/shred_version.rs index a113a2af1306e0..52560a1709c102 100644 --- a/sdk/src/shred_version.rs +++ b/sdk/src/shred_version.rs @@ -1,6 +1,6 @@ //! Calculation of [shred] versions. //! -//! [shred]: https://docs.solana.com/terminology#shred +//! [shred]: https://solana.com/docs/terminology#shred #![cfg(feature = "full")] diff --git a/sdk/src/signer/mod.rs b/sdk/src/signer/mod.rs index 710860e231981b..79aab63d3ab208 100644 --- a/sdk/src/signer/mod.rs +++ b/sdk/src/signer/mod.rs @@ -81,7 +81,7 @@ pub trait Signer { } /// Fallibly produces an Ed25519 signature over the provided `message` bytes. fn try_sign_message(&self, message: &[u8]) -> Result; - /// Whether the impelmentation requires user interaction to sign + /// Whether the implementation requires user interaction to sign fn is_interactive(&self) -> bool; } @@ -94,6 +94,7 @@ where } } +/// This impl allows using Signer with types like Box/Rc/Arc. impl> Signer for Container { #[inline] fn pubkey(&self) -> Pubkey { diff --git a/sdk/src/simple_vote_transaction_checker.rs b/sdk/src/simple_vote_transaction_checker.rs new file mode 100644 index 00000000000000..33be20773afb21 --- /dev/null +++ b/sdk/src/simple_vote_transaction_checker.rs @@ -0,0 +1,28 @@ +#![cfg(feature = "full")] + +use crate::{message::VersionedMessage, transaction::SanitizedVersionedTransaction}; + +/// Simple vote transaction meets these conditions: +/// 1. has 1 or 2 signatures; +/// 2. is legacy message; +/// 3. has only one instruction; +/// 4. 
the single instruction must be a Vote instruction.
+pub fn is_simple_vote_transaction(
+    sanitized_versioned_transaction: &SanitizedVersionedTransaction,
+) -> bool {
+    let signatures_count = sanitized_versioned_transaction.signatures.len();
+    let is_legacy_message = matches!(
+        sanitized_versioned_transaction.message.message,
+        VersionedMessage::Legacy(_)
+    );
+    let mut instructions = sanitized_versioned_transaction
+        .message
+        .program_instructions_iter();
+    signatures_count < 3
+        && is_legacy_message
+        && instructions
+            .next()
+            .xor(instructions.next())
+            .map(|(program_id, _ix)| program_id == &solana_sdk::vote::program::id())
+            .unwrap_or(false)
+}
diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs
index 072c7baa984bb5..4173a93b62215e 100644
--- a/sdk/src/transaction/mod.rs
+++ b/sdk/src/transaction/mod.rs
@@ -43,7 +43,7 @@
 //! transactions.
 //!
 //! [`RpcClient::get_latest_blockhash`]: https://docs.rs/solana-rpc-client/latest/solana_rpc_client/rpc_client/struct.RpcClient.html#method.get_latest_blockhash
-//! [durable transaction nonce]: https://docs.solana.com/implemented-proposals/durable-tx-nonces
+//! [durable transaction nonce]: https://docs.solanalabs.com/implemented-proposals/durable-tx-nonces
 //!
 //! # Examples
 //!
@@ -147,7 +147,7 @@ pub enum TransactionVerificationMode {
 
 pub type Result<T> = result::Result<T, TransactionError>;
 
-/// An atomically-commited sequence of instructions.
+/// An atomically-committed sequence of instructions.
 ///
 /// While [`Instruction`]s are the basic unit of computation in Solana,
 /// they are submitted by clients in [`Transaction`]s containing one or
diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs
index 117ae0a8e00148..4189f1b64b86e2 100644
--- a/sdk/src/transaction/sanitized.rs
+++ b/sdk/src/transaction/sanitized.rs
@@ -14,6 +14,7 @@ use {
     pubkey::Pubkey,
     sanitize::Sanitize,
     signature::Signature,
+    simple_vote_transaction_checker::is_simple_vote_transaction,
     solana_sdk::feature_set,
     transaction::{Result, Transaction, TransactionError, VersionedTransaction},
 },
@@ -96,44 +97,19 @@ impl SanitizedTransaction {
         is_simple_vote_tx: Option<bool>,
         address_loader: impl AddressLoader,
     ) -> Result<Self> {
-        tx.sanitize()?;
-
+        let sanitized_versioned_tx = SanitizedVersionedTransaction::try_from(tx)?;
+        let is_simple_vote_tx = is_simple_vote_tx
+            .unwrap_or_else(|| is_simple_vote_transaction(&sanitized_versioned_tx));
         let message_hash = match message_hash.into() {
-            MessageHash::Compute => tx.message.hash(),
+            MessageHash::Compute => sanitized_versioned_tx.message.message.hash(),
             MessageHash::Precomputed(hash) => hash,
         };
-
-        let signatures = tx.signatures;
-        let message = match tx.message {
-            VersionedMessage::Legacy(message) => {
-                SanitizedMessage::Legacy(LegacyMessage::new(message))
-            }
-            VersionedMessage::V0(message) => {
-                let loaded_addresses =
-                    address_loader.load_addresses(&message.address_table_lookups)?;
-                SanitizedMessage::V0(v0::LoadedMessage::new(message, loaded_addresses))
-            }
-        };
-
-        let is_simple_vote_tx = is_simple_vote_tx.unwrap_or_else(|| {
-            if signatures.len() < 3
-                && message.instructions().len() == 1
-                && matches!(message, SanitizedMessage::Legacy(_))
-            {
-                let mut ix_iter = message.program_instructions_iter();
-                ix_iter.next().map(|(program_id, _ix)| program_id)
-                    == Some(&crate::vote::program::id())
-            } else {
-                false
-            }
-        });
-
-        Ok(Self {
-            message,
+        Self::try_new(
+            sanitized_versioned_tx,
             message_hash,
             is_simple_vote_tx,
-            signatures,
-        })
+            address_loader,
+        )
     }
 
     pub fn try_from_legacy_transaction(tx: Transaction) -> Result<Self> {
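A note on the `.next().xor(.next())` idiom in `is_simple_vote_transaction` above: `Option::xor` returns `Some` only when exactly one operand is `Some`, so two consecutive `next()` calls yield the sole element of a one-element iterator and `None` otherwise. A standalone sketch (the helper name is illustrative):

fn only_element<I: Iterator>(mut iter: I) -> Option<I::Item> {
    // Some(first) xor None => Some(first); Some xor Some => None; None xor None => None
    iter.next().xor(iter.next())
}

fn main() {
    assert_eq!(only_element([7].into_iter()), Some(7));
    assert_eq!(only_element(std::iter::empty::<u8>()), None);
    assert_eq!(only_element([1, 2, 3].into_iter()), None);
}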
diff --git a/sdk/src/transaction/versioned/sanitized.rs b/sdk/src/transaction/versioned/sanitized.rs index 68d0581e23d4ba..61ecdfea56bb2a 100644 --- a/sdk/src/transaction/versioned/sanitized.rs +++ b/sdk/src/transaction/versioned/sanitized.rs @@ -32,6 +32,11 @@ impl SanitizedVersionedTransaction { pub fn get_message(&self) -> &SanitizedVersionedMessage { &self.message } + + /// Consumes the SanitizedVersionedTransaction, returning the fields individually. + pub fn destruct(self) -> (Vec, SanitizedVersionedMessage) { + (self.signatures, self.message) + } } #[cfg(test)] diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 77cbb831fb0561..981f64870f6063 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -17,7 +17,8 @@ use { }; use { crate::{ - account::{AccountSharedData, ReadableAccount}, + account::{is_builtin, is_executable, AccountSharedData, ReadableAccount}, + feature_set::FeatureSet, instruction::InstructionError, pubkey::Pubkey, }, @@ -489,7 +490,7 @@ impl InstructionContext { self.instruction_accounts.len() as IndexOfAccount } - /// Assert that enough account were supplied to this Instruction + /// Assert that enough accounts were supplied to this Instruction pub fn check_number_of_instruction_accounts( &self, expected_at_least: IndexOfAccount, @@ -739,7 +740,11 @@ impl<'a> BorrowedAccount<'a> { /// Assignes the owner of this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn set_owner(&mut self, pubkey: &[u8]) -> Result<(), InstructionError> { + pub fn set_owner( + &mut self, + pubkey: &[u8], + feature_set: &FeatureSet, + ) -> Result<(), InstructionError> { // Only the owner can assign a new owner if !self.is_owned_by_current_program() { return Err(InstructionError::ModifiedProgramId); @@ -749,7 +754,7 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ModifiedProgramId); } // and only if the account is not executable - if self.is_executable() { + if self.is_executable(feature_set) { return Err(InstructionError::ModifiedProgramId); } // and only if the data is zero-initialized or empty @@ -773,7 +778,11 @@ impl<'a> BorrowedAccount<'a> { /// Overwrites the number of lamports of this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn set_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { + pub fn set_lamports( + &mut self, + lamports: u64, + feature_set: &FeatureSet, + ) -> Result<(), InstructionError> { // An account not owned by the program cannot have its balance decrease if !self.is_owned_by_current_program() && lamports < self.get_lamports() { return Err(InstructionError::ExternalAccountLamportSpend); @@ -783,7 +792,7 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ReadonlyLamportChange); } // The balance of executable accounts may not change - if self.is_executable() { + if self.is_executable(feature_set) { return Err(InstructionError::ExecutableLamportChange); } // don't touch the account if the lamports do not change @@ -797,21 +806,31 @@ impl<'a> BorrowedAccount<'a> { /// Adds lamports to this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn checked_add_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { + pub fn checked_add_lamports( + &mut self, + lamports: u64, + feature_set: &FeatureSet, + ) -> Result<(), InstructionError> { self.set_lamports( self.get_lamports() .checked_add(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, + feature_set, ) } /// Subtracts lamports from this account 
(transaction wide) #[cfg(not(target_os = "solana"))] - pub fn checked_sub_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { + pub fn checked_sub_lamports( + &mut self, + lamports: u64, + feature_set: &FeatureSet, + ) -> Result<(), InstructionError> { self.set_lamports( self.get_lamports() .checked_sub(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, + feature_set, ) } @@ -823,8 +842,11 @@ impl<'a> BorrowedAccount<'a> { /// Returns a writable slice of the account data (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn get_data_mut(&mut self) -> Result<&mut [u8], InstructionError> { - self.can_data_be_changed()?; + pub fn get_data_mut( + &mut self, + feature_set: &FeatureSet, + ) -> Result<&mut [u8], InstructionError> { + self.can_data_be_changed(feature_set)?; self.touch()?; self.make_data_mut(); Ok(self.account.data_as_mut_slice()) @@ -849,9 +871,13 @@ impl<'a> BorrowedAccount<'a> { not(target_os = "solana"), any(test, feature = "dev-context-only-utils") ))] - pub fn set_data(&mut self, data: Vec) -> Result<(), InstructionError> { + pub fn set_data( + &mut self, + data: Vec, + feature_set: &FeatureSet, + ) -> Result<(), InstructionError> { self.can_data_be_resized(data.len())?; - self.can_data_be_changed()?; + self.can_data_be_changed(feature_set)?; self.touch()?; self.update_accounts_resize_delta(data.len())?; @@ -864,14 +890,18 @@ impl<'a> BorrowedAccount<'a> { /// Call this when you have a slice of data you do not own and want to /// replace the account data with it. #[cfg(not(target_os = "solana"))] - pub fn set_data_from_slice(&mut self, data: &[u8]) -> Result<(), InstructionError> { + pub fn set_data_from_slice( + &mut self, + data: &[u8], + feature_set: &FeatureSet, + ) -> Result<(), InstructionError> { self.can_data_be_resized(data.len())?; - self.can_data_be_changed()?; + self.can_data_be_changed(feature_set)?; self.touch()?; self.update_accounts_resize_delta(data.len())?; // Calling make_data_mut() here guarantees that set_data_from_slice() // copies in places, extending the account capacity if necessary but - // never reducing it. This is required as the account migh be directly + // never reducing it. This is required as the account might be directly // mapped into a MemoryRegion, and therefore reducing capacity would // leave a hole in the vm address space. After CPI or upon program // termination, the runtime will zero the extra capacity. @@ -885,9 +915,13 @@ impl<'a> BorrowedAccount<'a> { /// /// Fills it with zeros at the end if is extended or truncates at the end otherwise. 
#[cfg(not(target_os = "solana"))]
-    pub fn set_data_length(&mut self, new_length: usize) -> Result<(), InstructionError> {
+    pub fn set_data_length(
+        &mut self,
+        new_length: usize,
+        feature_set: &FeatureSet,
+    ) -> Result<(), InstructionError> {
         self.can_data_be_resized(new_length)?;
-        self.can_data_be_changed()?;
+        self.can_data_be_changed(feature_set)?;
         // don't touch the account if the length does not change
         if self.get_data().len() == new_length {
             return Ok(());
@@ -900,10 +934,14 @@ impl<'a> BorrowedAccount<'a> {
 
     /// Appends all elements in a slice to the account
     #[cfg(not(target_os = "solana"))]
-    pub fn extend_from_slice(&mut self, data: &[u8]) -> Result<(), InstructionError> {
+    pub fn extend_from_slice(
+        &mut self,
+        data: &[u8],
+        feature_set: &FeatureSet,
+    ) -> Result<(), InstructionError> {
         let new_len = self.get_data().len().saturating_add(data.len());
         self.can_data_be_resized(new_len)?;
-        self.can_data_be_changed()?;
+        self.can_data_be_changed(feature_set)?;
 
         if data.is_empty() {
             return Ok(());
@@ -957,7 +995,7 @@ impl<'a> BorrowedAccount<'a> {
         // about to write into it. Make the account mutable by copying it in a
         // buffer with MAX_PERMITTED_DATA_INCREASE capacity so that if the
         // transaction reallocs, we don't have to copy the whole account data a
-        // second time to fullfill the realloc.
+        // second time to fulfill the realloc.
         //
         // NOTE: The account memory region CoW code in bpf_loader::create_vm() implements the same
         // logic and must be kept in sync.
@@ -976,8 +1014,12 @@ impl<'a> BorrowedAccount<'a> {
 
     /// Serializes a state into the account data
     #[cfg(not(target_os = "solana"))]
-    pub fn set_state<T: serde::Serialize>(&mut self, state: &T) -> Result<(), InstructionError> {
-        let data = self.get_data_mut()?;
+    pub fn set_state<T: serde::Serialize>(
+        &mut self,
+        state: &T,
+        feature_set: &FeatureSet,
+    ) -> Result<(), InstructionError> {
+        let data = self.get_data_mut(feature_set)?;
         let serialized_size =
             bincode::serialized_size(state).map_err(|_| InstructionError::GenericError)?;
         if serialized_size > data.len() as u64 {
@@ -998,8 +1040,8 @@ impl<'a> BorrowedAccount<'a> {
 
     /// Returns whether this account is executable (transaction wide)
     #[inline]
-    pub fn is_executable(&self) -> bool {
-        self.account.executable()
+    pub fn is_executable(&self, feature_set: &FeatureSet) -> bool {
+        is_builtin(&*self.account) || is_executable(&*self.account, feature_set)
     }
 
     /// Configures whether this account is executable (transaction wide)
@@ -1022,11 +1064,11 @@ impl<'a> BorrowedAccount<'a> {
             return Err(InstructionError::ExecutableModified);
         }
         // one can not clear the executable flag
-        if self.is_executable() && !is_executable {
+        if self.account.executable() && !is_executable {
             return Err(InstructionError::ExecutableModified);
         }
         // don't touch the account if the executable flag does not change
-        if self.is_executable() == is_executable {
+        if self.account.executable() == is_executable {
             return Ok(());
         }
         self.touch()?;
@@ -1077,9 +1119,9 @@ impl<'a> BorrowedAccount<'a> {
 
     /// Returns an error if the account data can not be mutated by the current program
     #[cfg(not(target_os = "solana"))]
-    pub fn can_data_be_changed(&self) -> Result<(), InstructionError> {
+    pub fn can_data_be_changed(&self, feature_set: &FeatureSet) -> Result<(), InstructionError> {
         // Only non-executable accounts data can be changed
-        if self.is_executable() {
+        if self.is_executable(feature_set) {
             return Err(InstructionError::ExecutableDataModified);
         }
         // and only if the account is writable
@@ -1152,7 +1194,7 @@ pub struct ExecutionRecord {
 
 impl From<TransactionContext> for
ExecutionRecord { fn from(context: TransactionContext) -> Self { let accounts = Rc::try_unwrap(context.accounts) - .expect("transaction_context.accounts has unexpectd outstanding refs"); + .expect("transaction_context.accounts has unexpected outstanding refs"); let touched_account_count = accounts.touched_count() as u64; let accounts = accounts.into_accounts(); Self { diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml index 71431037f57471..35e76524d9017a 100644 --- a/send-transaction-service/Cargo.toml +++ b/send-transaction-service/Cargo.toml @@ -21,6 +21,7 @@ solana-tpu-client = { workspace = true } [dev-dependencies] solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 27aa1bea400ac4..896600e93123cb 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -892,28 +892,37 @@ mod test { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(4); - let bank = Bank::new_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let tpu_address = "127.0.0.1:0".parse().unwrap(); let config = Config { leader_forward_count: 1, ..Config::default() }; - let root_bank = Arc::new(Bank::new_from_parent( + let root_bank = Bank::new_from_parent( bank_forks.read().unwrap().working_bank(), &Pubkey::default(), 1, - )); + ); + let root_bank = bank_forks + .write() + .unwrap() + .insert(root_bank) + .clone_without_scheduler(); + let rooted_signature = root_bank .transfer(1, &mint_keypair, &mint_keypair.pubkey()) .unwrap(); - let working_bank = Arc::new(Bank::new_from_parent( - root_bank.clone(), - &Pubkey::default(), - 2, - )); + let working_bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent( + root_bank.clone(), + &Pubkey::default(), + 2, + )) + .clone_without_scheduler(); let non_rooted_signature = working_bank .transfer(2, &mint_keypair, &mint_keypair.pubkey()) @@ -1158,19 +1167,24 @@ mod test { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(4); - let bank = Bank::new_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let tpu_address = "127.0.0.1:0".parse().unwrap(); let config = Config { leader_forward_count: 1, ..Config::default() }; - let root_bank = Arc::new(Bank::new_from_parent( + let root_bank = Bank::new_from_parent( bank_forks.read().unwrap().working_bank(), &Pubkey::default(), 1, - )); + ); + let root_bank = bank_forks + .write() + .unwrap() + .insert(root_bank) + .clone_without_scheduler(); + let rooted_signature = root_bank .transfer(1, &mint_keypair, &mint_keypair.pubkey()) .unwrap(); @@ -1184,11 +1198,15 @@ mod test { AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); root_bank.store_account(&nonce_address, &nonce_account); - let working_bank = Arc::new(Bank::new_from_parent( - root_bank.clone(), - &Pubkey::default(), - 2, - )); + let working_bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent( + root_bank.clone(), + &Pubkey::default(), + 2, + )) + .clone_without_scheduler(); let 
non_rooted_signature = working_bank .transfer(2, &mint_keypair, &mint_keypair.pubkey()) .unwrap(); diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml index 23a84addb4b70b..228be023b68e72 100644 --- a/stake-accounts/Cargo.toml +++ b/stake-accounts/Cargo.toml @@ -21,7 +21,7 @@ solana-stake-program = { workspace = true } solana-version = { workspace = true } [dev-dependencies] -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs index b40cb1dcec5b30..caf04fc438f8cf 100644 --- a/stake-accounts/src/stake_accounts.rs +++ b/stake-accounts/src/stake_accounts.rs @@ -292,11 +292,12 @@ mod tests { stake::state::StakeStateV2, }, solana_stake_program::stake_state, + std::sync::Arc, }; - fn create_bank(lamports: u64) -> (Bank, Keypair, u64, u64) { + fn create_bank(lamports: u64) -> (Arc, Keypair, u64, u64) { let (genesis_config, mint_keypair) = create_genesis_config(lamports); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let stake_rent = bank.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()); let system_rent = bank.get_minimum_balance_for_rent_exemption(0); (bank, mint_keypair, stake_rent, system_rent) @@ -355,7 +356,7 @@ mod tests { fn test_new_derived_stake_account() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -392,7 +393,7 @@ mod tests { fn test_authorize_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -454,7 +455,7 @@ mod tests { fn test_lockup_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -545,7 +546,7 @@ mod tests { fn test_rebase_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -608,7 +609,7 @@ mod tests { fn test_move_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); diff 
--git a/storage-bigtable/init-bigtable.sh b/storage-bigtable/init-bigtable.sh index 3b988e2ef65f79..43ea293bb793ba 100755 --- a/storage-bigtable/init-bigtable.sh +++ b/storage-bigtable/init-bigtable.sh @@ -16,7 +16,7 @@ if [[ -n $BIGTABLE_EMULATOR_HOST ]]; then cbt+=(-project emulator) fi -for table in blocks tx tx-by-addr; do +for table in blocks entries tx tx-by-addr; do ( set -x "${cbt[@]}" createtable $table diff --git a/storage-bigtable/src/access_token.rs b/storage-bigtable/src/access_token.rs index f4d5e9ade98bee..c2cd53057d6538 100644 --- a/storage-bigtable/src/access_token.rs +++ b/storage-bigtable/src/access_token.rs @@ -34,12 +34,24 @@ fn load_stringified_credentials(credential: String) -> Result, - token: Arc>, + token: RwLock<(Token, Instant)>, + refresh_active: AtomicBool, +} + +#[derive(Clone)] +pub struct AccessToken { + inner: Arc, +} + +impl std::ops::Deref for AccessToken { + type Target = AccessTokenInner; + + fn deref(&self) -> &Self::Target { + &self.inner + } } impl AccessToken { @@ -52,12 +64,14 @@ impl AccessToken { if let Err(err) = credentials.rsa_key() { Err(format!("Invalid rsa key: {err}")) } else { - let token = Arc::new(RwLock::new(Self::get_token(&credentials, &scope).await?)); + let token = RwLock::new(Self::get_token(&credentials, &scope).await?); let access_token = Self { - credentials, - scope, - token, - refresh_active: Arc::new(AtomicBool::new(false)), + inner: Arc::new(AccessTokenInner { + credentials, + scope, + token, + refresh_active: AtomicBool::new(false), + }), }; Ok(access_token) } @@ -91,41 +105,46 @@ impl AccessToken { } /// Call this function regularly to ensure the access token does not expire - pub async fn refresh(&self) { + pub fn refresh(&self) { // Check if it's time to try a token refresh - { - let token_r = self.token.read().unwrap(); - if token_r.1.elapsed().as_secs() < token_r.0.expires_in() as u64 / 2 { - return; - } + let token_r = self.token.read().unwrap(); + if token_r.1.elapsed().as_secs() < token_r.0.expires_in() as u64 / 2 { + debug!("Token is not expired yet"); + return; + } + drop(token_r); - #[allow(deprecated)] - if self - .refresh_active - .compare_and_swap(false, true, Ordering::Relaxed) - { - // Refresh already pending - return; - } + // Refresh already is progress + let refresh_progress = + self.refresh_active + .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed); + if refresh_progress.is_err() { + debug!("Token update is already in progress"); + return; } - info!("Refreshing token"); - match time::timeout( - time::Duration::from_secs(5), - Self::get_token(&self.credentials, &self.scope), - ) - .await - { - Ok(new_token) => match (new_token, self.token.write()) { - (Ok(new_token), Ok(mut token_w)) => *token_w = new_token, - (Ok(_new_token), Err(err)) => warn!("{}", err), - (Err(err), _) => warn!("{}", err), - }, - Err(_) => { - warn!("Token refresh timeout") + let this = self.clone(); + tokio::spawn(async move { + match time::timeout( + time::Duration::from_secs(5), + Self::get_token(&this.credentials, &this.scope), + ) + .await + { + Ok(new_token) => match new_token { + Ok(new_token) => { + let mut token_w = this.token.write().unwrap(); + *token_w = new_token; + } + Err(err) => error!("Failed to fetch new token: {}", err), + }, + Err(_timeout) => { + warn!("Token refresh timeout") + } } - } - self.refresh_active.store(false, Ordering::Relaxed); + this.refresh_active.store(false, Ordering::Relaxed); + info!("Token refreshed"); + }); } /// Return an access token suitable for use in an HTTP 
authorization header diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index fd17cfa8dd2568..3eeee6f6eb2cda 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -6,7 +6,7 @@ use { compression::{compress_best, decompress}, root_ca_certificate, CredentialType, }, - backoff::{future::retry, ExponentialBackoff}, + backoff::{future::retry, Error as BackoffError, ExponentialBackoff}, log::*, std::{ str::FromStr, @@ -16,7 +16,7 @@ use { tonic::{codegen::InterceptedService, transport::ClientTlsConfig, Request, Status}, }; -#[allow(clippy::derive_partial_eq_without_eq)] +#[allow(clippy::derive_partial_eq_without_eq, clippy::enum_variant_names)] mod google { mod rpc { include!(concat!( @@ -84,6 +84,15 @@ pub enum Error { Timeout, } +fn to_backoff_err(err: Error) -> BackoffError { + if let Error::Rpc(ref status) = err { + if status.code() == tonic::Code::NotFound && status.message().starts_with("table") { + return BackoffError::Permanent(err); + } + } + err.into() +} + impl std::convert::From for Error { fn from(err: std::io::Error) -> Self { Self::Io(err) @@ -265,7 +274,8 @@ impl BigTableConnection { { retry(ExponentialBackoff::default(), || async { let mut client = self.client(); - Ok(client.put_bincode_cells(table, cells).await?) + let result = client.put_bincode_cells(table, cells).await; + result.map_err(to_backoff_err) }) .await } @@ -303,7 +313,8 @@ impl BigTableConnection { { retry(ExponentialBackoff::default(), || async { let mut client = self.client(); - Ok(client.put_protobuf_cells(table, cells).await?) + let result = client.put_protobuf_cells(table, cells).await; + result.map_err(to_backoff_err) }) .await } @@ -399,9 +410,9 @@ impl) -> InterceptedRequestResult> BigTable { Ok(rows) } - async fn refresh_access_token(&self) { + fn refresh_access_token(&self) { if let Some(ref access_token) = self.access_token { - access_token.refresh().await; + access_token.refresh(); } } @@ -423,7 +434,7 @@ impl) -> InterceptedRequestResult> BigTable { if rows_limit == 0 { return Ok(vec![]); } - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client .read_rows(ReadRowsRequest { @@ -468,7 +479,7 @@ impl) -> InterceptedRequestResult> BigTable { /// Check whether a row key exists in a `table` pub async fn row_key_exists(&mut self, table_name: &str, row_key: RowKey) -> Result { - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client @@ -513,7 +524,7 @@ impl) -> InterceptedRequestResult> BigTable { if rows_limit == 0 { return Ok(vec![]); } - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client .read_rows(ReadRowsRequest { @@ -547,7 +558,7 @@ impl) -> InterceptedRequestResult> BigTable { table_name: &str, row_keys: &[RowKey], ) -> Result> { - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client @@ -583,7 +594,7 @@ impl) -> InterceptedRequestResult> BigTable { table_name: &str, row_key: RowKey, ) -> Result { - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client @@ -612,7 +623,7 @@ impl) -> InterceptedRequestResult> BigTable { /// Delete one or more `table` rows async fn delete_rows(&mut self, table_name: &str, row_keys: &[RowKey]) -> Result<()> { - self.refresh_access_token().await; + self.refresh_access_token(); let mut entries = vec![]; for row_key in row_keys { @@ -658,7 +669,7 @@ impl) -> InterceptedRequestResult> BigTable { family_name: &str, 
row_data: &[(&RowKey, RowData)], ) -> Result<()> { - self.refresh_access_token().await; + self.refresh_access_token(); let mut entries = vec![]; for (row_key, row_data) in row_data { @@ -735,6 +746,14 @@ impl) -> InterceptedRequestResult> BigTable { .collect()) } + pub async fn get_protobuf_cell

    <P>(&mut self, table: &str, key: RowKey) -> Result<P>
    + where + P: prost::Message + Default, + { + let row_data = self.get_single_row_data(table, key.clone()).await?; + deserialize_protobuf_cell_data(&row_data, table, key.to_string()) + } + pub async fn get_protobuf_or_bincode_cell( &mut self, table: &str, diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs index 0b8ed4d3a593c3..9f6667cece7a76 100644 --- a/storage-bigtable/src/lib.rs +++ b/storage-bigtable/src/lib.rs @@ -15,12 +15,13 @@ use { timing::AtomicInterval, transaction::{TransactionError, VersionedTransaction}, }, - solana_storage_proto::convert::{generated, tx_by_addr}, + solana_storage_proto::convert::{entries, generated, tx_by_addr}, solana_transaction_status::{ extract_and_fmt_memos, ConfirmedBlock, ConfirmedTransactionStatusWithSignature, - ConfirmedTransactionWithStatusMeta, Reward, TransactionByAddrInfo, + ConfirmedTransactionWithStatusMeta, EntrySummary, Reward, TransactionByAddrInfo, TransactionConfirmationStatus, TransactionStatus, TransactionStatusMeta, - TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedTransactionWithStatusMeta, + TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries, + VersionedTransactionWithStatusMeta, }, std::{ collections::{HashMap, HashSet}, @@ -91,6 +92,10 @@ fn slot_to_blocks_key(slot: Slot) -> String { slot_to_key(slot) } +fn slot_to_entries_key(slot: Slot) -> String { + slot_to_key(slot) +} + fn slot_to_tx_by_addr_key(slot: Slot) -> String { slot_to_key(!slot) } @@ -606,6 +611,25 @@ impl LedgerStorage { Ok(block_exists) } + /// Fetches a vector of block entries via a multirow fetch + pub async fn get_entries(&self, slot: Slot) -> Result> { + trace!( + "LedgerStorage::get_block_entries request received: {:?}", + slot + ); + self.stats.increment_num_queries(); + let mut bigtable = self.connection.client(); + let entry_cell_data = bigtable + .get_protobuf_cell::("entries", slot_to_entries_key(slot)) + .await + .map_err(|err| match err { + bigtable::Error::RowNotFound => Error::BlockNotFound(slot), + _ => err.into(), + })?; + let entries = entry_cell_data.entries.into_iter().map(Into::into); + Ok(entries) + } + pub async fn get_signature_status(&self, signature: &Signature) -> Result { trace!( "LedgerStorage::get_signature_status request received: {:?}", @@ -799,7 +823,7 @@ impl LedgerStorage { .unwrap_or(0); // Return the next tx-by-addr data of amount `limit` plus extra to account for the largest - // number that might be flitered out + // number that might be filtered out let tx_by_addr_data = bigtable .get_row_data( "tx-by-addr", @@ -883,9 +907,32 @@ impl LedgerStorage { "LedgerStorage::upload_confirmed_block request received: {:?}", slot ); + self.upload_confirmed_block_with_entries( + slot, + VersionedConfirmedBlockWithEntries { + block: confirmed_block, + entries: vec![], + }, + ) + .await + } + + pub async fn upload_confirmed_block_with_entries( + &self, + slot: Slot, + confirmed_block: VersionedConfirmedBlockWithEntries, + ) -> Result<()> { + trace!( + "LedgerStorage::upload_confirmed_block_with_entries request received: {:?}", + slot + ); let mut by_addr: HashMap<&Pubkey, Vec> = HashMap::new(); + let VersionedConfirmedBlockWithEntries { + block: confirmed_block, + entries, + } = confirmed_block; - let mut tx_cells = vec![]; + let mut tx_cells = Vec::with_capacity(confirmed_block.transactions.len()); for (index, transaction_with_meta) in confirmed_block.transactions.iter().enumerate() { let VersionedTransactionWithStatusMeta { meta, transaction } = transaction_with_meta; 
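+ // From here the loop flattens each transaction into the cells uploaded
+ // alongside the new entries row: `tx_cells` was pre-sized to the block's
+ // transaction count to avoid reallocation while iterating, and `index`
+ // records the transaction's position within the block for the tx-by-addr
+ // rows accumulated in `by_addr`.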
let err = meta.status.clone().err(); @@ -934,6 +981,14 @@ impl LedgerStorage { }) .collect(); + let num_entries = entries.len(); + let entry_cell = ( + slot_to_entries_key(slot), + entries::Entries { + entries: entries.into_iter().enumerate().map(Into::into).collect(), + }, + ); + let mut tasks = vec![]; if !tx_cells.is_empty() { @@ -955,6 +1010,14 @@ impl LedgerStorage { })); } + if num_entries > 0 { + let conn = self.connection.clone(); + tasks.push(tokio::spawn(async move { + conn.put_protobuf_cells_with_retry::("entries", &[entry_cell]) + .await + })); + } + let mut bytes_written = 0; let mut maybe_first_err: Option = None; @@ -995,6 +1058,7 @@ impl LedgerStorage { "storage-bigtable-upload-block", ("slot", slot, i64), ("transactions", num_transactions, i64), + ("entries", num_entries, i64), ("bytes", bytes_written, i64), ); Ok(()) @@ -1088,6 +1152,13 @@ impl LedgerStorage { vec![] }; + let entries_exist = self + .connection + .client() + .row_key_exists("entries", slot_to_entries_key(slot)) + .await + .is_ok_and(|x| x); + if !dry_run { if !address_slot_rows.is_empty() { self.connection @@ -1101,17 +1172,24 @@ impl LedgerStorage { .await?; } + if entries_exist { + self.connection + .delete_rows_with_retry("entries", &[slot_to_entries_key(slot)]) + .await?; + } + self.connection .delete_rows_with_retry("blocks", &[slot_to_blocks_key(slot)]) .await?; } info!( - "{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows", + "{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows, {} entry row", if dry_run { "[dry run] " } else { "" }, slot, tx_deletion_rows.len(), - address_slot_rows.len() + address_slot_rows.len(), + if entries_exist { "with" } else {"WITHOUT"} ); Ok(()) diff --git a/storage-proto/build.rs b/storage-proto/build.rs index 947f562c1c6f74..583a95650e6f3a 100644 --- a/storage-proto/build.rs +++ b/storage-proto/build.rs @@ -6,7 +6,11 @@ fn main() -> Result<(), std::io::Error> { } let proto_base_path = std::path::PathBuf::from("proto"); - let proto_files = ["confirmed_block.proto", "transaction_by_addr.proto"]; + let proto_files = [ + "confirmed_block.proto", + "entries.proto", + "transaction_by_addr.proto", + ]; let mut protos = Vec::new(); for proto_file in &proto_files { let proto = proto_base_path.join(proto_file); diff --git a/storage-proto/proto/entries.proto b/storage-proto/proto/entries.proto new file mode 100644 index 00000000000000..64108925ad0ca6 --- /dev/null +++ b/storage-proto/proto/entries.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package solana.storage.Entries; + +message Entries { + repeated Entry entries = 1; +} + +message Entry { + uint32 index = 1; + uint64 num_hashes = 2; + bytes hash = 3; + uint64 num_transactions = 4; + uint32 starting_transaction_index = 5; +} diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 7ca5728d398ec1..e9070951942e2c 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -15,7 +15,7 @@ use { transaction_context::TransactionReturnData, }, solana_transaction_status::{ - ConfirmedBlock, InnerInstruction, InnerInstructions, Reward, RewardType, + ConfirmedBlock, EntrySummary, InnerInstruction, InnerInstructions, Reward, RewardType, TransactionByAddrInfo, TransactionStatusMeta, TransactionTokenBalance, TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedTransactionWithStatusMeta, }, @@ -41,6 +41,11 @@ pub mod tx_by_addr { )); } +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod entries { + include!(concat!(env!("OUT_DIR"), 
"/solana.storage.entries.rs")); +} + impl From> for generated::Rewards { fn from(rewards: Vec) -> Self { Self { @@ -1189,6 +1194,29 @@ impl TryFrom for Vec { } } +impl From<(usize, EntrySummary)> for entries::Entry { + fn from((index, entry_summary): (usize, EntrySummary)) -> Self { + entries::Entry { + index: index as u32, + num_hashes: entry_summary.num_hashes, + hash: entry_summary.hash.as_ref().into(), + num_transactions: entry_summary.num_transactions, + starting_transaction_index: entry_summary.starting_transaction_index as u32, + } + } +} + +impl From for EntrySummary { + fn from(entry: entries::Entry) -> Self { + EntrySummary { + num_hashes: entry.num_hashes, + hash: Hash::new(&entry.hash), + num_transactions: entry.num_transactions, + starting_transaction_index: entry.starting_transaction_index as usize, + } + } +} + #[cfg(test)] mod test { use {super::*, enum_iterator::all}; diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index d238552abb0e7c..951ec6cb317fcd 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -1,6 +1,6 @@ use { crate::{ - quic::{configure_server, QuicServerError, StreamStats}, + quic::{configure_server, QuicServerError, StreamStats, MAX_UNSTAKED_CONNECTIONS}, streamer::StakedNodes, tls_certificates::get_pubkey_from_tls_certificate, }, @@ -39,6 +39,10 @@ use { tokio::{task::JoinHandle, time::timeout}, }; +/// Limit to 500K PPS +const MAX_STREAMS_PER_100MS: u64 = 500_000 / 10; +const MAX_UNSTAKED_STREAMS_PERCENT: u64 = 20; +const STREAM_THROTTLING_INTERVAL: Duration = Duration::from_millis(100); const WAIT_FOR_STREAM_TIMEOUT: Duration = Duration::from_millis(100); pub const DEFAULT_WAIT_FOR_CHUNK_TIMEOUT: Duration = Duration::from_secs(10); @@ -55,6 +59,7 @@ const CONNECTION_CLOSE_REASON_EXCEED_MAX_STREAM_COUNT: &[u8] = b"exceed_max_stre const CONNECTION_CLOSE_CODE_TOO_MANY: u32 = 4; const CONNECTION_CLOSE_REASON_TOO_MANY: &[u8] = b"too_many"; +const STREAM_STOP_CODE_THROTTLING: u32 = 15; // A sequence of bytes that is part of a packet // along with where in the packet it is @@ -264,6 +269,7 @@ enum ConnectionHandlerError { MaxStreamError, } +#[derive(Clone)] struct NewConnectionHandlerParams { // In principle, the code can be made to work with a crossbeam channel // as long as we're careful never to use a blocking recv or send call @@ -348,13 +354,11 @@ fn handle_and_cache_new_connection( drop(connection_table_l); tokio::spawn(handle_connection( connection, - params.packet_sender.clone(), remote_addr, - params.remote_pubkey, last_update, connection_table, stream_exit, - params.stats.clone(), + params.clone(), peer_type, wait_for_chunk_timeout, )); @@ -681,19 +685,42 @@ async fn packet_batch_sender( } } -#[allow(clippy::too_many_arguments)] +fn max_streams_for_connection_in_100ms( + connection_type: ConnectionPeerType, + stake: u64, + total_stake: u64, +) -> u64 { + if matches!(connection_type, ConnectionPeerType::Unstaked) || stake == 0 { + Percentage::from(MAX_UNSTAKED_STREAMS_PERCENT) + .apply_to(MAX_STREAMS_PER_100MS) + .saturating_div(MAX_UNSTAKED_CONNECTIONS as u64) + } else { + let max_total_staked_streams: u64 = MAX_STREAMS_PER_100MS + - Percentage::from(MAX_UNSTAKED_STREAMS_PERCENT).apply_to(MAX_STREAMS_PER_100MS); + ((max_total_staked_streams as f64 / total_stake as f64) * stake as f64) as u64 + } +} + +fn reset_throttling_params_if_needed(last_instant: &mut tokio::time::Instant) -> bool { + if tokio::time::Instant::now().duration_since(*last_instant) > STREAM_THROTTLING_INTERVAL { + 
*last_instant = tokio::time::Instant::now(); + true + } else { + false + } +} + async fn handle_connection( connection: Connection, - packet_sender: AsyncSender, remote_addr: SocketAddr, - remote_pubkey: Option, last_update: Arc, connection_table: Arc>, stream_exit: Arc, - stats: Arc, + params: NewConnectionHandlerParams, peer_type: ConnectionPeerType, wait_for_chunk_timeout: Duration, ) { + let stats = params.stats; debug!( "quic new connection {} streams: {} connections: {}", remote_addr, @@ -702,17 +729,29 @@ async fn handle_connection( ); let stable_id = connection.stable_id(); stats.total_connections.fetch_add(1, Ordering::Relaxed); + let max_streams_per_100ms = + max_streams_for_connection_in_100ms(peer_type, params.stake, params.total_stake); + let mut last_throttling_instant = tokio::time::Instant::now(); + let mut streams_in_current_interval = 0; while !stream_exit.load(Ordering::Relaxed) { if let Ok(stream) = tokio::time::timeout(WAIT_FOR_STREAM_TIMEOUT, connection.accept_uni()).await { match stream { Ok(mut stream) => { + if reset_throttling_params_if_needed(&mut last_throttling_instant) { + streams_in_current_interval = 0; + } else if streams_in_current_interval >= max_streams_per_100ms { + stats.throttled_streams.fetch_add(1, Ordering::Relaxed); + let _ = stream.stop(VarInt::from_u32(STREAM_STOP_CODE_THROTTLING)); + continue; + } + streams_in_current_interval = streams_in_current_interval.saturating_add(1); stats.total_streams.fetch_add(1, Ordering::Relaxed); stats.total_new_streams.fetch_add(1, Ordering::Relaxed); let stream_exit = stream_exit.clone(); let stats = stats.clone(); - let packet_sender = packet_sender.clone(); + let packet_sender = params.packet_sender.clone(); let last_update = last_update.clone(); tokio::spawn(async move { let mut maybe_batch = None; @@ -765,7 +804,7 @@ async fn handle_connection( } let removed_connection_count = connection_table.lock().unwrap().remove_connection( - ConnectionTableKey::new(remote_addr.ip(), remote_pubkey), + ConnectionTableKey::new(remote_addr.ip(), params.remote_pubkey), remote_addr.port(), stable_id, ); @@ -1989,4 +2028,40 @@ pub mod test { compute_receive_window_ratio_for_staked_node(max_stake, min_stake, max_stake + 10); assert_eq!(ratio, max_ratio); } + + #[test] + fn test_max_streams_for_connection_in_100ms() { + // 50K packets per ms * 20% / 500 max unstaked connections + assert_eq!( + max_streams_for_connection_in_100ms(ConnectionPeerType::Unstaked, 0, 10000), + 20 + ); + + // 50K packets per ms * 20% / 500 max unstaked connections + assert_eq!( + max_streams_for_connection_in_100ms(ConnectionPeerType::Unstaked, 10, 10000), + 20 + ); + + // If stake is 0, same limits as unstaked connections will apply. 
+ // 50K packets per ms * 20% / 500 max unstaked connections + assert_eq!( + max_streams_for_connection_in_100ms(ConnectionPeerType::Staked, 0, 10000), + 20 + ); + + // max staked streams = 50K packets per ms * 80% = 40K + // function = 40K * stake / total_stake + assert_eq!( + max_streams_for_connection_in_100ms(ConnectionPeerType::Staked, 15, 10000), + 60 + ); + + // max staked streams = 50K packets per ms * 80% = 40K + // function = 40K * stake / total_stake + assert_eq!( + max_streams_for_connection_in_100ms(ConnectionPeerType::Staked, 1000, 10000), + 4000 + ); + } } diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index fee0db110f11ec..5a56c74f452716 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -10,7 +10,7 @@ use { solana_perf::packet::PacketBatch, solana_sdk::{ packet::PACKET_DATA_SIZE, - quic::{QUIC_MAX_TIMEOUT, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS}, + quic::{NotifyKeyUpdate, QUIC_MAX_TIMEOUT, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS}, signature::Keypair, }, std::{ @@ -36,6 +36,12 @@ impl SkipClientVerification { } } +pub struct SpawnServerResult { + pub endpoint: Endpoint, + pub thread: thread::JoinHandle<()>, + pub key_updater: Arc, +} + impl rustls::server::ClientCertVerifier for SkipClientVerification { fn client_auth_root_subjects(&self) -> &[DistinguishedName] { &[] @@ -113,6 +119,19 @@ pub enum QuicServerError { TlsError(#[from] rustls::Error), } +pub struct EndpointKeyUpdater { + endpoint: Endpoint, + gossip_host: IpAddr, +} + +impl NotifyKeyUpdate for EndpointKeyUpdater { + fn update_key(&self, key: &Keypair) -> Result<(), Box> { + let (config, _) = configure_server(key, self.gossip_host)?; + self.endpoint.set_server_config(Some(config)); + Ok(()) + } +} + #[derive(Default)] pub struct StreamStats { pub(crate) total_connections: AtomicUsize, @@ -156,6 +175,7 @@ pub struct StreamStats { pub(crate) connection_setup_error_locally_closed: AtomicUsize, pub(crate) connection_removed: AtomicUsize, pub(crate) connection_remove_failed: AtomicUsize, + pub(crate) throttled_streams: AtomicUsize, } impl StreamStats { @@ -386,6 +406,11 @@ impl StreamStats { self.total_stream_read_timeouts.swap(0, Ordering::Relaxed), i64 ), + ( + "throttled_streams", + self.throttled_streams.swap(0, Ordering::Relaxed), + i64 + ), ); } } @@ -404,7 +429,7 @@ pub fn spawn_server( max_unstaked_connections: usize, wait_for_chunk_timeout: Duration, coalesce: Duration, -) -> Result<(Endpoint, thread::JoinHandle<()>), QuicServerError> { +) -> Result { let runtime = rt(); let (endpoint, _stats, task) = { let _guard = runtime.enter(); @@ -431,7 +456,15 @@ pub fn spawn_server( } }) .unwrap(); - Ok((endpoint, handle)) + let updater = EndpointKeyUpdater { + endpoint: endpoint.clone(), + gossip_host, + }; + Ok(SpawnServerResult { + endpoint, + thread: handle, + key_updater: Arc::new(updater), + }) } #[cfg(test)] @@ -457,7 +490,11 @@ mod test { let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (_, t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = spawn_server( "quic_streamer_test", s, &keypair, @@ -513,7 +550,11 @@ mod test { let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (_, t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = spawn_server( "quic_streamer_test", s, &keypair, @@ -556,7 +597,11 
@@ mod test { let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (_, t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = spawn_server( "quic_streamer_test", s, &keypair, diff --git a/system-test/testnet-automation.sh b/system-test/testnet-automation.sh index 9f4cbc19295d5f..e4256b7a7f0e82 100755 --- a/system-test/testnet-automation.sh +++ b/system-test/testnet-automation.sh @@ -12,7 +12,7 @@ function cleanup_testnet { Test failed during step: ${STEP} -Failure occured when running the following command: +Failure occurred when running the following command: $*" fi diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index e807f80c9692f0..9f994eee9a19df 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -777,12 +777,14 @@ impl TestValidator { validator_stake_lamports, validator_identity_lamports, config.fee_rate_governor.clone(), - config.rent, + config.rent.clone(), solana_sdk::genesis_config::ClusterType::Development, accounts.into_iter().collect(), ); genesis_config.epoch_schedule = config .epoch_schedule + .as_ref() + .cloned() .unwrap_or_else(EpochSchedule::without_warmup); if let Some(ticks_per_slot) = config.ticks_per_slot { diff --git a/tokens/README.md b/tokens/README.md index e63f363d7c78ae..be10c8211e7150 100644 --- a/tokens/README.md +++ b/tokens/README.md @@ -120,7 +120,7 @@ solana-tokens distribute-stake --stake-account-address \ --stake-authority --withdraw-authority --fee-payer ``` -Currently, this will subtract 1 SOL from each allocation and store it the +Currently, this will subtract 1 SOL from each allocation and store it in the recipient address. That SOL can be used to pay transaction fees on staking operations such as delegating stake. The rest of the allocation is put in a stake account. 
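The stream-throttling arithmetic introduced in streamer/src/nonblocking/quic.rs above is worth spelling out: 500K streams per second are budgeted as 50K per 100ms window, 20% of which is reserved for unstaked connections (split evenly among them) while staked connections share the remaining 80% pro rata by stake. Below is a minimal standalone sketch of that math, mirroring the diff's constants and assuming MAX_UNSTAKED_CONNECTIONS = 500 (the figure the test comments divide by); it is an illustration, not the crate's implementation.

    // Sketch of the per-connection stream budget from the diff above.
    const MAX_STREAMS_PER_100MS: u64 = 500_000 / 10; // 500K PPS budgeted per 100ms window
    const MAX_UNSTAKED_STREAMS_PERCENT: u64 = 20;
    const MAX_UNSTAKED_CONNECTIONS: u64 = 500; // assumed from the "/ 500" test comments

    fn max_streams_in_100ms(stake: u64, total_stake: u64) -> u64 {
        let unstaked_pool = MAX_STREAMS_PER_100MS * MAX_UNSTAKED_STREAMS_PERCENT / 100;
        if stake == 0 {
            // Unstaked (or zero-stake) connections share the 20% pool equally.
            unstaked_pool / MAX_UNSTAKED_CONNECTIONS
        } else {
            // Staked connections split the remaining 80% proportionally to stake.
            let staked_pool = MAX_STREAMS_PER_100MS - unstaked_pool;
            (staked_pool as f64 * stake as f64 / total_stake as f64) as u64
        }
    }

    fn main() {
        assert_eq!(max_streams_in_100ms(0, 10_000), 20); // 10_000 * 20% / 500
        assert_eq!(max_streams_in_100ms(15, 10_000), 60); // 40_000 * 15 / 10_000
        assert_eq!(max_streams_in_100ms(1_000, 10_000), 4_000); // 40_000 * 1_000 / 10_000
    }

Per the diff, the server resets each connection's counter every STREAM_THROTTLING_INTERVAL (100ms) and answers streams beyond the budget with stop code 15, counting them in the new throttled_streams metric.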
The new stake account address is output in the transaction diff --git a/tokens/src/arg_parser.rs b/tokens/src/arg_parser.rs index 924c4e3e8eebb6..024bdf52832d73 100644 --- a/tokens/src/arg_parser.rs +++ b/tokens/src/arg_parser.rs @@ -652,7 +652,7 @@ where { let matches = get_matches(args); let config_file = matches.value_of("config_file").unwrap().to_string(); - let url = matches.value_of("url").map(|x| x.to_string()); + let url = matches.value_of("json_rpc_url").map(|x| x.to_string()); let command = match matches.subcommand() { ("distribute-tokens", Some(matches)) => { diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index 5d69e9e291b6b5..94fecf5e6d5f73 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -238,6 +238,7 @@ fn run_transactions_dos( config.signers = vec![payer_keypairs[0], &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(program_location), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -513,7 +514,7 @@ fn main() { .long("batch-sleep-ms") .takes_value(true) .value_name("NUM") - .help("Sleep for this long the num outstanding transctions is greater than the batch size."), + .help("Sleep for this long the num outstanding transactions is greater than the batch size."), ) .arg( Arg::with_name("check_gossip") diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 3c830f591403fe..a4837acb3e107d 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -13,7 +13,9 @@ edition = { workspace = true } Inflector = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } -borsh = { workspace = true } +# Update this borsh dependency to the workspace version once +# spl-associated-token-account is upgraded and used in the monorepo. +borsh = { version = "0.10.3" } bs58 = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index fac20d9859cdbd..0eb13d36819c4a 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -230,6 +230,27 @@ pub struct InnerInstruction { pub stack_height: Option, } +/// Maps a list of inner instructions from `solana_sdk` into a list of this +/// crate's representation of inner instructions (with instruction indices). 
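+/// Indices are taken from each inner-instruction list's position within the
+/// transaction, `stack_height` is widened to `u32` and carried through, and
+/// entries whose inner instruction list ends up empty are filtered out.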
+pub fn map_inner_instructions( + inner_instructions: solana_sdk::inner_instruction::InnerInstructionsList, +) -> impl Iterator { + inner_instructions + .into_iter() + .enumerate() + .map(|(index, instructions)| InnerInstructions { + index: index as u8, + instructions: instructions + .into_iter() + .map(|info| InnerInstruction { + stack_height: Some(u32::from(info.stack_height)), + instruction: info.instruction, + }) + .collect(), + }) + .filter(|i| !i.instructions.is_empty()) +} + #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct UiInnerInstructions { @@ -240,7 +261,7 @@ pub struct UiInnerInstructions { } impl UiInnerInstructions { - fn parse(inner_instructions: InnerInstructions, account_keys: &AccountKeys) -> Self { + pub fn parse(inner_instructions: InnerInstructions, account_keys: &AccountKeys) -> Self { Self { index: inner_instructions.index, instructions: inner_instructions diff --git a/transaction-status/src/parse_address_lookup_table.rs b/transaction-status/src/parse_address_lookup_table.rs index 94127c8e06e203..a76b0138b0e1d0 100644 --- a/transaction-status/src/parse_address_lookup_table.rs +++ b/transaction-status/src/parse_address_lookup_table.rs @@ -126,7 +126,7 @@ mod test { #[test] fn test_parse_create_address_lookup_table_ix() { let from_pubkey = Pubkey::new_unique(); - // use explicit key to have predicatble bump_seed + // use explicit key to have predictable bump_seed let authority = Pubkey::from_str("HkxY6vXdrKzoCQLmdJ3cYo9534FdZQxzBNWTyrJzzqJM").unwrap(); let slot = 42; diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index ee9a04db3a7184..c7111ee622ad4e 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -4,9 +4,9 @@ use { }, extension::{ confidential_transfer::*, confidential_transfer_fee::*, cpi_guard::*, - default_account_state::*, interest_bearing_mint::*, memo_transfer::*, metadata_pointer::*, - mint_close_authority::*, permanent_delegate::*, reallocate::*, transfer_fee::*, - transfer_hook::*, + default_account_state::*, group_member_pointer::*, group_pointer::*, + interest_bearing_mint::*, memo_transfer::*, metadata_pointer::*, mint_close_authority::*, + permanent_delegate::*, reallocate::*, transfer_fee::*, transfer_hook::*, }, serde_json::{json, Map, Value}, solana_account_decoder::parse_token::{token_amount_to_ui_amount, UiAccountState}, @@ -233,7 +233,9 @@ pub fn parse_token( | AuthorityType::ConfidentialTransferMint | AuthorityType::TransferHookProgramId | AuthorityType::ConfidentialTransferFeeConfig - | AuthorityType::MetadataPointer => "mint", + | AuthorityType::MetadataPointer + | AuthorityType::GroupPointer + | AuthorityType::GroupMemberPointer => "mint", AuthorityType::AccountOwner | AuthorityType::CloseAccount => "account", }; let mut value = json!({ @@ -650,6 +652,30 @@ pub fn parse_token( account_keys, ) } + TokenInstruction::GroupPointerExtension => { + if instruction.data.len() < 2 { + return Err(ParseInstructionError::InstructionNotParsable( + ParsableProgram::SplToken, + )); + } + parse_group_pointer_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, + ) + } + TokenInstruction::GroupMemberPointerExtension => { + if instruction.data.len() < 2 { + return Err(ParseInstructionError::InstructionNotParsable( + ParsableProgram::SplToken, + )); + } + parse_group_member_pointer_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, + ) + } } } @@ -669,6 
+695,8 @@ pub enum UiAuthorityType { TransferHookProgramId, ConfidentialTransferFeeConfig, MetadataPointer, + GroupPointer, + GroupMemberPointer, } impl From for UiAuthorityType { @@ -689,6 +717,8 @@ impl From for UiAuthorityType { UiAuthorityType::ConfidentialTransferFeeConfig } AuthorityType::MetadataPointer => UiAuthorityType::MetadataPointer, + AuthorityType::GroupPointer => UiAuthorityType::GroupPointer, + AuthorityType::GroupMemberPointer => UiAuthorityType::GroupMemberPointer, } } } @@ -716,6 +746,10 @@ pub enum UiExtensionType { ConfidentialTransferFeeAmount, MetadataPointer, TokenMetadata, + GroupPointer, + GroupMemberPointer, + TokenGroup, + TokenGroupMember, } impl From for UiExtensionType { @@ -747,6 +781,10 @@ impl From for UiExtensionType { } ExtensionType::MetadataPointer => UiExtensionType::MetadataPointer, ExtensionType::TokenMetadata => UiExtensionType::TokenMetadata, + ExtensionType::GroupPointer => UiExtensionType::GroupPointer, + ExtensionType::GroupMemberPointer => UiExtensionType::GroupMemberPointer, + ExtensionType::TokenGroup => UiExtensionType::TokenGroup, + ExtensionType::TokenGroupMember => UiExtensionType::TokenGroupMember, } } } diff --git a/transaction-status/src/parse_token/extension/group_member_pointer.rs b/transaction-status/src/parse_token/extension/group_member_pointer.rs new file mode 100644 index 00000000000000..24d0503dc51ef7 --- /dev/null +++ b/transaction-status/src/parse_token/extension/group_member_pointer.rs @@ -0,0 +1,189 @@ +use { + super::*, + spl_token_2022::{ + extension::group_member_pointer::instruction::*, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_group_member_pointer_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? 
+ { + GroupMemberPointerInstruction::Initialize => { + check_num_token_accounts(account_indexes, 1)?; + let InitializeInstructionData { + authority, + member_address, + } = *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(authority) = Option::::from(authority) { + map.insert("authority".to_string(), json!(authority.to_string())); + } + if let Some(member_address) = Option::::from(member_address) { + map.insert( + "memberAddress".to_string(), + json!(member_address.to_string()), + ); + } + Ok(ParsedInstructionEnum { + instruction_type: "initializeGroupMemberPointer".to_string(), + info: value, + }) + } + GroupMemberPointerInstruction::Update => { + check_num_token_accounts(account_indexes, 2)?; + let UpdateInstructionData { member_address } = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(member_address) = Option::::from(member_address) { + map.insert( + "memberAddress".to_string(), + json!(member_address.to_string()), + ); + } + parse_signers( + map, + 1, + account_keys, + account_indexes, + "authority", + "multisigAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "updateGroupMemberPointer".to_string(), + info: value, + }) + } + } +} + +#[cfg(test)] +mod test { + use {super::*, solana_sdk::pubkey::Pubkey, spl_token_2022::solana_program::message::Message}; + + #[test] + fn test_parse_group_member_pointer_instruction() { + let mint_pubkey = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let member_address = Pubkey::new_unique(); + + // Initialize variations + let init_ix = initialize( + &spl_token_2022::id(), + &mint_pubkey, + Some(authority), + Some(member_address), + ) + .unwrap(); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeGroupMemberPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "memberAddress": member_address.to_string(), + }) + } + ); + + let init_ix = initialize(&spl_token_2022::id(), &mint_pubkey, None, None).unwrap(); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeGroupMemberPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + }) + } + ); + + // Single owner Update + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &authority, + &[], + Some(member_address), + ) + .unwrap(); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateGroupMemberPointer".to_string(), + 
info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "memberAddress": member_address.to_string(), + }) + } + ); + + // Multisig Update + let multisig_pubkey = Pubkey::new_unique(); + let multisig_signer0 = Pubkey::new_unique(); + let multisig_signer1 = Pubkey::new_unique(); + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], + Some(member_address), + ) + .unwrap(); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateGroupMemberPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "memberAddress": member_address.to_string(), + "multisigAuthority": multisig_pubkey.to_string(), + "signers": vec![ + multisig_signer0.to_string(), + multisig_signer1.to_string(), + ], + }) + } + ); + } +} diff --git a/transaction-status/src/parse_token/extension/group_pointer.rs b/transaction-status/src/parse_token/extension/group_pointer.rs new file mode 100644 index 00000000000000..5800a2fd8850f9 --- /dev/null +++ b/transaction-status/src/parse_token/extension/group_pointer.rs @@ -0,0 +1,183 @@ +use { + super::*, + spl_token_2022::{ + extension::group_pointer::instruction::*, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_group_pointer_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? 
+ { + GroupPointerInstruction::Initialize => { + check_num_token_accounts(account_indexes, 1)?; + let InitializeInstructionData { + authority, + group_address, + } = *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(authority) = Option::::from(authority) { + map.insert("authority".to_string(), json!(authority.to_string())); + } + if let Some(group_address) = Option::::from(group_address) { + map.insert("groupAddress".to_string(), json!(group_address.to_string())); + } + Ok(ParsedInstructionEnum { + instruction_type: "initializeGroupPointer".to_string(), + info: value, + }) + } + GroupPointerInstruction::Update => { + check_num_token_accounts(account_indexes, 2)?; + let UpdateInstructionData { group_address } = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + if let Some(group_address) = Option::::from(group_address) { + map.insert("groupAddress".to_string(), json!(group_address.to_string())); + } + parse_signers( + map, + 1, + account_keys, + account_indexes, + "authority", + "multisigAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "updateGroupPointer".to_string(), + info: value, + }) + } + } +} + +#[cfg(test)] +mod test { + use {super::*, solana_sdk::pubkey::Pubkey, spl_token_2022::solana_program::message::Message}; + + #[test] + fn test_parse_group_pointer_instruction() { + let mint_pubkey = Pubkey::new_unique(); + let authority = Pubkey::new_unique(); + let group_address = Pubkey::new_unique(); + + // Initialize variations + let init_ix = initialize( + &spl_token_2022::id(), + &mint_pubkey, + Some(authority), + Some(group_address), + ) + .unwrap(); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeGroupPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": authority.to_string(), + "groupAddress": group_address.to_string(), + }) + } + ); + + let init_ix = initialize(&spl_token_2022::id(), &mint_pubkey, None, None).unwrap(); + let mut message = Message::new(&[init_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "initializeGroupPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + }) + } + ); + + // Single owner Update + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &authority, + &[], + Some(group_address), + ) + .unwrap(); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateGroupPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "authority": 
authority.to_string(), + "groupAddress": group_address.to_string(), + }) + } + ); + + // Multisig Update + let multisig_pubkey = Pubkey::new_unique(); + let multisig_signer0 = Pubkey::new_unique(); + let multisig_signer1 = Pubkey::new_unique(); + let update_ix = update( + &spl_token_2022::id(), + &mint_pubkey, + &multisig_pubkey, + &[&multisig_signer0, &multisig_signer1], + Some(group_address), + ) + .unwrap(); + let mut message = Message::new(&[update_ix], None); + let compiled_instruction = &mut message.instructions[0]; + assert_eq!( + parse_token( + compiled_instruction, + &AccountKeys::new(&message.account_keys, None) + ) + .unwrap(), + ParsedInstructionEnum { + instruction_type: "updateGroupPointer".to_string(), + info: json!({ + "mint": mint_pubkey.to_string(), + "groupAddress": group_address.to_string(), + "multisigAuthority": multisig_pubkey.to_string(), + "signers": vec![ + multisig_signer0.to_string(), + multisig_signer1.to_string(), + ], + }) + } + ); + } +} diff --git a/transaction-status/src/parse_token/extension/mod.rs b/transaction-status/src/parse_token/extension/mod.rs index 8e65ddfcfc691f..19dd4f14b2fd04 100644 --- a/transaction-status/src/parse_token/extension/mod.rs +++ b/transaction-status/src/parse_token/extension/mod.rs @@ -4,6 +4,8 @@ pub(super) mod confidential_transfer; pub(super) mod confidential_transfer_fee; pub(super) mod cpi_guard; pub(super) mod default_account_state; +pub(super) mod group_member_pointer; +pub(super) mod group_pointer; pub(super) mod interest_bearing_mint; pub(super) mod memo_transfer; pub(super) mod metadata_pointer; diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index 8562ab6525a069..e205c10bf6608f 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -43,6 +43,7 @@ tokio = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [[bench]] name = "cluster_info" diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index f574dca0d4bba4..82bd7f940c508d 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -580,7 +580,7 @@ mod test { // Slot 2 interrupted slot 1 let shreds = run.finish_prev_slot(&keypair, 0, &mut ProcessShredsStats::default()); let shred = shreds - .get(0) + .first() .expect("Expected a shred that signals an interrupt"); // Validate the shred diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 57676a34b75eff..8079178cf415b9 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -311,7 +311,7 @@ fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap) -> Vec( fanout: usize, - index: usize, // Local node's index withing the nodes slice. + index: usize, // Local node's index within the nodes slice. nodes: &[T], ) -> impl Iterator + '_ { // Node's index within its neighborhood. @@ -513,7 +513,7 @@ fn enable_turbine_fanout_experiments(shred_slot: Slot, root_bank: &Bank) -> bool // Returns true if the feature is effective for the shred slot. 
#[must_use] -fn check_feature_activation(feature: &Pubkey, shred_slot: Slot, root_bank: &Bank) -> bool { +pub fn check_feature_activation(feature: &Pubkey, shred_slot: Slot, root_bank: &Bank) -> bool { match root_bank.feature_set.activated_slot(feature) { None => false, Some(feature_slot) => { diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index e8a316420b42d8..a947f212296fb7 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -435,10 +435,21 @@ async fn send_datagram_task( connection: Connection, mut receiver: AsyncReceiver, ) -> Result<(), Error> { - while let Some(bytes) = receiver.recv().await { - connection.send_datagram(bytes)?; + tokio::pin! { + let connection_closed = connection.closed(); + } + loop { + tokio::select! { + biased; + bytes = receiver.recv() => { + match bytes { + None => return Ok(()), + Some(bytes) => connection.send_datagram(bytes)?, + } + } + err = &mut connection_closed => return Err(Error::from(err)), + } } - Ok(()) } async fn make_connection_task( diff --git a/udp-client/src/lib.rs b/udp-client/src/lib.rs index 06eeca00185898..bb0c897a3dbc7a 100644 --- a/udp-client/src/lib.rs +++ b/udp-client/src/lib.rs @@ -15,6 +15,7 @@ use { }, connection_cache_stats::ConnectionCacheStats, }, + solana_sdk::signature::Keypair, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, sync::Arc, @@ -112,4 +113,8 @@ impl ConnectionManager for UdpConnectionManager { fn new_connection_config(&self) -> Self::NewConnectionConfig { UdpConfig::new().unwrap() } + + fn update_key(&self, _key: &Keypair) -> Result<(), Box> { + Ok(()) + } } diff --git a/unified-scheduler-logic/Cargo.toml b/unified-scheduler-logic/Cargo.toml new file mode 100644 index 00000000000000..764bb0192f5632 --- /dev/null +++ b/unified-scheduler-logic/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "solana-unified-scheduler-logic" +description = "The Solana unified scheduler logic" +documentation = "https://docs.rs/solana-unified-scheduler-logic" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } diff --git a/unified-scheduler-logic/src/lib.rs b/unified-scheduler-logic/src/lib.rs new file mode 100644 index 00000000000000..73a5a82f6d3a7b --- /dev/null +++ b/unified-scheduler-logic/src/lib.rs @@ -0,0 +1 @@ +// This file will be populated with actual implementation later. 
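The send_datagram_task rewrite in turbine/src/quic_endpoint.rs above replaces a bare recv() loop with a select over the outbound channel and connection closure, so the task exits as soon as the connection dies rather than idling until the next datagram arrives to discover the error. Here is a self-contained sketch of the pattern with hypothetical stand-ins (sender_loop, send_datagram, a oneshot closure signal) in place of the quinn connection:

    use tokio::sync::{mpsc, oneshot};

    // Stand-in for connection.send_datagram(bytes).
    fn send_datagram(_bytes: Vec<u8>) -> Result<(), &'static str> {
        Ok(())
    }

    async fn sender_loop(
        mut receiver: mpsc::Receiver<Vec<u8>>,
        mut closed: oneshot::Receiver<&'static str>, // fires when the connection closes
    ) -> Result<(), &'static str> {
        loop {
            tokio::select! {
                biased;
                bytes = receiver.recv() => match bytes {
                    None => return Ok(()), // all senders dropped: clean shutdown
                    Some(bytes) => send_datagram(bytes)?,
                },
                err = &mut closed => return Err(err.unwrap_or("connection closed")),
            }
        }
    }

With `biased;`, the arms are polled top to bottom, so datagrams already queued in the channel are handed to the connection before closure is reported.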
diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml new file mode 100644 index 00000000000000..213bc5bb86c0ef --- /dev/null +++ b/unified-scheduler-pool/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "solana-unified-scheduler-pool" +description = "The Solana unified scheduler pool" +documentation = "https://docs.rs/solana-unified-scheduler-pool" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-ledger = { workspace = true } +solana-program-runtime = { workspace = true } +solana-runtime = { workspace = true } +solana-sdk = { workspace = true } +solana-unified-scheduler-logic = { workspace = true } +solana-vote = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs new file mode 100644 index 00000000000000..10cb5309e5e01d --- /dev/null +++ b/unified-scheduler-pool/src/lib.rs @@ -0,0 +1,761 @@ +//! Transaction scheduling code. +//! +//! This crate implements 3 solana-runtime traits (`InstalledScheduler`, `UninstalledScheduler` and +//! `InstalledSchedulerPool`) to provide a concrete transaction scheduling implementation +//! (including executing txes and committing tx results). +//! +//! At the highest level, this crate takes `SanitizedTransaction`s via its `schedule_execution()` +//! and commits any side-effects (i.e. on-chain state changes) into the associated `Bank` via +//! `solana-ledger`'s helper function called `execute_batch()`. + +use { + solana_ledger::blockstore_processor::{ + execute_batch, TransactionBatchWithIndexes, TransactionStatusSender, + }, + solana_program_runtime::timings::ExecuteTimings, + solana_runtime::{ + bank::Bank, + installed_scheduler_pool::{ + InstalledScheduler, InstalledSchedulerBox, InstalledSchedulerPool, + InstalledSchedulerPoolArc, ResultWithTimings, SchedulerId, SchedulingContext, + UninstalledScheduler, UninstalledSchedulerBox, + }, + prioritization_fee_cache::PrioritizationFeeCache, + }, + solana_sdk::transaction::{Result, SanitizedTransaction}, + solana_vote::vote_sender_types::ReplayVoteSender, + std::{ + fmt::Debug, + marker::PhantomData, + sync::{ + atomic::{AtomicU64, Ordering::Relaxed}, + Arc, Mutex, Weak, + }, + }, +}; + +type AtomicSchedulerId = AtomicU64; + +// SchedulerPool must be accessed as a dyn trait from solana-runtime, because SchedulerPool +// contains some internal fields, whose types aren't available in solana-runtime (currently +// TransactionStatusSender; also, PohRecorder in the future)... +#[derive(Debug)] +pub struct SchedulerPool, TH: TaskHandler> { + scheduler_inners: Mutex>, + handler_context: HandlerContext, + // weak_self could be elided by changing InstalledScheduler::take_scheduler()'s receiver to + // Arc from &Self, because SchedulerPool is used as in the form of Arc + // almost always. But, this would cause wasted and noisy Arc::clone()'s at every call sites. + // + // Alternatively, `impl InstalledScheduler for Arc` approach could be explored + // but it entails its own problems due to rustc's coherence and necessitated newtype with the + // type graph of InstalledScheduler being quite elaborate. 
+ // + // After these considerations, this weak_self approach is chosen at the cost of some additional + // memory increase. + weak_self: Weak, + next_scheduler_id: AtomicSchedulerId, + _phantom: PhantomData, +} + +#[derive(Debug)] +pub struct HandlerContext { + log_messages_bytes_limit: Option, + transaction_status_sender: Option, + replay_vote_sender: Option, + prioritization_fee_cache: Arc, +} + +pub type DefaultSchedulerPool = + SchedulerPool, DefaultTaskHandler>; + +impl SchedulerPool +where + S: SpawnableScheduler, + TH: TaskHandler, +{ + // Some internal impl and test code want an actual concrete type, NOT the + // `dyn InstalledSchedulerPool`. So don't merge this into `Self::new_dyn()`. + fn new( + log_messages_bytes_limit: Option, + transaction_status_sender: Option, + replay_vote_sender: Option, + prioritization_fee_cache: Arc, + ) -> Arc { + Arc::new_cyclic(|weak_self| Self { + scheduler_inners: Mutex::default(), + handler_context: HandlerContext { + log_messages_bytes_limit, + transaction_status_sender, + replay_vote_sender, + prioritization_fee_cache, + }, + weak_self: weak_self.clone(), + next_scheduler_id: AtomicSchedulerId::default(), + _phantom: PhantomData, + }) + } + + // This apparently-meaningless wrapper is handy, because some callers explicitly want + // `dyn InstalledSchedulerPool` to be returned for type inference convenience. + pub fn new_dyn( + log_messages_bytes_limit: Option, + transaction_status_sender: Option, + replay_vote_sender: Option, + prioritization_fee_cache: Arc, + ) -> InstalledSchedulerPoolArc { + Self::new( + log_messages_bytes_limit, + transaction_status_sender, + replay_vote_sender, + prioritization_fee_cache, + ) + } + + // See a comment at the weak_self field for justification of this method's existence. + fn self_arc(&self) -> Arc { + self.weak_self + .upgrade() + .expect("self-referencing Arc-ed pool") + } + + fn new_scheduler_id(&self) -> SchedulerId { + self.next_scheduler_id.fetch_add(1, Relaxed) + } + + fn return_scheduler(&self, scheduler: S::Inner) { + self.scheduler_inners + .lock() + .expect("not poisoned") + .push(scheduler); + } + + fn do_take_scheduler(&self, context: SchedulingContext) -> S { + // pop is intentional for filo, expecting relatively warmed-up scheduler due to having been + // returned recently + if let Some(inner) = self.scheduler_inners.lock().expect("not poisoned").pop() { + S::from_inner(inner, context) + } else { + S::spawn(self.self_arc(), context) + } + } +} + +impl InstalledSchedulerPool for SchedulerPool +where + S: SpawnableScheduler, + TH: TaskHandler, +{ + fn take_scheduler(&self, context: SchedulingContext) -> InstalledSchedulerBox { + Box::new(self.do_take_scheduler(context)) + } +} + +pub trait TaskHandler: Send + Sync + Debug + Sized + 'static { + fn handle( + result: &mut Result<()>, + timings: &mut ExecuteTimings, + bank: &Arc, + transaction: &SanitizedTransaction, + index: usize, + handler_context: &HandlerContext, + ); +} + +#[derive(Debug)] +pub struct DefaultTaskHandler; + +impl TaskHandler for DefaultTaskHandler { + fn handle( + result: &mut Result<()>, + timings: &mut ExecuteTimings, + bank: &Arc, + transaction: &SanitizedTransaction, + index: usize, + handler_context: &HandlerContext, + ) { + // scheduler must properly prevent conflicting tx executions. thus, task handler isn't + // responsible for locking. 
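+ // That contract is also what makes the *unlocked* batch constructor below
+ // safe: re-taking account locks here would only duplicate the exclusivity
+ // the scheduler already guarantees.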
+ let batch = bank.prepare_unlocked_batch_from_single_tx(transaction); + let batch_with_indexes = TransactionBatchWithIndexes { + batch, + transaction_indexes: vec![index], + }; + + *result = execute_batch( + &batch_with_indexes, + bank, + handler_context.transaction_status_sender.as_ref(), + handler_context.replay_vote_sender.as_ref(), + timings, + handler_context.log_messages_bytes_limit, + &handler_context.prioritization_fee_cache, + ); + } +} + +// Currently, simplest possible implementation (i.e. single-threaded) +// this will be replaced with more proper implementation... +// not usable at all, especially for mainnet-beta +#[derive(Debug)] +pub struct PooledScheduler { + inner: PooledSchedulerInner, + context: SchedulingContext, + result_with_timings: Mutex, +} + +#[derive(Debug)] +pub struct PooledSchedulerInner, TH: TaskHandler> { + id: SchedulerId, + pool: Arc>, +} + +impl PooledScheduler { + fn do_spawn(pool: Arc>, initial_context: SchedulingContext) -> Self { + Self::from_inner( + PooledSchedulerInner:: { + id: pool.new_scheduler_id(), + pool, + }, + initial_context, + ) + } +} + +pub trait SpawnableScheduler: InstalledScheduler { + type Inner: Debug + Send + Sync; + + fn into_inner(self) -> (ResultWithTimings, Self::Inner); + + fn from_inner(inner: Self::Inner, context: SchedulingContext) -> Self; + + fn spawn(pool: Arc>, initial_context: SchedulingContext) -> Self + where + Self: Sized; +} + +impl SpawnableScheduler for PooledScheduler { + type Inner = PooledSchedulerInner; + + fn into_inner(self) -> (ResultWithTimings, Self::Inner) { + ( + self.result_with_timings.into_inner().expect("not poisoned"), + self.inner, + ) + } + + fn from_inner(inner: Self::Inner, context: SchedulingContext) -> Self { + Self { + inner, + context, + result_with_timings: Mutex::new((Ok(()), ExecuteTimings::default())), + } + } + + fn spawn(pool: Arc>, initial_context: SchedulingContext) -> Self { + Self::do_spawn(pool, initial_context) + } +} + +impl InstalledScheduler for PooledScheduler { + fn id(&self) -> SchedulerId { + self.inner.id + } + + fn context(&self) -> &SchedulingContext { + &self.context + } + + fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { + let (result, timings) = &mut *self.result_with_timings.lock().expect("not poisoned"); + if result.is_err() { + // just bail out early to short-circuit the processing altogether + return; + } + + // ... so, we're NOT scheduling at all here; rather, just execute tx straight off. the + // inter-tx locking deps aren't needed to be resolved in the case of single-threaded FIFO + // like this. + TH::handle( + result, + timings, + self.context().bank(), + transaction, + index, + &self.inner.pool.handler_context, + ); + } + + fn wait_for_termination( + self: Box, + _is_dropped: bool, + ) -> (ResultWithTimings, UninstalledSchedulerBox) { + let (result_with_timings, uninstalled_scheduler) = self.into_inner(); + (result_with_timings, Box::new(uninstalled_scheduler)) + } + + fn pause_for_recent_blockhash(&mut self) { + // not surprisingly, there's nothing to do for this min impl! 
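The error handling in `schedule_execution()` above is a "sticky first error" pattern: once any transaction fails, later submissions are skipped and the stored error is what termination ultimately reports. Stripped of the scheduler machinery, it looks like this (illustrative types only; `String` stands in for the SDK's transaction error).

```rust
use std::sync::Mutex;

// Once any task fails, every later submission is a no-op and the first
// failure is preserved for the caller to observe at shutdown.
struct InlineScheduler {
    result: Mutex<Result<(), String>>,
}

impl InlineScheduler {
    fn schedule(&self, task: impl FnOnce() -> Result<(), String>) {
        let mut result = self.result.lock().expect("not poisoned");
        if result.is_err() {
            // just bail out early to short-circuit the processing altogether
            return;
        }
        *result = task();
    }
}
```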
+ } +} + +impl UninstalledScheduler for PooledSchedulerInner +where + S: SpawnableScheduler>, + TH: TaskHandler, +{ + fn return_to_pool(self: Box) { + self.pool.clone().return_scheduler(*self) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + assert_matches::assert_matches, + solana_runtime::{ + bank::Bank, + bank_forks::BankForks, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + installed_scheduler_pool::{BankWithScheduler, SchedulingContext}, + prioritization_fee_cache::PrioritizationFeeCache, + }, + solana_sdk::{ + clock::MAX_PROCESSING_AGE, + pubkey::Pubkey, + signer::keypair::Keypair, + system_transaction, + transaction::{SanitizedTransaction, TransactionError}, + }, + std::{sync::Arc, thread::JoinHandle}, + }; + + #[test] + fn test_scheduler_pool_new() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + + // this indirectly proves that there should be circular link because there's only one Arc + // at this moment now + assert_eq!((Arc::strong_count(&pool), Arc::weak_count(&pool)), (1, 1)); + let debug = format!("{pool:#?}"); + assert!(!debug.is_empty()); + } + + #[test] + fn test_scheduler_spawn() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + let bank = Arc::new(Bank::default_for_tests()); + let context = SchedulingContext::new(bank); + let scheduler = pool.take_scheduler(context); + + let debug = format!("{scheduler:#?}"); + assert!(!debug.is_empty()); + } + + #[test] + fn test_scheduler_pool_filo() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let bank = Arc::new(Bank::default_for_tests()); + let context = &SchedulingContext::new(bank); + + let scheduler1 = pool.do_take_scheduler(context.clone()); + let scheduler_id1 = scheduler1.id(); + let scheduler2 = pool.do_take_scheduler(context.clone()); + let scheduler_id2 = scheduler2.id(); + assert_ne!(scheduler_id1, scheduler_id2); + + let (result_with_timings, scheduler1) = scheduler1.into_inner(); + assert_matches!(result_with_timings, (Ok(()), _)); + pool.return_scheduler(scheduler1); + let (result_with_timings, scheduler2) = scheduler2.into_inner(); + assert_matches!(result_with_timings, (Ok(()), _)); + pool.return_scheduler(scheduler2); + + let scheduler3 = pool.do_take_scheduler(context.clone()); + assert_eq!(scheduler_id2, scheduler3.id()); + let scheduler4 = pool.do_take_scheduler(context.clone()); + assert_eq!(scheduler_id1, scheduler4.id()); + } + + #[test] + fn test_scheduler_pool_context_drop_unless_reinitialized() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let bank = Arc::new(Bank::default_for_tests()); + let context = &SchedulingContext::new(bank); + let mut scheduler = pool.do_take_scheduler(context.clone()); + + // should never panic. 
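The FILO behavior exercised by `test_scheduler_pool_filo` above follows purely from the pool's `Vec` push/pop order; in isolation, with plain ids standing in for scheduler inners:

```rust
fn main() {
    let (scheduler_id_1, scheduler_id_2) = (0u64, 1u64);
    let mut idle_inners = Vec::new();
    idle_inners.push(scheduler_id_1); // returned first
    idle_inners.push(scheduler_id_2); // returned second
    // take_scheduler() pops, so the most recently returned (and presumably
    // cache-warm) inner comes back first, matching the test's assertions.
    assert_eq!(idle_inners.pop(), Some(scheduler_id_2));
    assert_eq!(idle_inners.pop(), Some(scheduler_id_1));
}
```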
+ scheduler.pause_for_recent_blockhash(); + assert_matches!( + Box::new(scheduler).wait_for_termination(false), + ((Ok(()), _), _) + ); + } + + #[test] + fn test_scheduler_pool_context_replace() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let old_bank = &Arc::new(Bank::default_for_tests()); + let new_bank = &Arc::new(Bank::default_for_tests()); + assert!(!Arc::ptr_eq(old_bank, new_bank)); + + let old_context = &SchedulingContext::new(old_bank.clone()); + let new_context = &SchedulingContext::new(new_bank.clone()); + + let scheduler = pool.do_take_scheduler(old_context.clone()); + let scheduler_id = scheduler.id(); + pool.return_scheduler(scheduler.into_inner().1); + + let scheduler = pool.take_scheduler(new_context.clone()); + assert_eq!(scheduler_id, scheduler.id()); + assert!(Arc::ptr_eq(scheduler.context().bank(), new_bank)); + } + + #[test] + fn test_scheduler_pool_install_into_bank_forks() { + solana_logger::setup(); + + let bank = Bank::default_for_tests(); + let bank_forks = BankForks::new_rw_arc(bank); + let mut bank_forks = bank_forks.write().unwrap(); + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + bank_forks.install_scheduler_pool(pool); + } + + #[test] + fn test_scheduler_install_into_bank() { + solana_logger::setup(); + + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let child_bank = Bank::new_from_parent(bank, &Pubkey::default(), 1); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + + let bank = Bank::default_for_tests(); + let bank_forks = BankForks::new_rw_arc(bank); + let mut bank_forks = bank_forks.write().unwrap(); + + // existing banks in bank_forks shouldn't process transactions anymore in general, so + // shouldn't be touched + assert!(!bank_forks + .working_bank_with_scheduler() + .has_installed_scheduler()); + bank_forks.install_scheduler_pool(pool); + assert!(!bank_forks + .working_bank_with_scheduler() + .has_installed_scheduler()); + + let mut child_bank = bank_forks.insert(child_bank); + assert!(child_bank.has_installed_scheduler()); + bank_forks.remove(child_bank.slot()); + child_bank.drop_scheduler(); + assert!(!child_bank.has_installed_scheduler()); + } + + fn setup_dummy_fork_graph(bank: Bank) -> Arc { + let slot = bank.slot(); + let bank_fork = BankForks::new_rw_arc(bank); + let bank = bank_fork.read().unwrap().get(slot).unwrap(); + bank.loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(bank_fork); + bank + } + + #[test] + fn test_scheduler_schedule_execution_success() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(10_000); + let tx0 = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + let bank = Bank::new_for_tests(&genesis_config); + let bank = setup_dummy_fork_graph(bank); + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + let context = SchedulingContext::new(bank.clone()); + + assert_eq!(bank.transaction_count(), 0); + let scheduler = pool.take_scheduler(context); + scheduler.schedule_execution(&(tx0, 0)); + let bank = BankWithScheduler::new(bank, Some(scheduler)); + assert_matches!(bank.wait_for_completed_scheduler(), Some((Ok(()), _))); + assert_eq!(bank.transaction_count(), 1); + } + + #[test] + fn test_scheduler_schedule_execution_failure() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank = setup_dummy_fork_graph(bank); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + let context = SchedulingContext::new(bank.clone()); + let mut scheduler = pool.take_scheduler(context); + + let unfunded_keypair = Keypair::new(); + let bad_tx = + &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &unfunded_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + assert_eq!(bank.transaction_count(), 0); + scheduler.schedule_execution(&(bad_tx, 0)); + scheduler.pause_for_recent_blockhash(); + assert_eq!(bank.transaction_count(), 0); + + let good_tx_after_bad_tx = + &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 3, + genesis_config.hash(), + )); + // make sure this tx is really a good one to execute. + assert_matches!( + bank.simulate_transaction_unchecked(good_tx_after_bad_tx, false) + .result, + Ok(_) + ); + scheduler.schedule_execution(&(good_tx_after_bad_tx, 0)); + scheduler.pause_for_recent_blockhash(); + // transaction_count should remain same as scheduler should be bailing out. + assert_eq!(bank.transaction_count(), 0); + + let bank = BankWithScheduler::new(bank, Some(scheduler)); + assert_matches!( + bank.wait_for_completed_scheduler(), + Some(( + Err(solana_sdk::transaction::TransactionError::AccountNotFound), + _timings + )) + ); + } + + #[derive(Debug)] + struct AsyncScheduler( + PooledScheduler, + Mutex>>, + ); + + impl AsyncScheduler { + fn do_wait(&self) { + let mut overall_result = Ok(()); + let mut overall_timings = ExecuteTimings::default(); + for handle in self.1.lock().unwrap().drain(..) 
{ + let (result, timings) = handle.join().unwrap(); + match result { + Ok(()) => {} + Err(e) => overall_result = Err(e), + } + overall_timings.accumulate(&timings); + } + *self.0.result_with_timings.lock().unwrap() = (overall_result, overall_timings); + } + } + + impl InstalledScheduler + for AsyncScheduler + { + fn id(&self) -> SchedulerId { + self.0.id() + } + + fn context(&self) -> &SchedulingContext { + self.0.context() + } + + fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { + let transaction_and_index = (transaction.clone(), index); + let context = self.context().clone(); + let pool = self.0.inner.pool.clone(); + + self.1.lock().unwrap().push(std::thread::spawn(move || { + // intentionally sleep to simulate race condition where register_recent_blockhash + // is handle before finishing executing scheduled transactions + std::thread::sleep(std::time::Duration::from_secs(1)); + + let mut result = Ok(()); + let mut timings = ExecuteTimings::default(); + + ::handle( + &mut result, + &mut timings, + context.bank(), + &transaction_and_index.0, + transaction_and_index.1, + &pool.handler_context, + ); + (result, timings) + })); + } + + fn wait_for_termination( + self: Box, + is_dropped: bool, + ) -> (ResultWithTimings, UninstalledSchedulerBox) { + self.do_wait(); + Box::new(self.0).wait_for_termination(is_dropped) + } + + fn pause_for_recent_blockhash(&mut self) { + if TRIGGER_RACE_CONDITION { + // this is equivalent to NOT calling wait_for_paused_scheduler() in + // register_recent_blockhash(). + return; + } + self.do_wait(); + } + } + + impl SpawnableScheduler + for AsyncScheduler + { + // well, i wish i can use ! (never type)..... + type Inner = Self; + + fn into_inner(self) -> (ResultWithTimings, Self::Inner) { + todo!(); + } + + fn from_inner(_inner: Self::Inner, _context: SchedulingContext) -> Self { + todo!(); + } + + fn spawn( + pool: Arc>, + initial_context: SchedulingContext, + ) -> Self { + AsyncScheduler::( + PooledScheduler::::from_inner( + PooledSchedulerInner { + id: pool.new_scheduler_id(), + pool: SchedulerPool::new( + pool.handler_context.log_messages_bytes_limit, + pool.handler_context.transaction_status_sender.clone(), + pool.handler_context.replay_vote_sender.clone(), + pool.handler_context.prioritization_fee_cache.clone(), + ), + }, + initial_context, + ), + Mutex::new(vec![]), + ) + } + } + + fn do_test_scheduler_schedule_execution_recent_blockhash_edge_case< + const TRIGGER_RACE_CONDITION: bool, + >() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
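`AsyncScheduler::do_wait()` above joins its handler threads and folds their per-thread outcomes into a single result-and-timings pair. The shape of that fold, with `u64` standing in for `ExecuteTimings` and `String` for the error type (an illustrative sketch, not the test's exact code):

```rust
use std::thread::JoinHandle;

// Join every spawned handler thread; keep the last error observed and
// accumulate the per-thread counters into one running total.
fn wait_all(
    handles: Vec<JoinHandle<(Result<(), String>, u64)>>,
) -> (Result<(), String>, u64) {
    let mut overall_result = Ok(());
    let mut overall_timings = 0u64;
    for handle in handles {
        let (result, timings) = handle.join().unwrap();
        if let Err(e) = result {
            overall_result = Err(e);
        }
        overall_timings += timings;
    }
    (overall_result, overall_timings)
}
```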
+ } = create_genesis_config(10_000); + let very_old_valid_tx = + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + let mut bank = Bank::new_for_tests(&genesis_config); + for _ in 0..MAX_PROCESSING_AGE { + bank.fill_bank_with_ticks_for_tests(); + bank.freeze(); + let slot = bank.slot(); + bank = Bank::new_from_parent( + Arc::new(bank), + &Pubkey::default(), + slot.checked_add(1).unwrap(), + ); + } + let bank = setup_dummy_fork_graph(bank); + let context = SchedulingContext::new(bank.clone()); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + SchedulerPool::, DefaultTaskHandler>::new_dyn( + None, + None, + None, + ignored_prioritization_fee_cache, + ); + let scheduler = pool.take_scheduler(context); + + let bank = BankWithScheduler::new(bank, Some(scheduler)); + assert_eq!(bank.transaction_count(), 0); + + // schedule but not immediately execute transaction + bank.schedule_transaction_executions([(&very_old_valid_tx, &0)].into_iter()); + // this calls register_recent_blockhash internally + bank.fill_bank_with_ticks_for_tests(); + + if TRIGGER_RACE_CONDITION { + // very_old_valid_tx is wrongly handled as expired! + assert_matches!( + bank.wait_for_completed_scheduler(), + Some((Err(TransactionError::BlockhashNotFound), _)) + ); + assert_eq!(bank.transaction_count(), 0); + } else { + assert_matches!(bank.wait_for_completed_scheduler(), Some((Ok(()), _))); + assert_eq!(bank.transaction_count(), 1); + } + } + + #[test] + fn test_scheduler_schedule_execution_recent_blockhash_edge_case_with_race() { + do_test_scheduler_schedule_execution_recent_blockhash_edge_case::(); + } + + #[test] + fn test_scheduler_schedule_execution_recent_blockhash_edge_case_without_race() { + do_test_scheduler_schedule_execution_recent_blockhash_edge_case::(); + } +} diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 845bdda7eeab6b..6c7f691c27b5fa 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -67,6 +67,7 @@ thiserror = { workspace = true } [dev-dependencies] solana-account-decoder = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 69584822097c45..67f2309a9c98bc 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -682,6 +682,12 @@ impl AdminRpcImpl { })?; } + for n in post_init.notifies.iter() { + if let Err(err) = n.update_key(&identity_keypair) { + error!("Error updating network layer keypair: {err}"); + } + } + solana_metrics::set_host_id(identity_keypair.pubkey().to_string()); post_init .cluster_info @@ -888,6 +894,7 @@ mod tests { bank_forks: bank_forks.clone(), vote_account, repair_whitelist, + notifies: Vec::new(), }))), staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), rpc_to_plugin_manager_sender: None, diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 9aa1c466f8e336..b77993fe54954c 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1246,12 +1246,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .help("How much memory the accounts index can consume. 
If this is exceeded, some account index entries will be stored on disk."), ) - .arg( - Arg::with_name("disable_accounts_disk_index") - .long("disable-accounts-disk-index") - .help("Disable the disk-based accounts index if it is enabled by default.") - .conflicts_with("accounts_index_memory_limit_mb") - ) .arg( Arg::with_name("accounts_index_bins") .long("accounts-index-bins") @@ -1285,21 +1279,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { May be specified multiple times. \ [default: [ledger]/accounts_index]"), ) - .arg(Arg::with_name("accounts_filler_count") - .long("accounts-filler-count") - .value_name("COUNT") - .validator(is_parsable::) - .takes_value(true) - .default_value(&default_args.accounts_filler_count) - .help("How many accounts to add to stress the system. Accounts are ignored in operations related to correctness.")) - .arg(Arg::with_name("accounts_filler_size") - .long("accounts-filler-size") - .value_name("BYTES") - .validator(is_parsable::) - .takes_value(true) - .default_value(&default_args.accounts_filler_size) - .requires("accounts_filler_count") - .help("Size per filler account in bytes.")) .arg( Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") @@ -1387,7 +1366,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .arg( Arg::with_name("block_production_method") .long("block-production-method") - .hidden(hidden_unless_forced()) .value_name("METHOD") .takes_value(true) .possible_values(BlockProductionMethod::cli_names()) @@ -1793,6 +1771,10 @@ fn deprecated_arguments() -> Vec { Ok(()) } })); + add_arg!(Arg::with_name("disable_accounts_disk_index") + .long("disable-accounts-disk-index") + .help("Disable the disk-based accounts index if it is enabled by default.") + .conflicts_with("accounts_index_memory_limit_mb")); add_arg!( Arg::with_name("disable_quic_servers") .long("disable-quic-servers") @@ -1957,8 +1939,6 @@ pub struct DefaultArgs { pub contact_debug_interval: String, - pub accounts_filler_count: String, - pub accounts_filler_size: String, pub accountsdb_repl_threads: String, pub snapshot_version: SnapshotVersion, @@ -2032,8 +2012,6 @@ impl DefaultArgs { .to_string(), rpc_pubsub_worker_threads: "4".to_string(), accountsdb_repl_threads: num_cpus::get().to_string(), - accounts_filler_count: "0".to_string(), - accounts_filler_size: "0".to_string(), maximum_full_snapshot_archives_to_retain: DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), maximum_incremental_snapshot_archives_to_retain: diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs index f6df5693c0260d..365f02065ebc96 100644 --- a/validator/src/dashboard.rs +++ b/validator/src/dashboard.rs @@ -220,13 +220,13 @@ async fn wait_for_validator_startup( if start_progress == ValidatorStartProgress::Running { let admin_client = admin_client.take().unwrap(); - match async move { + let validator_info = async move { let rpc_addr = admin_client.rpc_addr().await?; let start_time = admin_client.start_time().await?; Ok::<_, jsonrpc_core_client::RpcError>((rpc_addr, start_time)) } - .await - { + .await; + match validator_info { Ok((None, _)) => progress_bar.set_message("RPC service not available"), Ok((Some(rpc_addr), start_time)) => return Some((rpc_addr, start_time)), Err(err) => { diff --git a/validator/src/lib.rs b/validator/src/lib.rs index 4e7ed43ec78309..e1b9df96b9b03e 100644 --- a/validator/src/lib.rs +++ b/validator/src/lib.rs @@ -24,12 +24,7 @@ pub mod dashboard; #[cfg(unix)] 
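The dashboard.rs change above is a pure readability refactor: bind the awaited block to a name, then match on the name, instead of matching directly on an `async move { ... }.await` expression. A minimal standalone version (function and names invented for illustration):

```rust
// Naming the awaited result keeps the match arms visually close to their
// scrutinee instead of trailing a multi-line async block.
async fn report(
    fetch_rpc_addr: impl std::future::Future<Output = Result<Option<String>, String>>,
) {
    let validator_info = async move {
        let rpc_addr = fetch_rpc_addr.await?;
        Ok::<_, String>(rpc_addr)
    }
    .await;
    match validator_info {
        Ok(Some(rpc_addr)) => println!("RPC at {rpc_addr}"),
        Ok(None) => println!("RPC service not available"),
        Err(err) => eprintln!("error: {err}"),
    }
}
```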
fn redirect_stderr(filename: &str) { use std::os::unix::io::AsRawFd; - match OpenOptions::new() - .write(true) - .create(true) - .append(true) - .open(filename) - { + match OpenOptions::new().create(true).append(true).open(filename) { Ok(file) => unsafe { libc::dup2(file.as_raw_fd(), libc::STDERR_FILENO); }, diff --git a/validator/src/main.rs b/validator/src/main.rs index bb8fa537b8ecdb..a3c8d170dab784 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -8,10 +8,7 @@ use { log::*, rand::{seq::SliceRandom, thread_rng}, solana_accounts_db::{ - accounts_db::{ - AccountShrinkThreshold, AccountsDb, AccountsDbConfig, CreateAncientStorage, - FillerAccountsConfig, - }, + accounts_db::{AccountShrinkThreshold, AccountsDb, AccountsDbConfig, CreateAncientStorage}, accounts_index::{ AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, AccountsIndexConfig, IndexLimitMb, @@ -244,7 +241,7 @@ fn wait_for_restart_window( Err("Current epoch is almost complete".to_string()) } else { while leader_schedule - .get(0) + .front() .map(|slot| *slot < epoch_info.absolute_slot) .unwrap_or(false) { @@ -258,7 +255,7 @@ fn wait_for_restart_window( upcoming_idle_windows.pop(); } - match leader_schedule.get(0) { + match leader_schedule.front() { None => { Ok(()) // Validator has no leader slots } @@ -1186,16 +1183,10 @@ pub fn main() { .ok() .map(|mb| mb * MB); - let filler_accounts_config = FillerAccountsConfig { - count: value_t_or_exit!(matches, "accounts_filler_count", usize), - size: value_t_or_exit!(matches, "accounts_filler_size", usize), - }; - let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_path.clone()), accounts_hash_cache_path: Some(accounts_hash_cache_path), - filler_accounts_config, write_cache_limit_bytes: value_t!(matches, "accounts_db_cache_limit_mb", u64) .ok() .map(|mb| mb * MB as u64), @@ -1682,7 +1673,7 @@ pub fn main() { if SystemMonitorService::check_os_network_limits() { info!("OS network limits test passed."); } else { - eprintln!("OS network limit test failed. See: https://docs.solana.com/running-validator/validator-start#system-tuning"); + eprintln!("OS network limit test failed. 
See: https://docs.solanalabs.com/operations/guides/validator-start#system-tuning"); exit(1); } } diff --git a/vote/src/vote_parser.rs b/vote/src/vote_parser.rs index 4b4e12770ec291..5ca00fa9445ed1 100644 --- a/vote/src/vote_parser.rs +++ b/vote/src/vote_parser.rs @@ -23,7 +23,7 @@ pub fn parse_sanitized_vote_transaction(tx: &SanitizedTransaction) -> Option Option { let first_account = usize::from(*first_instruction.accounts.first()?); let key = message.account_keys.get(first_account)?; let (vote, switch_proof_hash) = parse_vote_instruction_data(&first_instruction.data)?; - let signature = tx.signatures.get(0).cloned().unwrap_or_default(); + let signature = tx.signatures.first().cloned().unwrap_or_default(); Some((*key, vote, switch_proof_hash, signature)) } diff --git a/zk-token-sdk/src/encryption/auth_encryption.rs b/zk-token-sdk/src/encryption/auth_encryption.rs index 046f529ca4e634..3bc5c657de103e 100644 --- a/zk-token-sdk/src/encryption/auth_encryption.rs +++ b/zk-token-sdk/src/encryption/auth_encryption.rs @@ -52,6 +52,8 @@ pub enum AuthenticatedEncryptionError { SeedLengthTooShort, #[error("seed length too long for derivation")] SeedLengthTooLong, + #[error("failed to deserialize")] + Deserialization, } struct AuthenticatedEncryption; diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index bee5cb39c307ec..5b4e2dba872530 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -78,6 +78,10 @@ pub enum ElGamalError { SeedLengthTooShort, #[error("seed length too long for derivation")] SeedLengthTooLong, + #[error("failed to deserialize ciphertext")] + CiphertextDeserialization, + #[error("failed to deserialize public key")] + PubkeyDeserialization, } /// Algorithm handle for the twisted ElGamal encryption scheme @@ -160,7 +164,7 @@ impl ElGamal { } /// On input a secret key and a ciphertext, the function returns the decrypted amount - /// interpretted as a positive 32-bit number (but still of type `u64`). + /// interpreted as a positive 32-bit number (but still of type `u64`). /// /// If the originally encrypted amount is not a positive 32-bit number, then the function /// returns `None`. diff --git a/zk-token-sdk/src/errors.rs b/zk-token-sdk/src/errors.rs index e9e36e43f73c18..ad43b680dc9b0d 100644 --- a/zk-token-sdk/src/errors.rs +++ b/zk-token-sdk/src/errors.rs @@ -1,55 +1,52 @@ //! Errors related to proving and verifying proofs. 
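Several hunks above and below are the same clippy-style cleanup (the `get_first` lint): `get(0)` becomes `first()` on slices and `front()` on a `VecDeque`, stating the intent directly. In isolation, with invented sample data:

```rust
use std::collections::VecDeque;

fn main() {
    // Slice: first() replaces get(0), as in vote_parser.rs above.
    let signatures = ["3AsdF".to_string()];
    let signature = signatures.first().cloned().unwrap_or_default();

    // VecDeque: front() replaces get(0), e.g. peeking the next leader slot
    // as in wait_for_restart_window() below.
    let mut leader_schedule: VecDeque<u64> = VecDeque::from([10, 11, 12]);
    while leader_schedule.front().map(|slot| *slot < 11).unwrap_or(false) {
        leader_schedule.pop_front();
    }
    assert_eq!(signature, "3AsdF");
    assert_eq!(leader_schedule.front(), Some(&11));
}
```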
use { - crate::{range_proof::errors::RangeProofError, sigma_proofs::errors::*}, + crate::{ + encryption::elgamal::ElGamalError, + range_proof::errors::{RangeProofGenerationError, RangeProofVerificationError}, + sigma_proofs::errors::*, + }, thiserror::Error, }; #[derive(Error, Clone, Debug, Eq, PartialEq)] -pub enum ProofError { - #[error("invalid transfer amount range")] - TransferAmount, - #[error("proof generation failed")] - Generation, - #[error("proof verification failed")] - VerificationError(ProofType, ProofVerificationError), - #[error("failed to decrypt ciphertext")] - Decryption, - #[error("invalid ciphertext data")] - CiphertextDeserialization, - #[error("invalid pubkey data")] - PubkeyDeserialization, - #[error("ciphertext does not exist in instruction data")] - MissingCiphertext, +pub enum ProofGenerationError { + #[error("not enough funds in account")] + NotEnoughFunds, + #[error("transfer fee calculation error")] + FeeCalculation, + #[error("illegal number of commitments")] + IllegalCommitmentLength, + #[error("illegal amount bit length")] + IllegalAmountBitLength, + #[error("invalid commitment")] + InvalidCommitment, + #[error("range proof generation failed")] + RangeProof(#[from] RangeProofGenerationError), + #[error("unexpected proof length")] + ProofLength, +} + +#[derive(Error, Clone, Debug, Eq, PartialEq)] +pub enum ProofVerificationError { + #[error("range proof verification failed")] + RangeProof(#[from] RangeProofVerificationError), + #[error("sigma proof verification failed")] + SigmaProof(SigmaProofType, SigmaProofVerificationError), + #[error("ElGamal ciphertext or public key error")] + ElGamal(#[from] ElGamalError), + #[error("Invalid proof context")] + ProofContext, + #[error("illegal commitment length")] + IllegalCommitmentLength, } #[derive(Clone, Debug, Eq, PartialEq)] -pub enum ProofType { +pub enum SigmaProofType { EqualityProof, ValidityProof, ZeroBalanceProof, FeeSigmaProof, PubkeyValidityProof, - RangeProof, -} - -#[derive(Error, Clone, Debug, Eq, PartialEq)] -pub enum ProofVerificationError { - #[error("required algebraic relation does not hold")] - AlgebraicRelation, - #[error("malformed proof")] - Deserialization, - #[error("multiscalar multiplication failed")] - MultiscalarMul, - #[error("transcript failed to produce a challenge")] - Transcript(#[from] TranscriptError), - #[error( - "attempted to verify range proof with a non-power-of-two bit size or bit size is too big" - )] - InvalidBitSize, - #[error("insufficient generators for the proof")] - InvalidGeneratorsLength, - #[error("number of blinding factors do not match the number of values")] - WrongNumBlindingFactors, } #[derive(Error, Clone, Debug, Eq, PartialEq)] @@ -58,37 +55,31 @@ pub enum TranscriptError { ValidationError, } -impl From for ProofError { - fn from(err: RangeProofError) -> Self { - Self::VerificationError(ProofType::RangeProof, err.0) - } -} - -impl From for ProofError { - fn from(err: EqualityProofError) -> Self { - Self::VerificationError(ProofType::EqualityProof, err.0) +impl From for ProofVerificationError { + fn from(err: EqualityProofVerificationError) -> Self { + Self::SigmaProof(SigmaProofType::EqualityProof, err.0) } } -impl From for ProofError { - fn from(err: FeeSigmaProofError) -> Self { - Self::VerificationError(ProofType::FeeSigmaProof, err.0) +impl From for ProofVerificationError { + fn from(err: FeeSigmaProofVerificationError) -> Self { + Self::SigmaProof(SigmaProofType::FeeSigmaProof, err.0) } } -impl From for ProofError { - fn from(err: ZeroBalanceProofError) 
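The effect of the errors.rs split above on call sites: prover-side code now returns `ProofGenerationError` and verifier-side code returns `ProofVerificationError`, instead of both funneling into one catch-all `ProofError`. A sketch of the new shape, using simplified variants drawn from the enums above (not the SDK's exact signatures):

```rust
// Each direction gets its own enum, so a caller can tell a prover-side
// failure (e.g. NotEnoughFunds) from a verifier-side one at the type level.
#[allow(dead_code)]
enum ProofGenerationError { NotEnoughFunds, IllegalAmountBitLength }
#[allow(dead_code)]
enum ProofVerificationError { ProofContext, IllegalCommitmentLength }

fn generate(balance: u64, amount: u64) -> Result<(), ProofGenerationError> {
    balance
        .checked_sub(amount)
        .map(|_| ())
        .ok_or(ProofGenerationError::NotEnoughFunds)
}

fn verify(num_commitments: usize, max: usize) -> Result<(), ProofVerificationError> {
    (num_commitments <= max)
        .then_some(())
        .ok_or(ProofVerificationError::IllegalCommitmentLength)
}
```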
-> Self { - Self::VerificationError(ProofType::ZeroBalanceProof, err.0) +impl From for ProofVerificationError { + fn from(err: ZeroBalanceProofVerificationError) -> Self { + Self::SigmaProof(SigmaProofType::ZeroBalanceProof, err.0) } } -impl From for ProofError { - fn from(err: ValidityProofError) -> Self { - Self::VerificationError(ProofType::ValidityProof, err.0) +impl From for ProofVerificationError { + fn from(err: ValidityProofVerificationError) -> Self { + Self::SigmaProof(SigmaProofType::ValidityProof, err.0) } } -impl From for ProofError { - fn from(err: PubkeyValidityProofError) -> Self { - Self::VerificationError(ProofType::PubkeyValidityProof, err.0) +impl From for ProofVerificationError { + fn from(err: PubkeyValidityProofVerificationError) -> Self { + Self::SigmaProof(SigmaProofType::PubkeyValidityProof, err.0) } } diff --git a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs index 2d412f5746c3bc..10a9d790804e30 100644 --- a/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs +++ b/zk-token-sdk/src/instruction/batched_grouped_ciphertext_validity.rs @@ -19,7 +19,7 @@ use { elgamal::ElGamalPubkey, grouped_elgamal::GroupedElGamalCiphertext, pedersen::PedersenOpening, }, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, sigma_proofs::batched_grouped_ciphertext_validity_proof::BatchedGroupedCiphertext2HandlesValidityProof, transcript::TranscriptProtocol, }, @@ -69,7 +69,7 @@ impl BatchedGroupedCiphertext2HandlesValidityProofData { amount_hi: u64, opening_lo: &PedersenOpening, opening_hi: &PedersenOpening, - ) -> Result { + ) -> Result { let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.to_bytes()); let pod_auditor_pubkey = pod::ElGamalPubkey(auditor_pubkey.to_bytes()); let pod_grouped_ciphertext_lo = (*grouped_ciphertext_lo).into(); @@ -106,7 +106,7 @@ impl ZkProofData } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let destination_pubkey = self.context.destination_pubkey.try_into()?; @@ -116,10 +116,10 @@ impl ZkProofData let grouped_ciphertext_hi: GroupedElGamalCiphertext<2> = self.context.grouped_ciphertext_hi.try_into()?; - let destination_handle_lo = grouped_ciphertext_lo.handles.get(0).unwrap(); + let destination_handle_lo = grouped_ciphertext_lo.handles.first().unwrap(); let auditor_handle_lo = grouped_ciphertext_lo.handles.get(1).unwrap(); - let destination_handle_hi = grouped_ciphertext_hi.handles.get(0).unwrap(); + let destination_handle_hi = grouped_ciphertext_hi.handles.first().unwrap(); let auditor_handle_hi = grouped_ciphertext_hi.handles.get(1).unwrap(); let proof: BatchedGroupedCiphertext2HandlesValidityProof = self.proof.try_into()?; diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs index 8867823cfee393..4036be9a94c940 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs @@ -4,7 +4,8 @@ use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, + instruction::batched_range_proof::MAX_COMMITMENTS, range_proof::RangeProof, }, 
std::convert::TryInto, @@ -39,26 +40,29 @@ impl BatchedRangeProofU128Data { amounts: Vec, bit_lengths: Vec, openings: Vec<&PedersenOpening>, - ) -> Result { + ) -> Result { // the sum of the bit lengths must be 64 let batched_bit_length = bit_lengths .iter() .try_fold(0_usize, |acc, &x| acc.checked_add(x)) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::IllegalAmountBitLength)?; - // `u64::BITS` is 128, which fits in a single byte and should not overflow to `usize` for + // `u128::BITS` is 128, which fits in a single byte and should not overflow to `usize` for // an overwhelming number of platforms. However, to be extra cautious, use `try_from` and // `unwrap` here. A simple case `u128::BITS as usize` can silently overflow. let expected_bit_length = usize::try_from(u128::BITS).unwrap(); if batched_bit_length != expected_bit_length { - return Err(ProofError::Generation); + return Err(ProofGenerationError::IllegalAmountBitLength); } let context = BatchedRangeProofContext::new(&commitments, &amounts, &bit_lengths, &openings)?; let mut transcript = context.new_transcript(); - let proof = RangeProof::new(amounts, bit_lengths, openings, &mut transcript).try_into()?; + let proof: pod::RangeProofU128 = + RangeProof::new(amounts, bit_lengths, openings, &mut transcript)? + .try_into() + .map_err(|_| ProofGenerationError::ProofLength)?; Ok(Self { context, proof }) } @@ -72,8 +76,14 @@ impl ZkProofData for BatchedRangeProofU128Data { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let (commitments, bit_lengths) = self.context.try_into()?; + let num_commitments = commitments.len(); + + if num_commitments > MAX_COMMITMENTS || num_commitments != bit_lengths.len() { + return Err(ProofVerificationError::IllegalCommitmentLength); + } + let mut transcript = self.context_data().new_transcript(); let proof: RangeProof = self.proof.try_into()?; @@ -88,8 +98,8 @@ mod test { use { super::*, crate::{ - encryption::pedersen::Pedersen, - errors::{ProofType, ProofVerificationError}, + encryption::pedersen::Pedersen, errors::ProofVerificationError, + range_proof::errors::RangeProofVerificationError, }, }; @@ -179,10 +189,7 @@ mod test { assert_eq!( proof_data.verify_proof().unwrap_err(), - ProofError::VerificationError( - ProofType::RangeProof, - ProofVerificationError::AlgebraicRelation - ), + ProofVerificationError::RangeProof(RangeProofVerificationError::AlgebraicRelation), ); } } diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs index da25267cded33a..1bdba644f3c296 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs @@ -4,7 +4,8 @@ use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, + instruction::batched_range_proof::MAX_COMMITMENTS, range_proof::RangeProof, }, std::convert::TryInto, @@ -42,21 +43,23 @@ impl BatchedRangeProofU256Data { amounts: Vec, bit_lengths: Vec, openings: Vec<&PedersenOpening>, - ) -> Result { + ) -> Result { // the sum of the bit lengths must be 64 let batched_bit_length = bit_lengths .iter() .try_fold(0_usize, |acc, &x| acc.checked_add(x)) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::IllegalAmountBitLength)?; 
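The `try_fold`/`checked_add` sum above is the overflow-proof way to total untrusted bit lengths before comparing them against the expected width. On its own (names invented; `String` stands in for the error type):

```rust
// Sum caller-supplied bit lengths without wrapping; overflow or a wrong
// total is rejected rather than silently truncated.
fn checked_batched_bit_length(bit_lengths: &[usize], expected: u32) -> Result<usize, String> {
    let total = bit_lengths
        .iter()
        .try_fold(0_usize, |acc, &x| acc.checked_add(x))
        .ok_or("illegal amount bit length".to_string())?;
    if total != usize::try_from(expected).unwrap() {
        return Err("illegal amount bit length".to_string());
    }
    Ok(total)
}

fn main() {
    assert!(checked_batched_bit_length(&[64, 64], u128::BITS).is_ok());
    assert!(checked_batched_bit_length(&[64, 32], u128::BITS).is_err());
}
```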
if batched_bit_length != BATCHED_RANGE_PROOF_U256_BIT_LENGTH { - return Err(ProofError::Generation); + return Err(ProofGenerationError::IllegalAmountBitLength); } let context = BatchedRangeProofContext::new(&commitments, &amounts, &bit_lengths, &openings)?; let mut transcript = context.new_transcript(); - let proof = RangeProof::new(amounts, bit_lengths, openings, &mut transcript).try_into()?; + let proof = RangeProof::new(amounts, bit_lengths, openings, &mut transcript)? + .try_into() + .map_err(|_| ProofGenerationError::ProofLength)?; Ok(Self { context, proof }) } @@ -70,8 +73,14 @@ impl ZkProofData for BatchedRangeProofU256Data { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let (commitments, bit_lengths) = self.context.try_into()?; + let num_commitments = commitments.len(); + + if num_commitments > MAX_COMMITMENTS || num_commitments != bit_lengths.len() { + return Err(ProofVerificationError::IllegalCommitmentLength); + } + let mut transcript = self.context_data().new_transcript(); let proof: RangeProof = self.proof.try_into()?; @@ -86,8 +95,8 @@ mod test { use { super::*, crate::{ - encryption::pedersen::Pedersen, - errors::{ProofType, ProofVerificationError}, + encryption::pedersen::Pedersen, errors::ProofVerificationError, + range_proof::errors::RangeProofVerificationError, }, }; @@ -177,10 +186,7 @@ mod test { assert_eq!( proof_data.verify_proof().unwrap_err(), - ProofError::VerificationError( - ProofType::RangeProof, - ProofVerificationError::AlgebraicRelation - ), + ProofVerificationError::RangeProof(RangeProofVerificationError::AlgebraicRelation), ); } } diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs index ae34771aa5697a..94b76b5beff89d 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs @@ -4,7 +4,8 @@ use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, + instruction::batched_range_proof::MAX_COMMITMENTS, range_proof::RangeProof, }, std::convert::TryInto, @@ -39,26 +40,28 @@ impl BatchedRangeProofU64Data { amounts: Vec, bit_lengths: Vec, openings: Vec<&PedersenOpening>, - ) -> Result { + ) -> Result { // the sum of the bit lengths must be 64 let batched_bit_length = bit_lengths .iter() .try_fold(0_usize, |acc, &x| acc.checked_add(x)) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::IllegalAmountBitLength)?; // `u64::BITS` is 64, which fits in a single byte and should not overflow to `usize` for an // overwhelming number of platforms. However, to be extra cautious, use `try_from` and // `unwrap` here. A simple case `u64::BITS as usize` can silently overflow. let expected_bit_length = usize::try_from(u64::BITS).unwrap(); if batched_bit_length != expected_bit_length { - return Err(ProofError::Generation); + return Err(ProofGenerationError::IllegalAmountBitLength); } let context = BatchedRangeProofContext::new(&commitments, &amounts, &bit_lengths, &openings)?; let mut transcript = context.new_transcript(); - let proof = RangeProof::new(amounts, bit_lengths, openings, &mut transcript).try_into()?; + let proof = RangeProof::new(amounts, bit_lengths, openings, &mut transcript)? 
+ .try_into() + .map_err(|_| ProofGenerationError::ProofLength)?; Ok(Self { context, proof }) } @@ -72,8 +75,14 @@ impl ZkProofData for BatchedRangeProofU64Data { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let (commitments, bit_lengths) = self.context.try_into()?; + let num_commitments = commitments.len(); + + if num_commitments > MAX_COMMITMENTS || num_commitments != bit_lengths.len() { + return Err(ProofVerificationError::IllegalCommitmentLength); + } + let mut transcript = self.context_data().new_transcript(); let proof: RangeProof = self.proof.try_into()?; @@ -88,8 +97,8 @@ mod test { use { super::*, crate::{ - encryption::pedersen::Pedersen, - errors::{ProofType, ProofVerificationError}, + encryption::pedersen::Pedersen, errors::ProofVerificationError, + range_proof::errors::RangeProofVerificationError, }, }; @@ -179,10 +188,7 @@ mod test { assert_eq!( proof_data.verify_proof().unwrap_err(), - ProofError::VerificationError( - ProofType::RangeProof, - ProofVerificationError::AlgebraicRelation - ), + ProofVerificationError::RangeProof(RangeProofVerificationError::AlgebraicRelation), ); } } diff --git a/zk-token-sdk/src/instruction/batched_range_proof/mod.rs b/zk-token-sdk/src/instruction/batched_range_proof/mod.rs index 07351d87854c89..a5bd3b5fe56874 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/mod.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/mod.rs @@ -28,7 +28,7 @@ use { use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, }, bytemuck::bytes_of, curve25519_dalek::traits::IsIdentity, @@ -54,16 +54,16 @@ impl BatchedRangeProofContext { fn new_transcript(&self) -> Transcript { let mut transcript = Transcript::new(b"BatchedRangeProof"); transcript.append_message(b"commitments", bytes_of(&self.commitments)); - transcript.append_message(b"bit-legnths", bytes_of(&self.bit_lengths)); + transcript.append_message(b"bit-lengths", bytes_of(&self.bit_lengths)); transcript } fn new( - commitments: &Vec<&PedersenCommitment>, - amounts: &Vec, - bit_lengths: &Vec, - openings: &Vec<&PedersenOpening>, - ) -> Result { + commitments: &[&PedersenCommitment], + amounts: &[u64], + bit_lengths: &[usize], + openings: &[&PedersenOpening], + ) -> Result { // the number of commitments is capped at 8 let num_commitments = commitments.len(); if num_commitments > MAX_COMMITMENTS @@ -71,14 +71,14 @@ impl BatchedRangeProofContext { || num_commitments != bit_lengths.len() || num_commitments != openings.len() { - return Err(ProofError::Generation); + return Err(ProofGenerationError::IllegalCommitmentLength); } let mut pod_commitments = [pod::PedersenCommitment::zeroed(); MAX_COMMITMENTS]; for (i, commitment) in commitments.iter().enumerate() { // all-zero commitment is invalid if commitment.get_point().is_identity() { - return Err(ProofError::Generation); + return Err(ProofGenerationError::InvalidCommitment); } pod_commitments[i] = pod::PedersenCommitment(commitment.to_bytes()); } @@ -87,7 +87,7 @@ impl BatchedRangeProofContext { for (i, bit_length) in bit_lengths.iter().enumerate() { pod_bit_lengths[i] = (*bit_length) .try_into() - .map_err(|_| ProofError::Generation)?; + .map_err(|_| ProofGenerationError::IllegalAmountBitLength)?; } Ok(BatchedRangeProofContext { @@ -99,7 +99,7 @@ impl BatchedRangeProofContext { #[cfg(not(target_os = "solana"))] impl TryInto<(Vec, Vec)> for 
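The new verifier-side guard above re-checks what the prover-side constructor already enforced, because `verify_proof()` runs on untrusted instruction data. Its core, extracted (the cap of 8 comes from the `MAX_COMMITMENTS` comment above; error type simplified):

```rust
const MAX_COMMITMENTS: usize = 8;

// Reject contexts whose commitment count exceeds the cap or disagrees with
// the number of bit lengths, before any expensive range-proof math runs.
fn check_context(num_commitments: usize, num_bit_lengths: usize) -> Result<(), String> {
    if num_commitments > MAX_COMMITMENTS || num_commitments != num_bit_lengths {
        return Err("illegal commitment length".to_string());
    }
    Ok(())
}
```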
BatchedRangeProofContext { - type Error = ProofError; + type Error = ProofVerificationError; fn try_into(self) -> Result<(Vec, Vec), Self::Error> { let commitments = self @@ -107,7 +107,8 @@ impl TryInto<(Vec, Vec)> for BatchedRangeProofContext .into_iter() .take_while(|commitment| *commitment != pod::PedersenCommitment::zeroed()) .map(|commitment| commitment.try_into()) - .collect::, _>>()?; + .collect::, _>>() + .map_err(|_| ProofVerificationError::ProofContext)?; let bit_lengths: Vec<_> = self .bit_lengths diff --git a/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs b/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs index 32aa566323fb0c..5d5fa7e54f04c8 100644 --- a/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs +++ b/zk-token-sdk/src/instruction/ciphertext_ciphertext_equality.rs @@ -15,7 +15,7 @@ use { elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey}, pedersen::PedersenOpening, }, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, sigma_proofs::ciphertext_ciphertext_equality_proof::CiphertextCiphertextEqualityProof, transcript::TranscriptProtocol, }, @@ -65,7 +65,7 @@ impl CiphertextCiphertextEqualityProofData { destination_ciphertext: &ElGamalCiphertext, destination_opening: &PedersenOpening, amount: u64, - ) -> Result { + ) -> Result { let pod_source_pubkey = pod::ElGamalPubkey(source_keypair.pubkey().to_bytes()); let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.to_bytes()); let pod_source_ciphertext = pod::ElGamalCiphertext(source_ciphertext.to_bytes()); @@ -104,7 +104,7 @@ impl ZkProofData } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let source_pubkey = self.context.source_pubkey.try_into()?; diff --git a/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs b/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs index 8eab49e505ff0d..4f4f49fab57c68 100644 --- a/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs +++ b/zk-token-sdk/src/instruction/ciphertext_commitment_equality.rs @@ -12,7 +12,7 @@ use { elgamal::{ElGamalCiphertext, ElGamalKeypair}, pedersen::{PedersenCommitment, PedersenOpening}, }, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, sigma_proofs::ciphertext_commitment_equality_proof::CiphertextCommitmentEqualityProof, transcript::TranscriptProtocol, }, @@ -60,7 +60,7 @@ impl CiphertextCommitmentEqualityProofData { commitment: &PedersenCommitment, opening: &PedersenOpening, amount: u64, - ) -> Result { + ) -> Result { let context = CiphertextCommitmentEqualityProofContext { pubkey: pod::ElGamalPubkey(keypair.pubkey().to_bytes()), ciphertext: pod::ElGamalCiphertext(ciphertext.to_bytes()), @@ -91,7 +91,7 @@ impl ZkProofData } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let pubkey = self.context.pubkey.try_into()?; diff --git a/zk-token-sdk/src/instruction/errors.rs b/zk-token-sdk/src/instruction/errors.rs new file mode 100644 index 00000000000000..a21ac1cf345459 --- /dev/null +++ b/zk-token-sdk/src/instruction/errors.rs @@ -0,0 +1,11 @@ +#[cfg(not(target_os = "solana"))] +use thiserror::Error; + +#[derive(Error, Clone, Debug, Eq, PartialEq)] +#[cfg(not(target_os = "solana"))] +pub enum 
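`BatchedRangeProofContext`'s `TryInto` above decodes a fixed-capacity array in which unused trailing slots are all-zero (an all-zero commitment is invalid anyway, per the generation-side check). The decoding idiom, with `u32` standing in for a Pedersen commitment:

```rust
// Fixed-capacity wire format: used entries first, zeroed padding after.
// take_while stops at the first zeroed slot, then each raw entry is parsed.
fn decode_commitments(slots: [[u8; 4]; 8]) -> Vec<u32> {
    slots
        .into_iter()
        .take_while(|slot| *slot != [0u8; 4])
        .map(u32::from_le_bytes)
        .collect()
}

fn main() {
    let mut slots = [[0u8; 4]; 8];
    slots[0] = 7u32.to_le_bytes();
    slots[1] = 9u32.to_le_bytes();
    assert_eq!(decode_commitments(slots), vec![7, 9]);
}
```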
InstructionError { + #[error("decryption error")] + Decryption, + #[error("missing ciphertext")] + MissingCiphertext, +} diff --git a/zk-token-sdk/src/instruction/fee_sigma.rs b/zk-token-sdk/src/instruction/fee_sigma.rs index a158e3d27632f5..500e21a505cf33 100644 --- a/zk-token-sdk/src/instruction/fee_sigma.rs +++ b/zk-token-sdk/src/instruction/fee_sigma.rs @@ -6,13 +6,13 @@ //! A formal documentation of how transfer fees and fee sigma proof are computed can be found in //! the [`ZK Token proof`] program documentation. //! -//! [`ZK Token proof`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof +//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof #[cfg(not(target_os = "solana"))] use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, sigma_proofs::fee_proof::FeeSigmaProof, transcript::TranscriptProtocol, }, @@ -43,7 +43,7 @@ pub struct FeeSigmaProofData { /// /// We refer to [`ZK Token proof`] for the formal details on how the fee sigma proof is computed. /// -/// [`ZK Token proof`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof +/// [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof #[derive(Clone, Copy, Pod, Zeroable)] #[repr(C)] pub struct FeeSigmaProofContext { @@ -72,7 +72,7 @@ impl FeeSigmaProofData { fee_amount: u64, delta_fee: u64, max_fee: u64, - ) -> Result { + ) -> Result { let pod_fee_commitment = pod::PedersenCommitment(fee_commitment.to_bytes()); let pod_delta_commitment = pod::PedersenCommitment(delta_commitment.to_bytes()); let pod_claimed_commitment = pod::PedersenCommitment(claimed_commitment.to_bytes()); @@ -108,7 +108,7 @@ impl ZkProofData for FeeSigmaProofData { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let fee_commitment = self.context.fee_commitment.try_into()?; diff --git a/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs b/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs index 59d19d11838623..bb13a0805ad670 100644 --- a/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs +++ b/zk-token-sdk/src/instruction/grouped_ciphertext_validity.rs @@ -17,7 +17,7 @@ use { elgamal::ElGamalPubkey, grouped_elgamal::GroupedElGamalCiphertext, pedersen::PedersenOpening, }, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, sigma_proofs::grouped_ciphertext_validity_proof::GroupedCiphertext2HandlesValidityProof, transcript::TranscriptProtocol, }, @@ -62,7 +62,7 @@ impl GroupedCiphertext2HandlesValidityProofData { grouped_ciphertext: &GroupedElGamalCiphertext<2>, amount: u64, opening: &PedersenOpening, - ) -> Result { + ) -> Result { let pod_destination_pubkey = pod::ElGamalPubkey(destination_pubkey.to_bytes()); let pod_auditor_pubkey = pod::ElGamalPubkey(auditor_pubkey.to_bytes()); let pod_grouped_ciphertext = (*grouped_ciphertext).into(); @@ -97,7 +97,7 @@ impl ZkProofData } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let destination_pubkey = self.context.destination_pubkey.try_into()?; @@ -105,7 +105,7 @@ impl ZkProofData let grouped_ciphertext: GroupedElGamalCiphertext<2> = 
self.context.grouped_ciphertext.try_into()?;
-        let destination_handle = grouped_ciphertext.handles.get(0).unwrap();
+        let destination_handle = grouped_ciphertext.handles.first().unwrap();
         let auditor_handle = grouped_ciphertext.handles.get(1).unwrap();
         let proof: GroupedCiphertext2HandlesValidityProof = self.proof.try_into()?;
diff --git a/zk-token-sdk/src/instruction/mod.rs b/zk-token-sdk/src/instruction/mod.rs
index 374c224f58c74d..14b81938c281aa 100644
--- a/zk-token-sdk/src/instruction/mod.rs
+++ b/zk-token-sdk/src/instruction/mod.rs
@@ -1,11 +1,12 @@
 //! The instruction data types for the [`ZK Token proof`] instruction.
 //!
-//! [`ZK Token proof`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof
+//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof
 
 pub mod batched_grouped_ciphertext_validity;
 pub mod batched_range_proof;
 pub mod ciphertext_ciphertext_equality;
 pub mod ciphertext_commitment_equality;
+pub mod errors;
 pub mod fee_sigma;
 pub mod grouped_ciphertext_validity;
 pub mod pubkey_validity;
@@ -15,7 +16,7 @@ pub mod withdraw;
 pub mod zero_balance;
 
 #[cfg(not(target_os = "solana"))]
-use crate::errors::ProofError;
+use crate::errors::ProofVerificationError;
 use num_derive::{FromPrimitive, ToPrimitive};
 pub use {
     batched_grouped_ciphertext_validity::{
@@ -75,5 +76,5 @@ pub trait ZkProofData<T: Pod> {
     fn context_data(&self) -> &T;
 
     #[cfg(not(target_os = "solana"))]
-    fn verify_proof(&self) -> Result<(), ProofError>;
+    fn verify_proof(&self) -> Result<(), ProofVerificationError>;
 }
diff --git a/zk-token-sdk/src/instruction/pubkey_validity.rs b/zk-token-sdk/src/instruction/pubkey_validity.rs
index 46d8db02327e3b..b0251fbf5631c5 100644
--- a/zk-token-sdk/src/instruction/pubkey_validity.rs
+++ b/zk-token-sdk/src/instruction/pubkey_validity.rs
@@ -8,8 +8,10 @@
 #[cfg(not(target_os = "solana"))]
 use {
     crate::{
-        encryption::elgamal::ElGamalKeypair, errors::ProofError,
-        sigma_proofs::pubkey_proof::PubkeyValidityProof, transcript::TranscriptProtocol,
+        encryption::elgamal::ElGamalKeypair,
+        errors::{ProofGenerationError, ProofVerificationError},
+        sigma_proofs::pubkey_proof::PubkeyValidityProof,
+        transcript::TranscriptProtocol,
     },
     merlin::Transcript,
     std::convert::TryInto,
@@ -47,7 +49,7 @@ pub struct PubkeyValidityProofContext {
 
 #[cfg(not(target_os = "solana"))]
 impl PubkeyValidityData {
-    pub fn new(keypair: &ElGamalKeypair) -> Result<Self, ProofError> {
+    pub fn new(keypair: &ElGamalKeypair) -> Result<Self, ProofGenerationError> {
         let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().to_bytes());
 
         let context = PubkeyValidityProofContext { pubkey: pod_pubkey };
@@ -67,7 +69,7 @@ impl ZkProofData<PubkeyValidityProofContext> for PubkeyValidityData {
     }
 
     #[cfg(not(target_os = "solana"))]
-    fn verify_proof(&self) -> Result<(), ProofError> {
+    fn verify_proof(&self) -> Result<(), ProofVerificationError> {
         let mut transcript = self.context.new_transcript();
         let pubkey = self.context.pubkey.try_into()?;
         let proof: PubkeyValidityProof = self.proof.try_into()?;
diff --git a/zk-token-sdk/src/instruction/range_proof.rs b/zk-token-sdk/src/instruction/range_proof.rs
index 3342f5c34fb8fb..5cc499e1447f91 100644
--- a/zk-token-sdk/src/instruction/range_proof.rs
+++ b/zk-token-sdk/src/instruction/range_proof.rs
@@ -8,7 +8,7 @@
 use {
     crate::{
         encryption::pedersen::{PedersenCommitment, PedersenOpening},
-        errors::ProofError,
+        errors::{ProofGenerationError, ProofVerificationError},
         range_proof::RangeProof,
         transcript::TranscriptProtocol,
     },
@@ -50,7 +50,7 @@ impl RangeProofU64Data {
         commitment: &PedersenCommitment,
         amount: u64,
         opening: &PedersenOpening,
-    ) -> Result<Self, ProofError> {
+    ) -> Result<Self, ProofGenerationError> {
         let pod_commitment = pod::PedersenCommitment(commitment.to_bytes());
 
         let context = RangeProofContext {
@@ -64,8 +64,9 @@ impl RangeProofU64Data {
         // `unwrap` here. A simple case `u64::BITS as usize` can silently overflow.
         let bit_size = usize::try_from(u64::BITS).unwrap();
 
-        let proof = RangeProof::new(vec![amount], vec![bit_size], vec![opening], &mut transcript)
-            .try_into()?;
+        let proof = RangeProof::new(vec![amount], vec![bit_size], vec![opening], &mut transcript)?
+            .try_into()
+            .map_err(|_| ProofGenerationError::ProofLength)?;
 
         Ok(Self { context, proof })
     }
@@ -79,7 +80,7 @@ impl ZkProofData<RangeProofContext> for RangeProofU64Data {
     }
 
     #[cfg(not(target_os = "solana"))]
-    fn verify_proof(&self) -> Result<(), ProofError> {
+    fn verify_proof(&self) -> Result<(), ProofVerificationError> {
         let mut transcript = self.context_data().new_transcript();
         let commitment = self.context.commitment.try_into()?;
         let proof: RangeProof = self.proof.try_into()?;
diff --git a/zk-token-sdk/src/instruction/transfer/encryption.rs b/zk-token-sdk/src/instruction/transfer/encryption.rs
index f9d40c0889b0e0..b554da16439bac 100644
--- a/zk-token-sdk/src/instruction/transfer/encryption.rs
+++ b/zk-token-sdk/src/instruction/transfer/encryption.rs
@@ -35,7 +35,7 @@ impl TransferAmountCiphertext {
     pub fn get_source_handle(&self) -> &DecryptHandle {
         // `TransferAmountCiphertext` is a wrapper for `GroupedElGamalCiphertext<3>`, which
         // holds exactly three decryption handles.
-        self.0.handles.get(0).unwrap()
+        self.0.handles.first().unwrap()
     }
 
     pub fn get_destination_handle(&self) -> &DecryptHandle {
@@ -80,7 +80,7 @@ impl FeeEncryption {
     pub fn get_destination_handle(&self) -> &DecryptHandle {
         // `FeeEncryption` is a wrapper for `GroupedElGamalCiphertext<2>`, which holds
         // exactly two decryption handles.
-        self.0.handles.get(0).unwrap()
+        self.0.handles.first().unwrap()
     }
 
     pub fn get_withdraw_withheld_authority_handle(&self) -> &DecryptHandle {
diff --git a/zk-token-sdk/src/instruction/transfer/mod.rs b/zk-token-sdk/src/instruction/transfer/mod.rs
index 736ba937b5ba16..33bc6c08a96a21 100644
--- a/zk-token-sdk/src/instruction/transfer/mod.rs
+++ b/zk-token-sdk/src/instruction/transfer/mod.rs
@@ -31,8 +31,8 @@ pub enum Role {
 }
 
 /// Takes in a 64-bit number `amount` and a bit length `bit_length`. It returns:
-/// - the `bit_length` low bits of `amount` interpretted as u64
-/// - the (64 - `bit_length`) high bits of `amount` interpretted as u64
+/// - the `bit_length` low bits of `amount` interpreted as u64
+/// - the (64 - `bit_length`) high bits of `amount` interpreted as u64
 #[cfg(not(target_os = "solana"))]
 pub fn split_u64(amount: u64, bit_length: usize) -> (u64, u64) {
     if bit_length == 64 {
diff --git a/zk-token-sdk/src/instruction/transfer/with_fee.rs b/zk-token-sdk/src/instruction/transfer/with_fee.rs
index 709b0b63797ce9..4bc9a154376840 100644
--- a/zk-token-sdk/src/instruction/transfer/with_fee.rs
+++ b/zk-token-sdk/src/instruction/transfer/with_fee.rs
@@ -5,12 +5,15 @@ use {
         elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey, ElGamalSecretKey},
         pedersen::{Pedersen, PedersenCommitment, PedersenOpening},
     },
-    errors::ProofError,
-    instruction::transfer::{
-        combine_lo_hi_ciphertexts, combine_lo_hi_commitments, combine_lo_hi_openings,
-        combine_lo_hi_u64,
-        encryption::{FeeEncryption, TransferAmountCiphertext},
-        split_u64, FeeParameters, Role,
+    errors::{ProofGenerationError, ProofVerificationError},
+    instruction::{
+        errors::InstructionError,
+        transfer::{
+            combine_lo_hi_ciphertexts, combine_lo_hi_commitments, combine_lo_hi_openings,
+            combine_lo_hi_u64,
+            encryption::{FeeEncryption, TransferAmountCiphertext},
+            split_u64, FeeParameters, Role,
+        },
     },
     range_proof::RangeProof,
     sigma_proofs::{
@@ -38,6 +41,8 @@ use {
 const MAX_FEE_BASIS_POINTS: u64 = 10_000;
 #[cfg(not(target_os = "solana"))]
 const ONE_IN_BASIS_POINTS: u128 = MAX_FEE_BASIS_POINTS as u128;
+#[cfg(not(target_os = "solana"))]
+const MAX_DELTA_RANGE: u64 = MAX_FEE_BASIS_POINTS - 1;
 
 #[cfg(not(target_os = "solana"))]
 const TRANSFER_SOURCE_AMOUNT_BITS: usize = 64;
@@ -48,7 +53,7 @@ const TRANSFER_AMOUNT_LO_NEGATED_BITS: usize = 16;
 #[cfg(not(target_os = "solana"))]
 const TRANSFER_AMOUNT_HI_BITS: usize = 32;
 #[cfg(not(target_os = "solana"))]
-const TRANSFER_DELTA_BITS: usize = 48;
+const TRANSFER_DELTA_BITS: usize = 16;
 #[cfg(not(target_os = "solana"))]
 const FEE_AMOUNT_LO_BITS: usize = 16;
 #[cfg(not(target_os = "solana"))]
@@ -59,6 +64,7 @@
 lazy_static::lazy_static! {
     pub static ref COMMITMENT_MAX: PedersenCommitment = Pedersen::encode((1_u64 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1);
     pub static ref COMMITMENT_MAX_FEE_BASIS_POINTS: PedersenCommitment = Pedersen::encode(MAX_FEE_BASIS_POINTS);
+    pub static ref COMMITMENT_MAX_DELTA_RANGE: PedersenCommitment = Pedersen::encode(MAX_DELTA_RANGE);
 }
 
 /// The instruction data that is needed for the `ProofInstruction::TransferWithFee` instruction.
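Why 16 delta bits now suffice: the fee delta is the rounding remainder of a ceiling division by MAX_FEE_BASIS_POINTS, so it is bounded by the new MAX_DELTA_RANGE of 9_999 and fits comfortably in a 16-bit range proof slot, which is why the old 48-bit slot was oversized. A minimal sketch of that invariant, assuming the fee is ceil(amount * rate / 10_000); calculate_fee_sketch is a hypothetical stand-in, not the SDK's fee helper:

const MAX_FEE_BASIS_POINTS: u64 = 10_000;
const MAX_DELTA_RANGE: u64 = MAX_FEE_BASIS_POINTS - 1;

// Hypothetical stand-in: fee = ceil(amount * rate / 10_000), and
// delta = fee * 10_000 - amount * rate is the rounding remainder.
fn calculate_fee_sketch(amount: u64, fee_rate_basis_points: u16) -> (u128, u128) {
    let one = MAX_FEE_BASIS_POINTS as u128;
    let numerator = (amount as u128) * (fee_rate_basis_points as u128);
    let fee = (numerator + one - 1) / one; // ceiling division, no overflow in u128
    let delta = fee * one - numerator;
    (fee, delta)
}

fn main() {
    // delta is a remainder of division by 10_000, so 0 <= delta <= 9_999:
    // it always fits in 14 bits, hence TRANSFER_DELTA_BITS = 16 is enough.
    for (amount, rate) in [(1u64, 1u16), (100, 25), (u64::MAX, 9_999)] {
        let (_fee, delta) = calculate_fee_sketch(amount, rate);
        assert!(delta <= MAX_DELTA_RANGE as u128);
    }
}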
@@ -120,7 +126,7 @@ impl TransferWithFeeData { (destination_pubkey, auditor_pubkey): (&ElGamalPubkey, &ElGamalPubkey), fee_parameters: FeeParameters, withdraw_withheld_authority_pubkey: &ElGamalPubkey, - ) -> Result { + ) -> Result { // split and encrypt transfer amount let (amount_lo, amount_hi) = split_u64(transfer_amount, TRANSFER_AMOUNT_LO_BITS); @@ -140,7 +146,7 @@ impl TransferWithFeeData { // subtract transfer amount from the spendable ciphertext let new_spendable_balance = spendable_balance .checked_sub(transfer_amount) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::NotEnoughFunds)?; let transfer_amount_lo_source = ElGamalCiphertext { commitment: *ciphertext_lo.get_commitment(), @@ -164,7 +170,7 @@ impl TransferWithFeeData { // TODO: add comment on delta fee let (fee_amount, delta_fee) = calculate_fee(transfer_amount, fee_parameters.fee_rate_basis_points) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::FeeCalculation)?; let below_max = u64::ct_gt(&fee_parameters.maximum_fee, &fee_amount); let fee_to_encrypt = @@ -222,14 +228,18 @@ impl TransferWithFeeData { withdraw_withheld_authority_pubkey, fee_parameters, &mut transcript, - ); + )?; Ok(Self { context, proof }) } /// Extracts the lo ciphertexts associated with a transfer-with-fee data - fn ciphertext_lo(&self, role: Role) -> Result { - let ciphertext_lo: TransferAmountCiphertext = self.context.ciphertext_lo.try_into()?; + fn ciphertext_lo(&self, role: Role) -> Result { + let ciphertext_lo: TransferAmountCiphertext = self + .context + .ciphertext_lo + .try_into() + .map_err(|_| InstructionError::Decryption)?; let handle_lo = match role { Role::Source => Some(ciphertext_lo.get_source_handle()), @@ -244,13 +254,17 @@ impl TransferWithFeeData { handle: *handle, }) } else { - Err(ProofError::MissingCiphertext) + Err(InstructionError::MissingCiphertext) } } /// Extracts the lo ciphertexts associated with a transfer-with-fee data - fn ciphertext_hi(&self, role: Role) -> Result { - let ciphertext_hi: TransferAmountCiphertext = self.context.ciphertext_hi.try_into()?; + fn ciphertext_hi(&self, role: Role) -> Result { + let ciphertext_hi: TransferAmountCiphertext = self + .context + .ciphertext_hi + .try_into() + .map_err(|_| InstructionError::Decryption)?; let handle_hi = match role { Role::Source => Some(ciphertext_hi.get_source_handle()), @@ -265,13 +279,17 @@ impl TransferWithFeeData { handle: *handle, }) } else { - Err(ProofError::MissingCiphertext) + Err(InstructionError::MissingCiphertext) } } /// Extracts the lo fee ciphertexts associated with a transfer_with_fee data - fn fee_ciphertext_lo(&self, role: Role) -> Result { - let fee_ciphertext_lo: FeeEncryption = self.context.fee_ciphertext_lo.try_into()?; + fn fee_ciphertext_lo(&self, role: Role) -> Result { + let fee_ciphertext_lo: FeeEncryption = self + .context + .fee_ciphertext_lo + .try_into() + .map_err(|_| InstructionError::Decryption)?; let fee_handle_lo = match role { Role::Source => None, @@ -288,13 +306,17 @@ impl TransferWithFeeData { handle: *handle, }) } else { - Err(ProofError::MissingCiphertext) + Err(InstructionError::MissingCiphertext) } } /// Extracts the hi fee ciphertexts associated with a transfer_with_fee data - fn fee_ciphertext_hi(&self, role: Role) -> Result { - let fee_ciphertext_hi: FeeEncryption = self.context.fee_ciphertext_hi.try_into()?; + fn fee_ciphertext_hi(&self, role: Role) -> Result { + let fee_ciphertext_hi: FeeEncryption = self + .context + .fee_ciphertext_hi + .try_into() + .map_err(|_| 
InstructionError::Decryption)?; let fee_handle_hi = match role { Role::Source => None, @@ -311,12 +333,16 @@ impl TransferWithFeeData { handle: *handle, }) } else { - Err(ProofError::MissingCiphertext) + Err(InstructionError::MissingCiphertext) } } /// Decrypts transfer amount from transfer-with-fee data - pub fn decrypt_amount(&self, role: Role, sk: &ElGamalSecretKey) -> Result { + pub fn decrypt_amount( + &self, + role: Role, + sk: &ElGamalSecretKey, + ) -> Result { let ciphertext_lo = self.ciphertext_lo(role)?; let ciphertext_hi = self.ciphertext_hi(role)?; @@ -327,12 +353,16 @@ impl TransferWithFeeData { let shifted_amount_hi = amount_hi << TRANSFER_AMOUNT_LO_BITS; Ok(amount_lo + shifted_amount_hi) } else { - Err(ProofError::Decryption) + Err(InstructionError::Decryption) } } /// Decrypts transfer amount from transfer-with-fee data - pub fn decrypt_fee_amount(&self, role: Role, sk: &ElGamalSecretKey) -> Result { + pub fn decrypt_fee_amount( + &self, + role: Role, + sk: &ElGamalSecretKey, + ) -> Result { let ciphertext_lo = self.fee_ciphertext_lo(role)?; let ciphertext_hi = self.fee_ciphertext_hi(role)?; @@ -343,7 +373,7 @@ impl TransferWithFeeData { let shifted_fee_amount_hi = fee_amount_hi << FEE_AMOUNT_LO_BITS; Ok(fee_amount_lo + shifted_fee_amount_hi) } else { - Err(ProofError::Decryption) + Err(InstructionError::Decryption) } } } @@ -356,7 +386,7 @@ impl ZkProofData for TransferWithFeeData { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let source_pubkey = self.context.transfer_with_fee_pubkeys.source.try_into()?; @@ -448,7 +478,7 @@ impl TransferWithFeeProof { withdraw_withheld_authority_pubkey: &ElGamalPubkey, fee_parameters: FeeParameters, transcript: &mut Transcript, - ) -> Self { + ) -> Result { let (transfer_amount_lo, ciphertext_lo, opening_lo) = transfer_amount_lo_data; let (transfer_amount_hi, ciphertext_hi, opening_hi) = transfer_amount_hi_data; @@ -530,24 +560,41 @@ impl TransferWithFeeProof { // generate the range proof let opening_claimed_negated = &PedersenOpening::default() - &opening_claimed; + + let combined_amount = combine_lo_hi_u64( + transfer_amount_lo, + transfer_amount_hi, + TRANSFER_AMOUNT_LO_BITS, + ); + let amount_sub_fee = combined_amount + .checked_sub(combined_fee_amount) + .ok_or(ProofGenerationError::FeeCalculation)?; + let amount_sub_fee_opening = combined_opening - combined_fee_opening; + + let delta_negated = MAX_DELTA_RANGE + .checked_sub(delta_fee) + .ok_or(ProofGenerationError::FeeCalculation)?; + let range_proof = RangeProof::new( vec![ source_new_balance, transfer_amount_lo, transfer_amount_hi, delta_fee, - MAX_FEE_BASIS_POINTS - delta_fee, + delta_negated, fee_amount_lo, fee_amount_hi, + amount_sub_fee, ], vec![ TRANSFER_SOURCE_AMOUNT_BITS, // 64 TRANSFER_AMOUNT_LO_BITS, // 16 TRANSFER_AMOUNT_HI_BITS, // 32 - TRANSFER_DELTA_BITS, // 48 - TRANSFER_DELTA_BITS, // 48 + TRANSFER_DELTA_BITS, // 16 + TRANSFER_DELTA_BITS, // 16 FEE_AMOUNT_LO_BITS, // 16 FEE_AMOUNT_HI_BITS, // 32 + TRANSFER_SOURCE_AMOUNT_BITS, // 64 ], vec![ &opening_source, @@ -557,19 +604,22 @@ impl TransferWithFeeProof { &opening_claimed_negated, opening_fee_lo, opening_fee_hi, + &amount_sub_fee_opening, ], transcript, - ); + )?; - Self { + Ok(Self { new_source_commitment: pod_new_source_commitment, claimed_commitment: pod_claimed_commitment, equality_proof: equality_proof.into(), ciphertext_amount_validity_proof: 
ciphertext_amount_validity_proof.into(), fee_sigma_proof: fee_sigma_proof.into(), fee_ciphertext_validity_proof: fee_ciphertext_validity_proof.into(), - range_proof: range_proof.try_into().expect("range proof: length error"), - } + range_proof: range_proof + .try_into() + .map_err(|_| ProofGenerationError::ProofLength)?, + }) } #[allow(clippy::too_many_arguments)] @@ -587,7 +637,7 @@ impl TransferWithFeeProof { fee_ciphertext_hi: &FeeEncryption, fee_parameters: FeeParameters, transcript: &mut Transcript, - ) -> Result<(), ProofError> { + ) -> Result<(), ProofVerificationError> { transcript.append_commitment(b"commitment-new-source", &self.new_source_commitment); let new_source_commitment: PedersenCommitment = self.new_source_commitment.try_into()?; @@ -679,7 +729,8 @@ impl TransferWithFeeProof { // verify range proof let new_source_commitment = self.new_source_commitment.try_into()?; - let claimed_commitment_negated = &(*COMMITMENT_MAX_FEE_BASIS_POINTS) - &claimed_commitment; + let claimed_commitment_negated = &(*COMMITMENT_MAX_DELTA_RANGE) - &claimed_commitment; + let amount_sub_fee_commitment = combined_commitment - combined_fee_commitment; range_proof.verify( vec![ @@ -690,15 +741,17 @@ impl TransferWithFeeProof { &claimed_commitment_negated, fee_ciphertext_lo.get_commitment(), fee_ciphertext_hi.get_commitment(), + &amount_sub_fee_commitment, ], vec![ TRANSFER_SOURCE_AMOUNT_BITS, // 64 TRANSFER_AMOUNT_LO_BITS, // 16 TRANSFER_AMOUNT_HI_BITS, // 32 - TRANSFER_DELTA_BITS, // 48 - TRANSFER_DELTA_BITS, // 48 + TRANSFER_DELTA_BITS, // 16 + TRANSFER_DELTA_BITS, // 16 FEE_AMOUNT_LO_BITS, // 16 FEE_AMOUNT_HI_BITS, // 32 + TRANSFER_SOURCE_AMOUNT_BITS, // 64 ], transcript, )?; diff --git a/zk-token-sdk/src/instruction/transfer/without_fee.rs b/zk-token-sdk/src/instruction/transfer/without_fee.rs index ad4def104d3b40..39e28994cb809b 100644 --- a/zk-token-sdk/src/instruction/transfer/without_fee.rs +++ b/zk-token-sdk/src/instruction/transfer/without_fee.rs @@ -5,9 +5,12 @@ use { elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey, ElGamalSecretKey}, pedersen::{Pedersen, PedersenCommitment, PedersenOpening}, }, - errors::ProofError, - instruction::transfer::{ - combine_lo_hi_ciphertexts, encryption::TransferAmountCiphertext, split_u64, Role, + errors::{ProofGenerationError, ProofVerificationError}, + instruction::{ + errors::InstructionError, + transfer::{ + combine_lo_hi_ciphertexts, encryption::TransferAmountCiphertext, split_u64, Role, + }, }, range_proof::RangeProof, sigma_proofs::{ @@ -91,7 +94,7 @@ impl TransferData { (spendable_balance, ciphertext_old_source): (u64, &ElGamalCiphertext), source_keypair: &ElGamalKeypair, (destination_pubkey, auditor_pubkey): (&ElGamalPubkey, &ElGamalPubkey), - ) -> Result { + ) -> Result { // split and encrypt transfer amount let (amount_lo, amount_hi) = split_u64(transfer_amount, TRANSFER_AMOUNT_LO_BITS); @@ -112,7 +115,7 @@ impl TransferData { // subtract transfer amount from the spendable ciphertext let new_spendable_balance = spendable_balance .checked_sub(transfer_amount) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::NotEnoughFunds)?; let transfer_amount_lo_source = ElGamalCiphertext { commitment: *ciphertext_lo.get_commitment(), @@ -157,14 +160,18 @@ impl TransferData { &opening_hi, (new_spendable_balance, &new_source_ciphertext), &mut transcript, - ); + )?; Ok(Self { context, proof }) } /// Extracts the lo ciphertexts associated with a transfer data - fn ciphertext_lo(&self, role: Role) -> Result { - let ciphertext_lo: 
TransferAmountCiphertext = self.context.ciphertext_lo.try_into()?; + fn ciphertext_lo(&self, role: Role) -> Result { + let ciphertext_lo: TransferAmountCiphertext = self + .context + .ciphertext_lo + .try_into() + .map_err(|_| InstructionError::Decryption)?; let handle_lo = match role { Role::Source => Some(ciphertext_lo.get_source_handle()), @@ -179,13 +186,17 @@ impl TransferData { handle: *handle, }) } else { - Err(ProofError::MissingCiphertext) + Err(InstructionError::MissingCiphertext) } } /// Extracts the lo ciphertexts associated with a transfer data - fn ciphertext_hi(&self, role: Role) -> Result { - let ciphertext_hi: TransferAmountCiphertext = self.context.ciphertext_hi.try_into()?; + fn ciphertext_hi(&self, role: Role) -> Result { + let ciphertext_hi: TransferAmountCiphertext = self + .context + .ciphertext_hi + .try_into() + .map_err(|_| InstructionError::Decryption)?; let handle_hi = match role { Role::Source => Some(ciphertext_hi.get_source_handle()), @@ -200,12 +211,16 @@ impl TransferData { handle: *handle, }) } else { - Err(ProofError::MissingCiphertext) + Err(InstructionError::MissingCiphertext) } } /// Decrypts transfer amount from transfer data - pub fn decrypt_amount(&self, role: Role, sk: &ElGamalSecretKey) -> Result { + pub fn decrypt_amount( + &self, + role: Role, + sk: &ElGamalSecretKey, + ) -> Result { let ciphertext_lo = self.ciphertext_lo(role)?; let ciphertext_hi = self.ciphertext_hi(role)?; @@ -216,7 +231,7 @@ impl TransferData { let two_power = 1 << TRANSFER_AMOUNT_LO_BITS; Ok(amount_lo + two_power * amount_hi) } else { - Err(ProofError::Decryption) + Err(InstructionError::Decryption) } } } @@ -229,7 +244,7 @@ impl ZkProofData for TransferData { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { // generate transcript and append all public inputs let mut transcript = self.context.new_transcript(); @@ -297,7 +312,7 @@ impl TransferProof { opening_hi: &PedersenOpening, (source_new_balance, new_source_ciphertext): (u64, &ElGamalCiphertext), transcript: &mut Transcript, - ) -> Self { + ) -> Result { // generate a Pedersen commitment for the remaining balance in source let (new_source_commitment, source_opening) = Pedersen::new(source_new_balance); @@ -354,14 +369,16 @@ impl TransferProof { vec![&source_opening, opening_lo, &opening_lo_negated, opening_hi], transcript, ) - }; + }?; - Self { + Ok(Self { new_source_commitment: pod_new_source_commitment, equality_proof: equality_proof.into(), validity_proof: validity_proof.into(), - range_proof: range_proof.try_into().expect("range proof: length error"), - } + range_proof: range_proof + .try_into() + .map_err(|_| ProofGenerationError::ProofLength)?, + }) } pub fn verify( @@ -373,7 +390,7 @@ impl TransferProof { ciphertext_hi: &TransferAmountCiphertext, ciphertext_new_spendable: &ElGamalCiphertext, transcript: &mut Transcript, - ) -> Result<(), ProofError> { + ) -> Result<(), ProofVerificationError> { transcript.append_commitment(b"commitment-new-source", &self.new_source_commitment); let commitment: PedersenCommitment = self.new_source_commitment.try_into()?; diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 6a493d541fbc54..3dee9ffc6106f4 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -5,7 +5,7 @@ use { elgamal::{ElGamal, ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey}, pedersen::{Pedersen, 
PedersenCommitment}, }, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, range_proof::RangeProof, sigma_proofs::ciphertext_commitment_equality_proof::CiphertextCommitmentEqualityProof, transcript::TranscriptProtocol, @@ -57,13 +57,13 @@ impl WithdrawData { keypair: &ElGamalKeypair, current_balance: u64, current_ciphertext: &ElGamalCiphertext, - ) -> Result { + ) -> Result { // subtract withdraw amount from current balance // // errors if current_balance < amount let final_balance = current_balance .checked_sub(amount) - .ok_or(ProofError::Generation)?; + .ok_or(ProofGenerationError::NotEnoughFunds)?; // encode withdraw amount as an ElGamal ciphertext and subtract it from // current source balance @@ -78,7 +78,7 @@ impl WithdrawData { }; let mut transcript = context.new_transcript(); - let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript); + let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript)?; Ok(Self { context, proof }) } @@ -92,7 +92,7 @@ impl ZkProofData for WithdrawData { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn verify_proof(&self) -> Result<(), ProofVerificationError> { let mut transcript = self.context.new_transcript(); let elgamal_pubkey = self.context.pubkey.try_into()?; @@ -140,7 +140,7 @@ impl WithdrawProof { final_balance: u64, final_ciphertext: &ElGamalCiphertext, transcript: &mut Transcript, - ) -> Self { + ) -> Result { // generate a Pedersen commitment for `final_balance` let (commitment, opening) = Pedersen::new(final_balance); let pod_commitment: pod::PedersenCommitment = commitment.into(); @@ -157,13 +157,15 @@ impl WithdrawProof { ); let range_proof = - RangeProof::new(vec![final_balance], vec![64], vec![&opening], transcript); + RangeProof::new(vec![final_balance], vec![64], vec![&opening], transcript)?; - Self { + Ok(Self { commitment: pod_commitment, - equality_proof: equality_proof.try_into().expect("equality proof"), - range_proof: range_proof.try_into().expect("range proof"), - } + equality_proof: equality_proof.into(), + range_proof: range_proof + .try_into() + .map_err(|_| ProofGenerationError::ProofLength)?, + }) } pub fn verify( @@ -171,7 +173,7 @@ impl WithdrawProof { pubkey: &ElGamalPubkey, final_ciphertext: &ElGamalCiphertext, transcript: &mut Transcript, - ) -> Result<(), ProofError> { + ) -> Result<(), ProofVerificationError> { transcript.append_commitment(b"commitment", &self.commitment); let commitment: PedersenCommitment = self.commitment.try_into()?; diff --git a/zk-token-sdk/src/instruction/zero_balance.rs b/zk-token-sdk/src/instruction/zero_balance.rs index e506e51617ee34..d5a51bb3aa8ea0 100644 --- a/zk-token-sdk/src/instruction/zero_balance.rs +++ b/zk-token-sdk/src/instruction/zero_balance.rs @@ -8,7 +8,7 @@ use { crate::{ encryption::elgamal::{ElGamalCiphertext, ElGamalKeypair}, - errors::ProofError, + errors::{ProofGenerationError, ProofVerificationError}, sigma_proofs::zero_balance_proof::ZeroBalanceProof, transcript::TranscriptProtocol, }, @@ -53,7 +53,7 @@ impl ZeroBalanceProofData { pub fn new( keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, - ) -> Result { + ) -> Result { let pod_pubkey = pod::ElGamalPubkey(keypair.pubkey().to_bytes()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); @@ -77,7 +77,7 @@ impl ZkProofData for ZeroBalanceProofData { } #[cfg(not(target_os = "solana"))] - fn verify_proof(&self) -> Result<(), ProofError> { + fn 
verify_proof(&self) -> Result<(), ProofVerificationError> {
         let mut transcript = self.context.new_transcript();
         let pubkey = self.context.pubkey.try_into()?;
         let ciphertext = self.context.ciphertext.try_into()?;
diff --git a/zk-token-sdk/src/macros.rs b/zk-token-sdk/src/macros.rs
index cf7a3836e61d79..6351ba94224adb 100644
--- a/zk-token-sdk/src/macros.rs
+++ b/zk-token-sdk/src/macros.rs
@@ -74,13 +74,3 @@ macro_rules! define_mul_variants {
         }
     };
 }
-
-macro_rules! impl_from_transcript_error {
-    ($sigma_error_type:ty) => {
-        impl From<TranscriptError> for $sigma_error_type {
-            fn from(err: TranscriptError) -> Self {
-                ProofVerificationError::Transcript(err).into()
-            }
-        }
-    };
-}
diff --git a/zk-token-sdk/src/range_proof/errors.rs b/zk-token-sdk/src/range_proof/errors.rs
index f832365fffc7ea..f0c872f7aa3494 100644
--- a/zk-token-sdk/src/range_proof/errors.rs
+++ b/zk-token-sdk/src/range_proof/errors.rs
@@ -1,10 +1,44 @@
 //! Errors related to proving and verifying range proofs.
-use {
-    crate::errors::{ProofVerificationError, TranscriptError},
-    thiserror::Error,
-};
+use {crate::errors::TranscriptError, thiserror::Error};
 
 #[derive(Error, Clone, Debug, Eq, PartialEq)]
-#[error("range proof verification failed: {0}")]
-pub struct RangeProofError(#[from] pub(crate) ProofVerificationError);
-impl_from_transcript_error!(RangeProofError);
+pub enum RangeProofGenerationError {
+    #[error("maximum generator length exceeded")]
+    MaximumGeneratorLengthExceeded,
+    #[error("amounts, commitments, openings, or bit lengths vectors have different lengths")]
+    VectorLengthMismatch,
+    #[error("invalid bit size")]
+    InvalidBitSize,
+    #[error("insufficient generators for the proof")]
+    GeneratorLengthMismatch,
+    #[error("inner product length mismatch")]
+    InnerProductLengthMismatch,
+}
+
+#[derive(Error, Clone, Debug, Eq, PartialEq)]
+pub enum RangeProofVerificationError {
+    #[error("required algebraic relation does not hold")]
+    AlgebraicRelation,
+    #[error("malformed proof")]
+    Deserialization,
+    #[error("multiscalar multiplication failed")]
+    MultiscalarMul,
+    #[error("transcript failed to produce a challenge")]
+    Transcript(#[from] TranscriptError),
+    #[error(
+        "attempted to verify range proof with a non-power-of-two bit size or bit size is too big"
+    )]
+    InvalidBitSize,
+    #[error("insufficient generators for the proof")]
+    InvalidGeneratorsLength,
+    #[error("maximum generator length exceeded")]
+    MaximumGeneratorLengthExceeded,
+    #[error("commitments and bit lengths vectors have different lengths")]
+    VectorLengthMismatch,
+}
+
+#[derive(Error, Clone, Debug, Eq, PartialEq)]
+pub enum RangeProofGeneratorError {
+    #[error("maximum generator length exceeded")]
+    MaximumGeneratorLengthExceeded,
+}
diff --git a/zk-token-sdk/src/range_proof/generators.rs b/zk-token-sdk/src/range_proof/generators.rs
index bc0ce24fc857b1..a993d753dcad0c 100644
--- a/zk-token-sdk/src/range_proof/generators.rs
+++ b/zk-token-sdk/src/range_proof/generators.rs
@@ -1,4 +1,5 @@
 use {
+    crate::range_proof::errors::RangeProofGeneratorError,
     curve25519_dalek::{
         digest::{ExtendableOutput, Update, XofReader},
         ristretto::RistrettoPoint,
@@ -6,6 +7,9 @@ use {
     sha3::{Sha3XofReader, Shake256},
 };
 
+#[cfg(not(target_os = "solana"))]
+const MAX_GENERATOR_LENGTH: usize = u32::MAX as usize;
+
 /// Generators for Pedersen vector commitments that are used for inner-product proofs.
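All of the new error enums above follow the same thiserror pattern: one variant per failure mode, with transcript failures absorbed through #[from]. A self-contained sketch of that pattern; TranscriptError and ToyVerificationError here are toy stand-ins, not the SDK types:

use thiserror::Error;

#[derive(Error, Clone, Debug, Eq, PartialEq)]
pub enum TranscriptError {
    #[error("point is the identity")]
    ValidationError,
}

// Same shape as RangeProofVerificationError: a variant per failure mode,
// with TranscriptError converted implicitly via #[from].
#[derive(Error, Clone, Debug, Eq, PartialEq)]
pub enum ToyVerificationError {
    #[error("required algebraic relation does not hold")]
    AlgebraicRelation,
    #[error("transcript failed to produce a challenge")]
    Transcript(#[from] TranscriptError),
}

fn challenge_scalar() -> Result<u64, TranscriptError> {
    // Pretend the transcript rejected a degenerate challenge.
    Err(TranscriptError::ValidationError)
}

fn verify() -> Result<(), ToyVerificationError> {
    // `?` converts TranscriptError into ToyVerificationError through #[from],
    // which is how the verifiers in this PR absorb transcript failures.
    let _c = challenge_scalar()?;
    Ok(())
}

fn main() {
    assert_eq!(
        verify(),
        Err(ToyVerificationError::Transcript(TranscriptError::ValidationError))
    );
}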
struct GeneratorsChain { reader: Sha3XofReader, @@ -67,37 +71,44 @@ pub struct BulletproofGens { } impl BulletproofGens { - pub fn new(gens_capacity: usize) -> Self { + pub fn new(gens_capacity: usize) -> Result { let mut gens = BulletproofGens { gens_capacity: 0, G_vec: Vec::new(), H_vec: Vec::new(), }; - gens.increase_capacity(gens_capacity); - gens + gens.increase_capacity(gens_capacity)?; + Ok(gens) } /// Increases the generators' capacity to the amount specified. /// If less than or equal to the current capacity, does nothing. - pub fn increase_capacity(&mut self, new_capacity: usize) { + pub fn increase_capacity( + &mut self, + new_capacity: usize, + ) -> Result<(), RangeProofGeneratorError> { if self.gens_capacity >= new_capacity { - return; + return Ok(()); + } + + if new_capacity > MAX_GENERATOR_LENGTH { + return Err(RangeProofGeneratorError::MaximumGeneratorLengthExceeded); } - let label = [b'G']; self.G_vec.extend( - &mut GeneratorsChain::new(&[label, [b'G']].concat()) + &mut GeneratorsChain::new(&[b'G']) .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); self.H_vec.extend( - &mut GeneratorsChain::new(&[label, [b'H']].concat()) + &mut GeneratorsChain::new(&[b'H']) .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); self.gens_capacity = new_capacity; + Ok(()) } #[allow(non_snake_case)] diff --git a/zk-token-sdk/src/range_proof/inner_product.rs b/zk-token-sdk/src/range_proof/inner_product.rs index 2693e455a8f4d7..44e8e0674a3d6a 100644 --- a/zk-token-sdk/src/range_proof/inner_product.rs +++ b/zk-token-sdk/src/range_proof/inner_product.rs @@ -1,7 +1,9 @@ use { crate::{ - errors::ProofVerificationError, - range_proof::{errors::RangeProofError, util}, + range_proof::{ + errors::{RangeProofGenerationError, RangeProofVerificationError}, + util, + }, transcript::TranscriptProtocol, }, core::iter, @@ -46,7 +48,7 @@ impl InnerProductProof { mut a_vec: Vec, mut b_vec: Vec, transcript: &mut Transcript, - ) -> Self { + ) -> Result { // Create slices G, H, a, b backed by their respective // vectors. This lets us reslice as we compress the lengths // of the vectors in the main loop below. @@ -58,15 +60,20 @@ impl InnerProductProof { let mut n = G.len(); // All of the input vectors must have the same length. - assert_eq!(G.len(), n); - assert_eq!(H.len(), n); - assert_eq!(a.len(), n); - assert_eq!(b.len(), n); - assert_eq!(G_factors.len(), n); - assert_eq!(H_factors.len(), n); + if G.len() != n + || H.len() != n + || a.len() != n + || b.len() != n + || G_factors.len() != n + || H_factors.len() != n + { + return Err(RangeProofGenerationError::GeneratorLengthMismatch); + } // All of the input vectors must have a length that is a power of two. - assert!(n.is_power_of_two()); + if !n.is_power_of_two() { + return Err(RangeProofGenerationError::InvalidBitSize); + } transcript.innerproduct_domain_separator(n as u64); @@ -77,18 +84,21 @@ impl InnerProductProof { // If it's the first iteration, unroll the Hprime = H*y_inv scalar mults // into multiscalar muls, for performance. 
if n != 1 { - n /= 2; + n = n.checked_div(2).unwrap(); let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); - let c_L = util::inner_product(a_L, b_R); - let c_R = util::inner_product(a_R, b_L); + let c_L = util::inner_product(a_L, b_R) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; + let c_R = util::inner_product(a_R, b_L) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; let L = RistrettoPoint::multiscalar_mul( a_L.iter() - .zip(G_factors[n..2 * n].iter()) + // `n` was previously divided in half and therefore, it cannot overflow. + .zip(G_factors[n..n.checked_mul(2).unwrap()].iter()) .map(|(a_L_i, g)| a_L_i * g) .chain( b_R.iter() @@ -106,7 +116,7 @@ impl InnerProductProof { .map(|(a_R_i, g)| a_R_i * g) .chain( b_L.iter() - .zip(H_factors[n..2 * n].iter()) + .zip(H_factors[n..n.checked_mul(2).unwrap()].iter()) .map(|(b_L_i, h)| b_L_i * h), ) .chain(iter::once(c_R)), @@ -127,11 +137,17 @@ impl InnerProductProof { a_L[i] = a_L[i] * u + u_inv * a_R[i]; b_L[i] = b_L[i] * u_inv + u * b_R[i]; G_L[i] = RistrettoPoint::multiscalar_mul( - &[u_inv * G_factors[i], u * G_factors[n + i]], + &[ + u_inv * G_factors[i], + u * G_factors[n.checked_add(i).unwrap()], + ], &[G_L[i], G_R[i]], ); H_L[i] = RistrettoPoint::multiscalar_mul( - &[u * H_factors[i], u_inv * H_factors[n + i]], + &[ + u * H_factors[i], + u_inv * H_factors[n.checked_add(i).unwrap()], + ], &[H_L[i], H_R[i]], ) } @@ -143,14 +159,16 @@ impl InnerProductProof { } while n != 1 { - n /= 2; + n = n.checked_div(2).unwrap(); let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); - let c_L = util::inner_product(a_L, b_R); - let c_R = util::inner_product(a_R, b_L); + let c_L = util::inner_product(a_L, b_R) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; + let c_R = util::inner_product(a_R, b_L) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; let L = RistrettoPoint::multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), @@ -186,12 +204,12 @@ impl InnerProductProof { H = H_L; } - InnerProductProof { + Ok(InnerProductProof { L_vec, R_vec, a: a[0], b: b[0], - } + }) } /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and @@ -204,15 +222,15 @@ impl InnerProductProof { &self, n: usize, transcript: &mut Transcript, - ) -> Result<(Vec, Vec, Vec), RangeProofError> { + ) -> Result<(Vec, Vec, Vec), RangeProofVerificationError> { let lg_n = self.L_vec.len(); - if lg_n >= 32 { + if lg_n == 0 || lg_n >= 32 { // 4 billion multiplications should be enough for anyone // and this check prevents overflow in 1< Result<(), RangeProofError> + ) -> Result<(), RangeProofVerificationError> where IG: IntoIterator, IG::Item: Borrow, @@ -301,7 +322,7 @@ impl InnerProductProof { .iter() .map(|p| { p.decompress() - .ok_or(ProofVerificationError::Deserialization) + .ok_or(RangeProofVerificationError::Deserialization) }) .collect::, _>>()?; @@ -310,7 +331,7 @@ impl InnerProductProof { .iter() .map(|p| { p.decompress() - .ok_or(ProofVerificationError::Deserialization) + .ok_or(RangeProofVerificationError::Deserialization) }) .collect::, _>>()?; @@ -330,7 +351,7 @@ impl InnerProductProof { if expect_P == *P { Ok(()) } else { - Err(ProofVerificationError::AlgebraicRelation.into()) + Err(RangeProofVerificationError::AlgebraicRelation) } } @@ -364,21 +385,21 @@ impl 
InnerProductProof { /// * \\(n\\) is larger or equal to 32 (proof is too big), /// * any of \\(2n\\) points are not valid compressed Ristretto points, /// * any of 2 scalars are not canonical scalars modulo Ristretto group order. - pub fn from_bytes(slice: &[u8]) -> Result { + pub fn from_bytes(slice: &[u8]) -> Result { let b = slice.len(); if b % 32 != 0 { - return Err(ProofVerificationError::Deserialization.into()); + return Err(RangeProofVerificationError::Deserialization); } let num_elements = b / 32; if num_elements < 2 { - return Err(ProofVerificationError::Deserialization.into()); + return Err(RangeProofVerificationError::Deserialization); } if (num_elements - 2) % 2 != 0 { - return Err(ProofVerificationError::Deserialization.into()); + return Err(RangeProofVerificationError::Deserialization); } let lg_n = (num_elements - 2) / 2; if lg_n >= 32 { - return Err(ProofVerificationError::Deserialization.into()); + return Err(RangeProofVerificationError::Deserialization); } let mut L_vec: Vec = Vec::with_capacity(lg_n); @@ -391,9 +412,9 @@ impl InnerProductProof { let pos = 2 * lg_n * 32; let a = Scalar::from_canonical_bytes(util::read32(&slice[pos..])) - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(RangeProofVerificationError::Deserialization)?; let b = Scalar::from_canonical_bytes(util::read32(&slice[pos + 32..])) - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(RangeProofVerificationError::Deserialization)?; Ok(InnerProductProof { L_vec, R_vec, a, b }) } @@ -411,7 +432,7 @@ mod tests { fn test_basic_correctness() { let n = 32; - let bp_gens = BulletproofGens::new(n); + let bp_gens = BulletproofGens::new(n).unwrap(); let G: Vec = bp_gens.G(n).cloned().collect(); let H: Vec = bp_gens.H(n).cloned().collect(); @@ -419,7 +440,7 @@ mod tests { let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut OsRng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut OsRng)).collect(); - let c = util::inner_product(&a, &b); + let c = util::inner_product(&a, &b).unwrap(); let G_factors: Vec = iter::repeat(Scalar::one()).take(n).collect(); @@ -452,7 +473,8 @@ mod tests { a.clone(), b.clone(), &mut prover_transcript, - ); + ) + .unwrap(); assert!(proof .verify( diff --git a/zk-token-sdk/src/range_proof/mod.rs b/zk-token-sdk/src/range_proof/mod.rs index 769df8f4628112..6658c350495473 100644 --- a/zk-token-sdk/src/range_proof/mod.rs +++ b/zk-token-sdk/src/range_proof/mod.rs @@ -20,9 +20,10 @@ use { use { crate::{ encryption::pedersen::{G, H}, - errors::ProofVerificationError, range_proof::{ - errors::RangeProofError, generators::BulletproofGens, inner_product::InnerProductProof, + errors::{RangeProofGenerationError, RangeProofVerificationError}, + generators::BulletproofGens, + inner_product::InnerProductProof, }, transcript::TranscriptProtocol, }, @@ -71,17 +72,29 @@ impl RangeProof { bit_lengths: Vec, openings: Vec<&PedersenOpening>, transcript: &mut Transcript, - ) -> Self { + ) -> Result { // amounts, bit-lengths, openings must be same length vectors let m = amounts.len(); - assert_eq!(bit_lengths.len(), m); - assert_eq!(openings.len(), m); + if bit_lengths.len() != m || openings.len() != m { + return Err(RangeProofGenerationError::VectorLengthMismatch); + } + + // each bit length must be greater than 0 for the proof to make sense + if bit_lengths + .iter() + .any(|bit_length| *bit_length == 0 || *bit_length > u64::BITS as usize) + { + return Err(RangeProofGenerationError::InvalidBitSize); + } // total vector dimension to compute the ultimate inner product proof for let 
nm: usize = bit_lengths.iter().sum();
-        assert!(nm.is_power_of_two());
+        if !nm.is_power_of_two() {
+            return Err(RangeProofGenerationError::VectorLengthMismatch);
+        }
 
-        let bp_gens = BulletproofGens::new(nm);
+        let bp_gens = BulletproofGens::new(nm)
+            .map_err(|_| RangeProofGenerationError::MaximumGeneratorLengthExceeded)?;
 
         // bit-decompose values and generate their Pedersen vector commitment
         let a_blinding = Scalar::random(&mut OsRng);
@@ -91,7 +104,10 @@ impl RangeProof {
         for (amount_i, n_i) in amounts.iter().zip(bit_lengths.iter()) {
             for j in 0..(*n_i) {
                 let (G_ij, H_ij) = gens_iter.next().unwrap();
-                let v_ij = Choice::from(((amount_i >> j) & 1) as u8);
+
+                // `j` is guaranteed to be at most `u64::BITS` (a 6-bit number) and therefore,
+                // casting is lossless and right shift can be safely unwrapped
+                let v_ij = Choice::from((amount_i.checked_shr(j as u32).unwrap() & 1) as u8);
                 let mut point = -H_ij;
                 point.conditional_assign(G_ij, v_ij);
                 A += point;
@@ -136,7 +152,9 @@ impl RangeProof {
             let mut exp_2 = Scalar::one();
 
             for j in 0..(*n_i) {
-                let a_L_j = Scalar::from((amount_i >> j) & 1);
+                // `j` is guaranteed to be at most `u64::BITS` (a 6-bit number) and therefore,
+                // casting is lossless and right shift can be safely unwrapped
+                let a_L_j = Scalar::from(amount_i.checked_shr(j as u32).unwrap() & 1);
                 let a_R_j = a_L_j - Scalar::one();
 
                 l_poly.0[i] = a_L_j - z;
@@ -146,13 +164,17 @@ impl RangeProof {
                 exp_y *= y;
                 exp_2 = exp_2 + exp_2;
-                i += 1;
+
+                // `i` is capped by the sum of vectors in `bit_lengths`
+                i = i.checked_add(1).unwrap();
             }
             exp_z *= z;
         }
 
         // define t(x) = <l(x), r(x)> = t_0 + t_1*x + t_2*x^2
-        let t_poly = l_poly.inner_product(&r_poly);
+        let t_poly = l_poly
+            .inner_product(&r_poly)
+            .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?;
 
         // generate Pedersen commitment for the coefficients t_1 and t_2
         let (T_1, t_1_blinding) = Pedersen::new(t_poly.1);
@@ -214,9 +236,9 @@ impl RangeProof {
             l_vec,
             r_vec,
             transcript,
-        );
+        )?;
 
-        RangeProof {
+        Ok(RangeProof {
             A,
             S,
             T_1,
@@ -225,7 +247,7 @@ impl RangeProof {
             t_x_blinding,
             e_blinding,
             ipp_proof,
-        }
+        })
     }
 
     #[allow(clippy::many_single_char_names)]
@@ -234,16 +256,19 @@ impl RangeProof {
         comms: Vec<&PedersenCommitment>,
         bit_lengths: Vec<usize>,
         transcript: &mut Transcript,
-    ) -> Result<(), RangeProofError> {
+    ) -> Result<(), RangeProofVerificationError> {
         // commitments and bit-lengths must be same length vectors
-        assert_eq!(comms.len(), bit_lengths.len());
+        if comms.len() != bit_lengths.len() {
+            return Err(RangeProofVerificationError::VectorLengthMismatch);
+        }
 
         let m = bit_lengths.len();
         let nm: usize = bit_lengths.iter().sum();
-        let bp_gens = BulletproofGens::new(nm);
+        let bp_gens = BulletproofGens::new(nm)
+            .map_err(|_| RangeProofVerificationError::MaximumGeneratorLengthExceeded)?;
 
         if !nm.is_power_of_two() {
-            return Err(ProofVerificationError::InvalidBitSize.into());
+            return Err(RangeProofVerificationError::InvalidBitSize);
         }
 
         // append proof data to transcript and derive appropriate challenge scalars
@@ -320,12 +345,12 @@ impl RangeProof {
                 .chain(bp_gens.H(nm).map(|&x| Some(x)))
                 .chain(comms.iter().map(|V| Some(*V.get_point()))),
         )
-        .ok_or(ProofVerificationError::MultiscalarMul)?;
+        .ok_or(RangeProofVerificationError::MultiscalarMul)?;
 
         if mega_check.is_identity() {
             Ok(())
         } else {
-            Err(ProofVerificationError::AlgebraicRelation.into())
+            Err(RangeProofVerificationError::AlgebraicRelation)
         }
     }
 
@@ -346,12 +371,12 @@ impl RangeProof {
     // Following the dalek rangeproof library signature for now.
The exact method signature can be // changed. - pub fn from_bytes(slice: &[u8]) -> Result { + pub fn from_bytes(slice: &[u8]) -> Result { if slice.len() % 32 != 0 { - return Err(ProofVerificationError::Deserialization.into()); + return Err(RangeProofVerificationError::Deserialization); } if slice.len() < 7 * 32 { - return Err(ProofVerificationError::Deserialization.into()); + return Err(RangeProofVerificationError::Deserialization); } let A = CompressedRistretto(util::read32(&slice[0..])); @@ -360,11 +385,11 @@ impl RangeProof { let T_2 = CompressedRistretto(util::read32(&slice[3 * 32..])); let t_x = Scalar::from_canonical_bytes(util::read32(&slice[4 * 32..])) - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(RangeProofVerificationError::Deserialization)?; let t_x_blinding = Scalar::from_canonical_bytes(util::read32(&slice[5 * 32..])) - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(RangeProofVerificationError::Deserialization)?; let e_blinding = Scalar::from_canonical_bytes(util::read32(&slice[6 * 32..])) - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(RangeProofVerificationError::Deserialization)?; let ipp_proof = InnerProductProof::from_bytes(&slice[7 * 32..])?; @@ -410,7 +435,8 @@ mod tests { let mut transcript_create = Transcript::new(b"Test"); let mut transcript_verify = Transcript::new(b"Test"); - let proof = RangeProof::new(vec![55], vec![32], vec![&open], &mut transcript_create); + let proof = + RangeProof::new(vec![55], vec![32], vec![&open], &mut transcript_create).unwrap(); assert!(proof .verify(vec![&comm], vec![32], &mut transcript_verify) @@ -431,7 +457,8 @@ mod tests { vec![64, 32, 32], vec![&open_1, &open_2, &open_3], &mut transcript_create, - ); + ) + .unwrap(); assert!(proof .verify( diff --git a/zk-token-sdk/src/range_proof/util.rs b/zk-token-sdk/src/range_proof/util.rs index c551abd8f3a15c..4a76543d475bc0 100644 --- a/zk-token-sdk/src/range_proof/util.rs +++ b/zk-token-sdk/src/range_proof/util.rs @@ -11,20 +11,20 @@ impl VecPoly1 { VecPoly1(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) } - pub fn inner_product(&self, rhs: &VecPoly1) -> Poly2 { + pub fn inner_product(&self, rhs: &VecPoly1) -> Option { // Uses Karatsuba's method let l = self; let r = rhs; - let t0 = inner_product(&l.0, &r.0); - let t2 = inner_product(&l.1, &r.1); + let t0 = inner_product(&l.0, &r.0)?; + let t2 = inner_product(&l.1, &r.1)?; let l0_plus_l1 = add_vec(&l.0, &l.1); let r0_plus_r1 = add_vec(&r.0, &r.1); - let t1 = inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; + let t1 = inner_product(&l0_plus_l1, &r0_plus_r1)? - t0 - t2; - Poly2(t0, t1, t2) + Some(Poly2(t0, t1, t2)) } pub fn eval(&self, x: Scalar) -> Vec { @@ -98,16 +98,16 @@ pub fn read32(data: &[u8]) -> [u8; 32] { /// \\[ /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. /// \\] -/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. -pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { +/// Errors if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. 
+pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Option { let mut out = Scalar::zero(); if a.len() != b.len() { - panic!("inner_product(a,b): lengths of vectors do not match"); + return None; } for i in 0..a.len() { out += a[i] * b[i]; } - out + Some(out) } /// Takes the sum of all the powers of `x`, up to `n` diff --git a/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs b/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs index 59b7aceca20978..7fbe17fb195ddd 100644 --- a/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity_proof.rs @@ -16,7 +16,7 @@ use crate::encryption::{ use { crate::{ sigma_proofs::{ - errors::ValidityProofError, + errors::ValidityProofVerificationError, grouped_ciphertext_validity_proof::GroupedCiphertext2HandlesValidityProof, }, transcript::TranscriptProtocol, @@ -80,7 +80,7 @@ impl BatchedGroupedCiphertext2HandlesValidityProof { (destination_handle_lo, destination_handle_hi): (&DecryptHandle, &DecryptHandle), (auditor_handle_lo, auditor_handle_hi): (&DecryptHandle, &DecryptHandle), transcript: &mut Transcript, - ) -> Result<(), ValidityProofError> { + ) -> Result<(), ValidityProofVerificationError> { transcript.batched_grouped_ciphertext_validity_proof_domain_separator(); let t = transcript.challenge_scalar(b"t"); @@ -103,7 +103,7 @@ impl BatchedGroupedCiphertext2HandlesValidityProof { self.0.to_bytes() } - pub fn from_bytes(bytes: &[u8]) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { GroupedCiphertext2HandlesValidityProof::from_bytes(bytes).map(Self) } } diff --git a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs index d33587373c3072..70a5de9c4c5efb 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs @@ -10,7 +10,6 @@ use { elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey}, pedersen::{PedersenOpening, G, H}, }, - errors::ProofVerificationError, sigma_proofs::{canonical_scalar_from_optional_slice, ristretto_point_from_optional_slice}, UNIT_LEN, }, @@ -19,7 +18,10 @@ use { zeroize::Zeroize, }; use { - crate::{sigma_proofs::errors::EqualityProofError, transcript::TranscriptProtocol}, + crate::{ + sigma_proofs::errors::{EqualityProofVerificationError, SigmaProofVerificationError}, + transcript::TranscriptProtocol, + }, curve25519_dalek::{ ristretto::{CompressedRistretto, RistrettoPoint}, scalar::Scalar, @@ -138,7 +140,7 @@ impl CiphertextCiphertextEqualityProof { source_ciphertext: &ElGamalCiphertext, destination_ciphertext: &ElGamalCiphertext, transcript: &mut Transcript, - ) -> Result<(), EqualityProofError> { + ) -> Result<(), EqualityProofVerificationError> { transcript.equality_proof_domain_separator(); // extract the relevant scalar and Ristretto points from the inputs @@ -169,19 +171,19 @@ impl CiphertextCiphertextEqualityProof { let Y_0 = self .Y_0 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_1 = self .Y_1 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_2 = self .Y_2 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_3 = self .Y_3 .decompress() - 
.ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let check = RistrettoPoint::vartime_multiscalar_mul( vec![ @@ -221,7 +223,7 @@ impl CiphertextCiphertextEqualityProof { if check.is_identity() { Ok(()) } else { - Err(ProofVerificationError::AlgebraicRelation.into()) + Err(SigmaProofVerificationError::AlgebraicRelation.into()) } } @@ -240,7 +242,7 @@ impl CiphertextCiphertextEqualityProof { buf } - pub fn from_bytes(bytes: &[u8]) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let mut chunks = bytes.chunks(UNIT_LEN); let Y_0 = ristretto_point_from_optional_slice(chunks.next())?; diff --git a/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs index 2217246ea36061..6451d46d1916f2 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs @@ -15,7 +15,6 @@ use { elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey}, pedersen::{PedersenCommitment, PedersenOpening, G, H}, }, - errors::ProofVerificationError, sigma_proofs::{canonical_scalar_from_optional_slice, ristretto_point_from_optional_slice}, UNIT_LEN, }, @@ -24,7 +23,10 @@ use { zeroize::Zeroize, }; use { - crate::{sigma_proofs::errors::EqualityProofError, transcript::TranscriptProtocol}, + crate::{ + sigma_proofs::errors::{EqualityProofVerificationError, SigmaProofVerificationError}, + transcript::TranscriptProtocol, + }, curve25519_dalek::{ ristretto::{CompressedRistretto, RistrettoPoint}, scalar::Scalar, @@ -136,7 +138,7 @@ impl CiphertextCommitmentEqualityProof { source_ciphertext: &ElGamalCiphertext, destination_commitment: &PedersenCommitment, transcript: &mut Transcript, - ) -> Result<(), EqualityProofError> { + ) -> Result<(), EqualityProofVerificationError> { transcript.equality_proof_domain_separator(); // extract the relevant scalar and Ristretto points from the inputs @@ -161,15 +163,15 @@ impl CiphertextCommitmentEqualityProof { let Y_0 = self .Y_0 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_1 = self .Y_1 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_2 = self .Y_2 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let check = RistrettoPoint::vartime_multiscalar_mul( vec![ @@ -203,7 +205,7 @@ impl CiphertextCommitmentEqualityProof { if check.is_identity() { Ok(()) } else { - Err(ProofVerificationError::AlgebraicRelation.into()) + Err(SigmaProofVerificationError::AlgebraicRelation.into()) } } @@ -219,7 +221,7 @@ impl CiphertextCommitmentEqualityProof { buf } - pub fn from_bytes(bytes: &[u8]) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let mut chunks = bytes.chunks(UNIT_LEN); let Y_0 = ristretto_point_from_optional_slice(chunks.next())?; let Y_1 = ristretto_point_from_optional_slice(chunks.next())?; diff --git a/zk-token-sdk/src/sigma_proofs/errors.rs b/zk-token-sdk/src/sigma_proofs/errors.rs index 117d5c44d382ee..1ce28b01bf85a8 100644 --- a/zk-token-sdk/src/sigma_proofs/errors.rs +++ b/zk-token-sdk/src/sigma_proofs/errors.rs @@ -1,30 +1,49 @@ //! Errors related to proving and verifying sigma proofs. 
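Every verifier in these files now rejects a malformed compressed point by mapping a failed decompression to SigmaProofVerificationError::Deserialization before doing any group arithmetic. A minimal sketch of the same guard written directly against curve25519-dalek; decompress_checked and VerifyError are illustrative, not SDK items:

use curve25519_dalek::ristretto::{CompressedRistretto, RistrettoPoint};

#[derive(Debug, PartialEq)]
enum VerifyError {
    Deserialization,
}

// Same shape as the proof verifiers above: a compressed point taken off the
// wire must decompress to a valid Ristretto point before it is used.
fn decompress_checked(bytes: [u8; 32]) -> Result<RistrettoPoint, VerifyError> {
    CompressedRistretto(bytes)
        .decompress()
        .ok_or(VerifyError::Deserialization)
}

fn main() {
    // The all-zero encoding is the canonical identity and decompresses fine...
    assert!(decompress_checked([0u8; 32]).is_ok());
    // ...while, e.g., 32 bytes of 0xFF is not a canonical encoding and fails.
    assert_eq!(
        decompress_checked([0xFF; 32]).unwrap_err(),
        VerifyError::Deserialization
    );
}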
-use { - crate::errors::{ProofVerificationError, TranscriptError}, - thiserror::Error, -}; +use {crate::errors::TranscriptError, thiserror::Error}; + +#[derive(Error, Clone, Debug, Eq, PartialEq)] +pub enum SigmaProofVerificationError { + #[error("required algebraic relation does not hold")] + AlgebraicRelation, + #[error("malformed proof")] + Deserialization, + #[error("multiscalar multiplication failed")] + MultiscalarMul, + #[error("transcript failed to produce a challenge")] + Transcript(#[from] TranscriptError), +} + +macro_rules! impl_from_transcript_error { + ($sigma_error_type:ty) => { + impl From for $sigma_error_type { + fn from(err: TranscriptError) -> Self { + SigmaProofVerificationError::Transcript(err).into() + } + } + }; +} #[derive(Error, Clone, Debug, Eq, PartialEq)] #[error("equality proof verification failed: {0}")] -pub struct EqualityProofError(#[from] pub(crate) ProofVerificationError); -impl_from_transcript_error!(EqualityProofError); +pub struct EqualityProofVerificationError(#[from] pub(crate) SigmaProofVerificationError); +impl_from_transcript_error!(EqualityProofVerificationError); #[derive(Error, Clone, Debug, Eq, PartialEq)] #[error("validity proof verification failed: {0}")] -pub struct ValidityProofError(#[from] pub(crate) ProofVerificationError); -impl_from_transcript_error!(ValidityProofError); +pub struct ValidityProofVerificationError(#[from] pub(crate) SigmaProofVerificationError); +impl_from_transcript_error!(ValidityProofVerificationError); #[derive(Error, Clone, Debug, Eq, PartialEq)] #[error("zero-balance proof verification failed: {0}")] -pub struct ZeroBalanceProofError(#[from] pub(crate) ProofVerificationError); -impl_from_transcript_error!(ZeroBalanceProofError); +pub struct ZeroBalanceProofVerificationError(#[from] pub(crate) SigmaProofVerificationError); +impl_from_transcript_error!(ZeroBalanceProofVerificationError); #[derive(Error, Clone, Debug, Eq, PartialEq)] #[error("fee sigma proof verification failed: {0}")] -pub struct FeeSigmaProofError(#[from] pub(crate) ProofVerificationError); -impl_from_transcript_error!(FeeSigmaProofError); +pub struct FeeSigmaProofVerificationError(#[from] pub(crate) SigmaProofVerificationError); +impl_from_transcript_error!(FeeSigmaProofVerificationError); #[derive(Error, Clone, Debug, Eq, PartialEq)] #[error("public key validity proof verification failed: {0}")] -pub struct PubkeyValidityProofError(#[from] pub(crate) ProofVerificationError); -impl_from_transcript_error!(PubkeyValidityProofError); +pub struct PubkeyValidityProofVerificationError(#[from] pub(crate) SigmaProofVerificationError); +impl_from_transcript_error!(PubkeyValidityProofVerificationError); diff --git a/zk-token-sdk/src/sigma_proofs/fee_proof.rs b/zk-token-sdk/src/sigma_proofs/fee_proof.rs index a811758be51049..c3a431768f1226 100644 --- a/zk-token-sdk/src/sigma_proofs/fee_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/fee_proof.rs @@ -8,7 +8,7 @@ //! The protocol guarantees computational soundness (by the hardness of discrete log) and perfect //! zero-knowledge in the random oracle model. //! -//! [`ZK Token proof program`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof +//! 
[`ZK Token proof program`]: https://docs.solanalabs.com/runtime/zk-token-proof #[cfg(not(target_os = "solana"))] use { @@ -21,7 +21,7 @@ use { }; use { crate::{ - errors::ProofVerificationError, sigma_proofs::errors::FeeSigmaProofError, + sigma_proofs::errors::{FeeSigmaProofVerificationError, SigmaProofVerificationError}, transcript::TranscriptProtocol, }, curve25519_dalek::{ @@ -313,7 +313,7 @@ impl FeeSigmaProof { claimed_commitment: &PedersenCommitment, max_fee: u64, transcript: &mut Transcript, - ) -> Result<(), FeeSigmaProofError> { + ) -> Result<(), FeeSigmaProofVerificationError> { // extract the relevant scalar and Ristretto points from the input let m = Scalar::from(max_fee); @@ -329,19 +329,19 @@ impl FeeSigmaProof { .fee_max_proof .Y_max_proof .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let z_max = self.fee_max_proof.z_max_proof; let Y_delta_real = self .fee_equality_proof .Y_delta .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_claimed = self .fee_equality_proof .Y_claimed .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let z_x = self.fee_equality_proof.z_x; let z_delta_real = self.fee_equality_proof.z_delta; let z_claimed = self.fee_equality_proof.z_claimed; @@ -387,7 +387,7 @@ impl FeeSigmaProof { if check.is_identity() { Ok(()) } else { - Err(ProofVerificationError::AlgebraicRelation.into()) + Err(SigmaProofVerificationError::AlgebraicRelation.into()) } } @@ -429,7 +429,7 @@ impl FeeSigmaProof { buf } - pub fn from_bytes(bytes: &[u8]) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let mut chunks = bytes.chunks(UNIT_LEN); let Y_max_proof = ristretto_point_from_optional_slice(chunks.next())?; let z_max_proof = canonical_scalar_from_optional_slice(chunks.next())?; diff --git a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs index 3812cce69eefab..9f1df14c316ef1 100644 --- a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof.rs @@ -15,7 +15,6 @@ use { elgamal::{DecryptHandle, ElGamalPubkey}, pedersen::{PedersenCommitment, PedersenOpening, G, H}, }, - errors::ProofVerificationError, sigma_proofs::{canonical_scalar_from_optional_slice, ristretto_point_from_optional_slice}, UNIT_LEN, }, @@ -24,7 +23,10 @@ use { zeroize::Zeroize, }; use { - crate::{sigma_proofs::errors::ValidityProofError, transcript::TranscriptProtocol}, + crate::{ + sigma_proofs::errors::{SigmaProofVerificationError, ValidityProofVerificationError}, + transcript::TranscriptProtocol, + }, curve25519_dalek::{ ristretto::{CompressedRistretto, RistrettoPoint}, scalar::Scalar, @@ -128,7 +130,7 @@ impl GroupedCiphertext2HandlesValidityProof { (destination_pubkey, auditor_pubkey): (&ElGamalPubkey, &ElGamalPubkey), (destination_handle, auditor_handle): (&DecryptHandle, &DecryptHandle), transcript: &mut Transcript, - ) -> Result<(), ValidityProofError> { + ) -> Result<(), ValidityProofVerificationError> { transcript.grouped_ciphertext_validity_proof_domain_separator(); // include Y_0, Y_1, Y_2 to transcript and extract challenges @@ -148,15 +150,15 @@ impl GroupedCiphertext2HandlesValidityProof { let Y_0 = self .Y_0 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + 
.ok_or(SigmaProofVerificationError::Deserialization)?; let Y_1 = self .Y_1 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let Y_2 = self .Y_2 .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let P_dest = destination_pubkey.get_point(); let P_auditor = auditor_pubkey.get_point(); @@ -195,7 +197,7 @@ impl GroupedCiphertext2HandlesValidityProof { if check.is_identity() { Ok(()) } else { - Err(ProofVerificationError::AlgebraicRelation.into()) + Err(SigmaProofVerificationError::AlgebraicRelation.into()) } } @@ -210,7 +212,7 @@ impl GroupedCiphertext2HandlesValidityProof { buf } - pub fn from_bytes(bytes: &[u8]) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let mut chunks = bytes.chunks(UNIT_LEN); let Y_0 = ristretto_point_from_optional_slice(chunks.next())?; let Y_1 = ristretto_point_from_optional_slice(chunks.next())?; diff --git a/zk-token-sdk/src/sigma_proofs/mod.rs b/zk-token-sdk/src/sigma_proofs/mod.rs index f2c68a1bde7391..ddaf712f7c1624 100644 --- a/zk-token-sdk/src/sigma_proofs/mod.rs +++ b/zk-token-sdk/src/sigma_proofs/mod.rs @@ -3,7 +3,7 @@ //! Formal documentation and security proofs for the sigma proofs in this module can be found in //! [`ZK Token proof`] program documentation. //! -//! [`ZK Token proof`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof +//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof pub mod batched_grouped_ciphertext_validity_proof; pub mod ciphertext_ciphertext_equality_proof; @@ -16,7 +16,7 @@ pub mod zero_balance_proof; #[cfg(not(target_os = "solana"))] use { - crate::{errors::ProofVerificationError, RISTRETTO_POINT_LEN, SCALAR_LEN}, + crate::{sigma_proofs::errors::SigmaProofVerificationError, RISTRETTO_POINT_LEN, SCALAR_LEN}, curve25519_dalek::{ristretto::CompressedRistretto, scalar::Scalar}, }; @@ -27,11 +27,11 @@ use { #[cfg(not(target_os = "solana"))] fn ristretto_point_from_optional_slice( optional_slice: Option<&[u8]>, -) -> Result { +) -> Result { optional_slice .and_then(|slice| (slice.len() == RISTRETTO_POINT_LEN).then_some(slice)) .map(CompressedRistretto::from_slice) - .ok_or(ProofVerificationError::Deserialization) + .ok_or(SigmaProofVerificationError::Deserialization) } /// Deserializes an optional slice of bytes to a scalar. 
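Both helpers in this file collapse every malformed input, whether a missing chunk, a wrong length, or non-canonical bytes, into the single Deserialization error through one Option chain. The same shape with a toy 8-byte field standing in for the SDK's 32-byte encodings; u64_from_optional_slice is illustrative, not SDK code:

#[derive(Debug, PartialEq)]
enum DeserializationError {
    Deserialization,
}

const FIELD_LEN: usize = 8;

// Mirrors canonical_scalar_from_optional_slice: length-check the chunk,
// convert it, and map any failure along the way to one error.
fn u64_from_optional_slice(
    optional_slice: Option<&[u8]>,
) -> Result<u64, DeserializationError> {
    optional_slice
        .and_then(|slice| (slice.len() == FIELD_LEN).then_some(slice))
        .and_then(|slice| slice.try_into().ok())
        .map(u64::from_le_bytes)
        .ok_or(DeserializationError::Deserialization)
}

fn main() {
    let bytes = 42u64.to_le_bytes();
    let mut chunks = bytes.chunks(FIELD_LEN);
    assert_eq!(u64_from_optional_slice(chunks.next()), Ok(42));
    // The iterator is exhausted, so the next chunk is None and the chain errors.
    assert_eq!(
        u64_from_optional_slice(chunks.next()),
        Err(DeserializationError::Deserialization)
    );
}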
@@ -41,10 +41,10 @@ fn ristretto_point_from_optional_slice( #[cfg(not(target_os = "solana"))] fn canonical_scalar_from_optional_slice( optional_slice: Option<&[u8]>, -) -> Result { +) -> Result { optional_slice .and_then(|slice| (slice.len() == SCALAR_LEN).then_some(slice)) // if chunk is the wrong length, convert to None .and_then(|slice| slice.try_into().ok()) // convert to array .and_then(Scalar::from_canonical_bytes) - .ok_or(ProofVerificationError::Deserialization) + .ok_or(SigmaProofVerificationError::Deserialization) } diff --git a/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs b/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs index 450b7bb3e2e1e7..e0d80f2a528ef8 100644 --- a/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs @@ -18,7 +18,7 @@ use { }; use { crate::{ - errors::ProofVerificationError, sigma_proofs::errors::PubkeyValidityProofError, + sigma_proofs::errors::{PubkeyValidityProofVerificationError, SigmaProofVerificationError}, transcript::TranscriptProtocol, }, curve25519_dalek::{ @@ -92,7 +92,7 @@ impl PubkeyValidityProof { self, elgamal_pubkey: &ElGamalPubkey, transcript: &mut Transcript, - ) -> Result<(), PubkeyValidityProofError> { + ) -> Result<(), PubkeyValidityProofVerificationError> { transcript.pubkey_proof_domain_separator(); // extract the relvant scalar and Ristretto points from the input @@ -106,7 +106,7 @@ impl PubkeyValidityProof { let Y = self .Y .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + .ok_or(SigmaProofVerificationError::Deserialization)?; let check = RistrettoPoint::vartime_multiscalar_mul( vec![&self.z, &(-&c), &(-&Scalar::one())], @@ -116,7 +116,7 @@ impl PubkeyValidityProof { if check.is_identity() { Ok(()) } else { - Err(ProofVerificationError::AlgebraicRelation.into()) + Err(SigmaProofVerificationError::AlgebraicRelation.into()) } } @@ -128,7 +128,7 @@ impl PubkeyValidityProof { buf } - pub fn from_bytes(bytes: &[u8]) -> Result { + pub fn from_bytes(bytes: &[u8]) -> Result { let mut chunks = bytes.chunks(UNIT_LEN); let Y = ristretto_point_from_optional_slice(chunks.next())?; let z = canonical_scalar_from_optional_slice(chunks.next())?; diff --git a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs index 04120b7c7bd451..cab8759f4f2a87 100644 --- a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs @@ -10,7 +10,6 @@ use { elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey}, pedersen::H, }, - errors::ProofVerificationError, sigma_proofs::{canonical_scalar_from_optional_slice, ristretto_point_from_optional_slice}, UNIT_LEN, }, @@ -19,7 +18,10 @@ use { zeroize::Zeroize, }; use { - crate::{sigma_proofs::errors::ZeroBalanceProofError, transcript::TranscriptProtocol}, + crate::{ + sigma_proofs::errors::{SigmaProofVerificationError, ZeroBalanceProofVerificationError}, + transcript::TranscriptProtocol, + }, curve25519_dalek::{ ristretto::{CompressedRistretto, RistrettoPoint}, scalar::Scalar, @@ -102,7 +104,7 @@ impl ZeroBalanceProof { elgamal_pubkey: &ElGamalPubkey, ciphertext: &ElGamalCiphertext, transcript: &mut Transcript, - ) -> Result<(), ZeroBalanceProofError> { + ) -> Result<(), ZeroBalanceProofVerificationError> { transcript.zero_balance_proof_domain_separator(); // extract the relevant scalar and Ristretto points from the input @@ -123,11 +125,11 @@ impl ZeroBalanceProof { let Y_P = self .Y_P .decompress() - .ok_or(ProofVerificationError::Deserialization)?; + 
diff --git a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs
index 04120b7c7bd451..cab8759f4f2a87 100644
--- a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs
+++ b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs
@@ -10,7 +10,6 @@ use {
             elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey},
             pedersen::H,
         },
-        errors::ProofVerificationError,
         sigma_proofs::{canonical_scalar_from_optional_slice, ristretto_point_from_optional_slice},
         UNIT_LEN,
     },
@@ -19,7 +18,10 @@ use {
     zeroize::Zeroize,
 };
 use {
-    crate::{sigma_proofs::errors::ZeroBalanceProofError, transcript::TranscriptProtocol},
+    crate::{
+        sigma_proofs::errors::{SigmaProofVerificationError, ZeroBalanceProofVerificationError},
+        transcript::TranscriptProtocol,
+    },
     curve25519_dalek::{
         ristretto::{CompressedRistretto, RistrettoPoint},
         scalar::Scalar,
@@ -102,7 +104,7 @@ impl ZeroBalanceProof {
         elgamal_pubkey: &ElGamalPubkey,
         ciphertext: &ElGamalCiphertext,
         transcript: &mut Transcript,
-    ) -> Result<(), ZeroBalanceProofError> {
+    ) -> Result<(), ZeroBalanceProofVerificationError> {
         transcript.zero_balance_proof_domain_separator();
 
         // extract the relevant scalar and Ristretto points from the input
@@ -123,11 +125,11 @@ impl ZeroBalanceProof {
         let Y_P = self
             .Y_P
             .decompress()
-            .ok_or(ProofVerificationError::Deserialization)?;
+            .ok_or(SigmaProofVerificationError::Deserialization)?;
         let Y_D = self
             .Y_D
             .decompress()
-            .ok_or(ProofVerificationError::Deserialization)?;
+            .ok_or(SigmaProofVerificationError::Deserialization)?;
 
         // check the required algebraic relation
         let check = RistrettoPoint::multiscalar_mul(
@@ -152,7 +154,7 @@ impl ZeroBalanceProof {
         if check.is_identity() {
             Ok(())
         } else {
-            Err(ProofVerificationError::AlgebraicRelation.into())
+            Err(SigmaProofVerificationError::AlgebraicRelation.into())
         }
     }
 
@@ -165,7 +167,7 @@ impl ZeroBalanceProof {
         buf
     }
 
-    pub fn from_bytes(bytes: &[u8]) -> Result<Self, ZeroBalanceProofError> {
+    pub fn from_bytes(bytes: &[u8]) -> Result<Self, ZeroBalanceProofVerificationError> {
         let mut chunks = bytes.chunks(UNIT_LEN);
         let Y_P = ristretto_point_from_optional_slice(chunks.next())?;
         let Y_D = ristretto_point_from_optional_slice(chunks.next())?;
diff --git a/zk-token-sdk/src/zk_token_elgamal/convert.rs b/zk-token-sdk/src/zk_token_elgamal/convert.rs
index 44ebdc27646581..c5e955e35be3f5 100644
--- a/zk-token-sdk/src/zk_token_elgamal/convert.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/convert.rs
@@ -1,4 +1,3 @@
-pub use target_arch::*;
 use {super::pod, crate::curve25519::ristretto::PodRistrettoPoint};
 
 impl From<(pod::PedersenCommitment, pod::DecryptHandle)> for pod::ElGamalCiphertext {
@@ -50,7 +49,7 @@ impl From<DecryptHandle> for pod::DecryptHandle {
 mod target_arch {
     use {
         super::pod,
-        crate::{curve25519::scalar::PodScalar, errors::ProofError},
+        crate::{curve25519::scalar::PodScalar, encryption::elgamal::ElGamalError},
         curve25519_dalek::{ristretto::CompressedRistretto, scalar::Scalar},
         std::convert::TryFrom,
     };
@@ -62,10 +61,10 @@ mod target_arch {
     }
 
     impl TryFrom<PodScalar> for Scalar {
-        type Error = ProofError;
+        type Error = ElGamalError;
 
         fn try_from(pod: PodScalar) -> Result<Self, Self::Error> {
-            Scalar::from_canonical_bytes(pod.0).ok_or(ProofError::CiphertextDeserialization)
+            Scalar::from_canonical_bytes(pod.0).ok_or(ElGamalError::CiphertextDeserialization)
         }
     }
 
@@ -102,7 +101,8 @@ mod tests {
         let mut transcript_create = Transcript::new(b"Test");
         let mut transcript_verify = Transcript::new(b"Test");
 
-        let proof = RangeProof::new(vec![55], vec![64], vec![&open], &mut transcript_create);
+        let proof =
+            RangeProof::new(vec![55], vec![64], vec![&open], &mut transcript_create).unwrap();
 
         let proof_serialized: pod::RangeProofU64 = proof.try_into().unwrap();
         let proof_deserialized: RangeProof = proof_serialized.try_into().unwrap();
@@ -112,7 +112,8 @@
             .is_ok());
 
         // should fail to serialize to pod::RangeProofU128
-        let proof = RangeProof::new(vec![55], vec![64], vec![&open], &mut transcript_create);
+        let proof =
+            RangeProof::new(vec![55], vec![64], vec![&open], &mut transcript_create).unwrap();
 
         assert!(TryInto::<pod::RangeProofU128>::try_into(proof).is_err());
     }
@@ -131,7 +132,8 @@
             vec![64, 32, 32],
             vec![&open_1, &open_2, &open_3],
             &mut transcript_create,
-        );
+        )
+        .unwrap();
 
         let proof_serialized: pod::RangeProofU128 = proof.try_into().unwrap();
         let proof_deserialized: RangeProof = proof_serialized.try_into().unwrap();
@@ -150,7 +152,8 @@
             vec![64, 32, 32],
             vec![&open_1, &open_2, &open_3],
             &mut transcript_create,
-        );
+        )
+        .unwrap();
 
         assert!(TryInto::<pod::RangeProofU256>::try_into(proof).is_err());
     }
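Editor's note: the `.unwrap()` calls added to these tests follow from `RangeProof::new` now returning a `Result` instead of panicking internally. A hedged sketch of that calling pattern, using a hypothetical stand-in constructor (the real signature also takes openings and a transcript; the power-of-two check below is one plausible failure mode of aggregated bulletproof generation, not necessarily the only one):

struct RangeProof;

#[derive(Debug)]
struct RangeProofGenerationError; // hypothetical name for the generation-side error

impl RangeProof {
    fn new(_amounts: Vec<u64>, bit_lengths: Vec<usize>) -> Result<Self, RangeProofGenerationError> {
        // aggregated bulletproofs require the total bit length to be a power of two
        let total: usize = bit_lengths.iter().sum();
        if total.is_power_of_two() {
            Ok(RangeProof)
        } else {
            Err(RangeProofGenerationError)
        }
    }
}

fn main() {
    // tests unwrap; library callers would propagate with `?`
    let _proof = RangeProof::new(vec![55], vec![64]).unwrap();
    assert!(RangeProof::new(vec![55, 77], vec![64, 17]).is_err());
}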
diff --git a/zk-token-sdk/src/zk_token_elgamal/ops.rs b/zk-token-sdk/src/zk_token_elgamal/ops.rs
index bbca56f8e07885..10db117c44a5b9 100644
--- a/zk-token-sdk/src/zk_token_elgamal/ops.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/ops.rs
@@ -254,7 +254,7 @@ mod tests {
             source_pk.encrypt_with(22_u64, &final_source_open).into();
         assert_eq!(expected_source, final_source_spendable);
 
-        // program arithemtic for the destination account
+        // program arithmetic for the destination account
         let dest_lo_ct: pod::ElGamalCiphertext = (comm_lo, handle_dest_lo).into();
         let dest_hi_ct: pod::ElGamalCiphertext = (comm_hi, handle_dest_hi).into();
 
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs b/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs
index fe2b89f2d688c8..45534218fbcd6d 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/auth_encryption.rs
@@ -1,7 +1,7 @@
 //! Plain Old Data types for the AES128-GCM-SIV authenticated encryption scheme.
 
 #[cfg(not(target_os = "solana"))]
-use crate::{encryption::auth_encryption as decoded, errors::ProofError};
+use crate::encryption::auth_encryption::{self as decoded, AuthenticatedEncryptionError};
 use {
     crate::zk_token_elgamal::pod::{Pod, Zeroable},
     base64::{prelude::BASE64_STANDARD, Engine},
@@ -49,9 +49,9 @@ impl From<decoded::AeCiphertext> for AeCiphertext {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<AeCiphertext> for decoded::AeCiphertext {
-    type Error = ProofError;
+    type Error = AuthenticatedEncryptionError;
 
     fn try_from(pod_ciphertext: AeCiphertext) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_ciphertext.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_ciphertext.0).ok_or(AuthenticatedEncryptionError::Deserialization)
     }
 }
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs b/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs
index e0ff2a3d186fd0..4473ab3ee89aa2 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/elgamal.rs
@@ -2,7 +2,7 @@
 
 #[cfg(not(target_os = "solana"))]
 use {
-    crate::{encryption::elgamal as decoded, errors::ProofError},
+    crate::encryption::elgamal::{self as decoded, ElGamalError},
     curve25519_dalek::ristretto::CompressedRistretto,
 };
 use {
@@ -55,10 +55,10 @@ impl From<decoded::ElGamalCiphertext> for ElGamalCiphertext {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<ElGamalCiphertext> for decoded::ElGamalCiphertext {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_ciphertext: ElGamalCiphertext) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_ciphertext.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_ciphertext.0).ok_or(ElGamalError::CiphertextDeserialization)
     }
 }
 
@@ -88,10 +88,10 @@ impl From<decoded::ElGamalPubkey> for ElGamalPubkey {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<ElGamalPubkey> for decoded::ElGamalPubkey {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_pubkey: ElGamalPubkey) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_pubkey.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_pubkey.0).ok_or(ElGamalError::PubkeyDeserialization)
     }
 }
 
@@ -123,9 +123,9 @@ impl From<DecryptHandle> for CompressedRistretto {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<DecryptHandle> for decoded::DecryptHandle {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_handle: DecryptHandle) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_handle.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_handle.0).ok_or(ElGamalError::CiphertextDeserialization)
    }
 }
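Editor's note: one behavioral nuance in the elgamal.rs hunk above is that a malformed pod `ElGamalPubkey` now fails with `ElGamalError::PubkeyDeserialization` rather than the ciphertext variant, so callers can tell which input was bad. A sketch of matching on the variant, assuming an enum shaped like the one these impls reference (the real definition lives in `encryption::elgamal` and may carry more variants):

// Assumed shape of the error enum returned by the TryFrom impls above.
#[derive(Debug, PartialEq)]
enum ElGamalError {
    CiphertextDeserialization,
    PubkeyDeserialization,
}

fn describe(err: &ElGamalError) -> &'static str {
    match err {
        ElGamalError::CiphertextDeserialization => "malformed ciphertext bytes",
        ElGamalError::PubkeyDeserialization => "malformed ElGamal pubkey bytes",
    }
}

fn main() {
    assert_eq!(
        describe(&ElGamalError::PubkeyDeserialization),
        "malformed ElGamal pubkey bytes"
    );
}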
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs b/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs
index e76133b971f78e..36863faaaf48ef 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/grouped_elgamal.rs
@@ -1,7 +1,7 @@
 //! Plain Old Data types for the Grouped ElGamal encryption scheme.
 
 #[cfg(not(target_os = "solana"))]
-use crate::{encryption::grouped_elgamal::GroupedElGamalCiphertext, errors::ProofError};
+use crate::encryption::{elgamal::ElGamalError, grouped_elgamal::GroupedElGamalCiphertext};
 use {
     crate::zk_token_elgamal::pod::{
         elgamal::DECRYPT_HANDLE_LEN, pedersen::PEDERSEN_COMMITMENT_LEN, Pod, Zeroable,
@@ -42,10 +42,10 @@ impl From<GroupedElGamalCiphertext<2>> for GroupedElGamalCiphertext2Handles {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<GroupedElGamalCiphertext2Handles> for GroupedElGamalCiphertext<2> {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_ciphertext: GroupedElGamalCiphertext2Handles) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_ciphertext.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_ciphertext.0).ok_or(ElGamalError::CiphertextDeserialization)
     }
 }
 
@@ -75,9 +75,9 @@ impl From<GroupedElGamalCiphertext<3>> for GroupedElGamalCiphertext3Handles {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<GroupedElGamalCiphertext3Handles> for GroupedElGamalCiphertext<3> {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_ciphertext: GroupedElGamalCiphertext3Handles) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_ciphertext.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_ciphertext.0).ok_or(ElGamalError::CiphertextDeserialization)
     }
 }
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs b/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs
index 7e9fdf30cbf160..121747d76ab153 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/instruction.rs
@@ -3,7 +3,7 @@ use crate::zk_token_elgamal::pod::{
     Zeroable,
 };
 #[cfg(not(target_os = "solana"))]
-use crate::{errors::ProofError, instruction::transfer as decoded};
+use crate::{encryption::elgamal::ElGamalError, instruction::transfer as decoded};
 
 #[derive(Clone, Copy, Pod, Zeroable)]
 #[repr(C)]
@@ -18,7 +18,7 @@ impl From<decoded::TransferAmountCiphertext> for TransferAmountCiphertext {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<TransferAmountCiphertext> for decoded::TransferAmountCiphertext {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_ciphertext: TransferAmountCiphertext) -> Result<Self, Self::Error> {
         Ok(Self(pod_ciphertext.0.try_into()?))
@@ -38,7 +38,7 @@ impl From<decoded::FeeEncryption> for FeeEncryption {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<FeeEncryption> for decoded::FeeEncryption {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_ciphertext: FeeEncryption) -> Result<Self, Self::Error> {
         Ok(Self(pod_ciphertext.0.try_into()?))
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs b/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs
index 64749e9ebbe72d..20b09bd1c2ff2a 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/pedersen.rs
@@ -2,7 +2,7 @@
 
 #[cfg(not(target_os = "solana"))]
 use {
-    crate::{encryption::pedersen as decoded, errors::ProofError},
+    crate::encryption::{elgamal::ElGamalError, pedersen as decoded},
     curve25519_dalek::ristretto::CompressedRistretto,
 };
 use {
@@ -44,9 +44,9 @@ impl From<PedersenCommitment> for CompressedRistretto {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<PedersenCommitment> for decoded::PedersenCommitment {
-    type Error = ProofError;
+    type Error = ElGamalError;
 
     fn try_from(pod_commitment: PedersenCommitment) -> Result<Self, Self::Error> {
-        Self::from_bytes(&pod_commitment.0).ok_or(ProofError::CiphertextDeserialization)
+        Self::from_bytes(&pod_commitment.0).ok_or(ElGamalError::CiphertextDeserialization)
     }
 }
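Editor's note: all of the pod types touched above share one layout idea: a `#[repr(C)]` newtype over a fixed-size byte array that is freely copyable on-chain, converted to and from the validated "decoded" type at the boundary. A minimal sketch of the pattern with hypothetical names (the real types also derive this crate's `Pod`/`Zeroable` traits, and real validation checks curve-point encoding, not just length):

const COMMITMENT_LEN: usize = 32;

// Pod side: nothing but bytes, safe to copy across the program boundary.
#[repr(C)]
#[derive(Clone, Copy)]
struct PodCommitment([u8; COMMITMENT_LEN]);

// Decoded side: only constructible from bytes that pass validation.
struct Commitment([u8; COMMITMENT_LEN]);

impl Commitment {
    fn from_bytes(bytes: &[u8]) -> Option<Self> {
        // the real code would also verify the bytes decode to a curve point
        let arr: [u8; COMMITMENT_LEN] = bytes.try_into().ok()?;
        Some(Commitment(arr))
    }
}

#[derive(Debug)]
struct DeserializationError; // stand-in for ElGamalError::CiphertextDeserialization

impl TryFrom<PodCommitment> for Commitment {
    type Error = DeserializationError;

    fn try_from(pod: PodCommitment) -> Result<Self, Self::Error> {
        Commitment::from_bytes(&pod.0).ok_or(DeserializationError)
    }
}

fn main() {
    let pod = PodCommitment([0u8; COMMITMENT_LEN]);
    assert!(Commitment::try_from(pod).is_ok());
}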
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs b/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs
index 5a727ed93f0a51..4f134cb5eb7dd0 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/range_proof.rs
@@ -2,8 +2,7 @@
 
 #[cfg(not(target_os = "solana"))]
 use crate::{
-    errors::ProofVerificationError,
-    range_proof::{self as decoded, errors::RangeProofError},
+    range_proof::{self as decoded, errors::RangeProofVerificationError},
     UNIT_LEN,
 };
 use crate::{
@@ -42,11 +41,11 @@ pub struct RangeProofU64(pub [u8; RANGE_PROOF_U64_LEN]);
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<decoded::RangeProof> for RangeProofU64 {
-    type Error = RangeProofError;
+    type Error = RangeProofVerificationError;
 
     fn try_from(decoded_proof: decoded::RangeProof) -> Result<Self, Self::Error> {
         if decoded_proof.ipp_proof.serialized_size() != INNER_PRODUCT_PROOF_U64_LEN {
-            return Err(ProofVerificationError::Deserialization.into());
+            return Err(RangeProofVerificationError::Deserialization);
         }
 
         let mut buf = [0_u8; RANGE_PROOF_U64_LEN];
@@ -59,7 +58,7 @@ impl TryFrom<decoded::RangeProof> for RangeProofU64 {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<RangeProofU64> for decoded::RangeProof {
-    type Error = RangeProofError;
+    type Error = RangeProofVerificationError;
 
     fn try_from(pod_proof: RangeProofU64) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -73,11 +72,11 @@ pub struct RangeProofU128(pub [u8; RANGE_PROOF_U128_LEN]);
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<decoded::RangeProof> for RangeProofU128 {
-    type Error = RangeProofError;
+    type Error = RangeProofVerificationError;
 
     fn try_from(decoded_proof: decoded::RangeProof) -> Result<Self, Self::Error> {
         if decoded_proof.ipp_proof.serialized_size() != INNER_PRODUCT_PROOF_U128_LEN {
-            return Err(ProofVerificationError::Deserialization.into());
+            return Err(RangeProofVerificationError::Deserialization);
         }
 
         let mut buf = [0_u8; RANGE_PROOF_U128_LEN];
@@ -90,7 +89,7 @@ impl TryFrom<decoded::RangeProof> for RangeProofU128 {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<RangeProofU128> for decoded::RangeProof {
-    type Error = RangeProofError;
+    type Error = RangeProofVerificationError;
 
     fn try_from(pod_proof: RangeProofU128) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -104,11 +103,11 @@ pub struct RangeProofU256(pub [u8; RANGE_PROOF_U256_LEN]);
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<decoded::RangeProof> for RangeProofU256 {
-    type Error = RangeProofError;
+    type Error = RangeProofVerificationError;
 
     fn try_from(decoded_proof: decoded::RangeProof) -> Result<Self, Self::Error> {
         if decoded_proof.ipp_proof.serialized_size() != INNER_PRODUCT_PROOF_U256_LEN {
-            return Err(ProofVerificationError::Deserialization.into());
+            return Err(RangeProofVerificationError::Deserialization);
        }
 
         let mut buf = [0_u8; RANGE_PROOF_U256_LEN];
@@ -121,7 +120,7 @@ impl TryFrom<decoded::RangeProof> for RangeProofU256 {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<RangeProofU256> for decoded::RangeProof {
-    type Error = RangeProofError;
+    type Error = RangeProofVerificationError;
 
     fn try_from(pod_proof: RangeProofU256) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
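Editor's note: the `serialized_size` guard in these impls exists because a `decoded::RangeProof` can prove ranges of several aggregate bit sizes, while each pod array holds exactly one; the inner-product proof length is what distinguishes them. A sketch of that guard with illustrative constants (448 = 2 * log2(64) compressed points plus 2 scalars, 672 = 224 bytes of commitments/scalars plus the inner-product part; treat the exact numbers as assumptions):

const INNER_PRODUCT_PROOF_U64_LEN: usize = 448;
const RANGE_PROOF_U64_LEN: usize = 672;

#[derive(Debug)]
enum RangeProofVerificationError {
    Deserialization,
}

fn pack_u64_proof(
    head: &[u8],
    ipp_bytes: &[u8],
) -> Result<[u8; RANGE_PROOF_U64_LEN], RangeProofVerificationError> {
    // reject an inner-product proof built for a different aggregate bit size
    if ipp_bytes.len() != INNER_PRODUCT_PROOF_U64_LEN
        || head.len() != RANGE_PROOF_U64_LEN - INNER_PRODUCT_PROOF_U64_LEN
    {
        return Err(RangeProofVerificationError::Deserialization);
    }
    let mut buf = [0u8; RANGE_PROOF_U64_LEN];
    buf[..head.len()].copy_from_slice(head);
    buf[head.len()..].copy_from_slice(ipp_bytes);
    Ok(buf)
}

fn main() {
    let head = vec![0u8; 224];
    let ipp = vec![0u8; 448];
    assert!(pack_u64_proof(&head, &ipp).is_ok());
    assert!(pack_u64_proof(&head, &ipp[..447]).is_err()); // wrong aggregate size
}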
diff --git a/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs b/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs
index 878f2e9afd8aa8..0bfc4cfade9785 100644
--- a/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs
+++ b/zk-token-sdk/src/zk_token_elgamal/pod/sigma_proofs.rs
@@ -47,7 +47,7 @@ impl From<DecodedCiphertextCommitmentEqualityProof> for CiphertextCommitmentEqua
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<CiphertextCommitmentEqualityProof> for DecodedCiphertextCommitmentEqualityProof {
-    type Error = EqualityProofError;
+    type Error = EqualityProofVerificationError;
 
     fn try_from(pod_proof: CiphertextCommitmentEqualityProof) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -68,7 +68,7 @@ impl From<DecodedCiphertextCiphertextEqualityProof> for CiphertextCiphertextEqua
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<CiphertextCiphertextEqualityProof> for DecodedCiphertextCiphertextEqualityProof {
-    type Error = EqualityProofError;
+    type Error = EqualityProofVerificationError;
 
     fn try_from(pod_proof: CiphertextCiphertextEqualityProof) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -95,7 +95,7 @@ impl From<DecodedGroupedCiphertext2HandlesValidityProof>
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<GroupedCiphertext2HandlesValidityProof> for DecodedGroupedCiphertext2HandlesValidityProof {
-    type Error = ValidityProofError;
+    type Error = ValidityProofVerificationError;
 
     fn try_from(pod_proof: GroupedCiphertext2HandlesValidityProof) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -122,7 +122,7 @@ impl From<DecodedBatchedGroupedCiphertext2HandlesValidityProof>
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<BatchedGroupedCiphertext2HandlesValidityProof> for DecodedBatchedGroupedCiphertext2HandlesValidityProof {
-    type Error = ValidityProofError;
+    type Error = ValidityProofVerificationError;
 
     fn try_from(
         pod_proof: BatchedGroupedCiphertext2HandlesValidityProof,
@@ -145,7 +145,7 @@ impl From<DecodedZeroBalanceProof> for ZeroBalanceProof {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<ZeroBalanceProof> for DecodedZeroBalanceProof {
-    type Error = ZeroBalanceProofError;
+    type Error = ZeroBalanceProofVerificationError;
 
     fn try_from(pod_proof: ZeroBalanceProof) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -166,7 +166,7 @@ impl From<DecodedFeeSigmaProof> for FeeSigmaProof {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<FeeSigmaProof> for DecodedFeeSigmaProof {
-    type Error = FeeSigmaProofError;
+    type Error = FeeSigmaProofVerificationError;
 
     fn try_from(pod_proof: FeeSigmaProof) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
@@ -187,7 +187,7 @@ impl From<DecodedPubkeyValidityProof> for PubkeyValidityProof {
 
 #[cfg(not(target_os = "solana"))]
 impl TryFrom<PubkeyValidityProof> for DecodedPubkeyValidityProof {
-    type Error = PubkeyValidityProofError;
+    type Error = PubkeyValidityProofVerificationError;
 
     fn try_from(pod_proof: PubkeyValidityProof) -> Result<Self, Self::Error> {
         Self::from_bytes(&pod_proof.0)
diff --git a/zk-token-sdk/src/zk_token_proof_instruction.rs b/zk-token-sdk/src/zk_token_proof_instruction.rs
index 429c3d5fb32339..81616beb6f449e 100644
--- a/zk-token-sdk/src/zk_token_proof_instruction.rs
+++ b/zk-token-sdk/src/zk_token_proof_instruction.rs
@@ -19,8 +19,8 @@
 //! this instruction must be signed by the context account's owner. This instruction can be used by
 //! the account owner to reclaim lamports for storage.
 //!
-//! [`ZK Token proof`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof
-//! [`context-state`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof#context-data
+//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof
+//! [`context-state`]: https://docs.solanalabs.com/runtime/zk-token-proof#context-data
 
 pub use crate::instruction::*;
 use {
diff --git a/zk-token-sdk/src/zk_token_proof_program.rs b/zk-token-sdk/src/zk_token_proof_program.rs
index 621849b8bf760d..9a525702b8543f 100644
--- a/zk-token-sdk/src/zk_token_proof_program.rs
+++ b/zk-token-sdk/src/zk_token_proof_program.rs
@@ -5,7 +5,7 @@
 //! the program as well as the technical details of some of the proof instructions can be found in
 //! the [`ZK Token proof`] documentation.
 //!
-//! [`ZK Token proof`]: https://edge.docs.solana.com/developing/runtime-facilities/zk-token-proof
+//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof
 
 // Program Id of the ZkToken Proof program
 solana_program::declare_id!("ZkTokenProof1111111111111111111111111111111");
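Editor's note: taken together, these renames replace one shared `ProofVerificationError` with a verification-error type per proof family, each wrapping the common sigma-proof failure modes. A sketch of the relationship the earlier call sites rely on (`SigmaProofVerificationError::AlgebraicRelation.into()` converting into the per-proof type), assuming thiserror-style From derivation rather than quoting the crate's actual definitions:

use thiserror::Error;

// Common failure modes shared by all sigma proofs.
#[derive(Error, Debug)]
enum SigmaProofVerificationError {
    #[error("required algebraic relation does not hold")]
    AlgebraicRelation,
    #[error("malformed proof")]
    Deserialization,
}

// Per-proof wrapper; #[from] supplies the From impl that lets
// `SigmaProofVerificationError::AlgebraicRelation.into()` compile at the
// call sites in pubkey_proof.rs and zero_balance_proof.rs.
#[derive(Error, Debug)]
#[error("pubkey validity proof verification failed: {0}")]
struct PubkeyValidityProofVerificationError(#[from] SigmaProofVerificationError);

fn main() {
    let err: PubkeyValidityProofVerificationError =
        SigmaProofVerificationError::AlgebraicRelation.into();
    println!("{err}");
}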