diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 65ccbd61f0..718df9dd09 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -7,9 +7,8 @@ ## โœ… Checklist - [ ] All: Set appropriate labels for the changes. - [ ] All: Considered squashing commits to improve commit history. - - -- [ ] All: Added an entry to [CHANGELOG.md](../docs/CHANGELOG.md). -- [ ] All: Considered updating the online docs in the [./docs/](../docs/) directory. +- [ ] All: Added an entry to [CHANGELOG.md](/ethereum/execution-spec-tests/blob/main/docs/CHANGELOG.md). +- [ ] All: Considered updating the online docs in the [./docs/](/ethereum/execution-spec-tests/blob/main/docs/) directory. +- [ ] Tests: All converted JSON/YML tests from [ethereum/tests](/ethereum/tests) have been added to [converted-ethereum-tests.txt](/ethereum/execution-spec-tests/blob/main/converted-ethereum-tests.txt). - [ ] Tests: Included the type and version of evm t8n tool used to locally execute test cases: e.g., ref with commit hash or geth 1.13.1-stable-3f40e65. - [ ] Tests: Ran `mkdocs serve` locally and verified the auto-generated docs for new tests in the [Test Case Reference](https://ethereum.github.io/execution-spec-tests/main/tests/) are correctly formatted. diff --git a/.github/actions/build-geth-evm/action.yaml b/.github/actions/build-geth-evm/action.yaml index 81110d994a..02ec105c49 100644 --- a/.github/actions/build-geth-evm/action.yaml +++ b/.github/actions/build-geth-evm/action.yaml @@ -12,7 +12,7 @@ inputs: golang: description: 'Golang version to use to build Geth' required: false - default: '1.20.5' + default: '1.21.x' runs: using: "composite" steps: diff --git a/.github/workflows/fixtures.yaml b/.github/workflows/fixtures.yaml index bf5a8030ac..a68ed5dcb0 100644 --- a/.github/workflows/fixtures.yaml +++ b/.github/workflows/fixtures.yaml @@ -19,21 +19,11 @@ jobs: fill-params: '' solc: '0.8.21' python: '3.11' - - name: 'fixtures_hive' - evm-type: 'main' - fill-params: '--enable-hive --from=Merge' - solc: '0.8.21' - python: '3.11' - name: 'fixtures_develop' evm-type: 'develop' fill-params: '--until=Cancun' solc: '0.8.21' python: '3.11' - - name: 'fixtures_develop_hive' - evm-type: 'develop' - fill-params: '--enable-hive --from=Merge --until=Cancun' - solc: '0.8.21' - python: '3.11' steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 2c709e3b98..1408d79a0c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -12,12 +12,12 @@ jobs: python: '3.10' solc: '0.8.20' evm-type: 'main' - tox-cmd: 'tox' + tox-cmd: 'tox run-parallel --parallel-no-spinner' - os: ubuntu-latest python: '3.12' - solc: '0.8.21' + solc: '0.8.23' evm-type: 'main' - tox-cmd: 'tox' + tox-cmd: 'tox run-parallel --parallel-no-spinner' - os: ubuntu-latest python: '3.11' solc: '0.8.21' @@ -25,9 +25,9 @@ jobs: tox-cmd: 'tox -e tests-develop' - os: macos-latest python: '3.11' - solc: '0.8.21' + solc: '0.8.22' evm-type: 'main' - tox-cmd: 'tox' + tox-cmd: 'tox run-parallel --parallel-no-spinner' steps: - uses: actions/checkout@v3 with: diff --git a/.markdownlint.yaml b/.markdownlint.yaml index f7cfed44ac..fee81f4d65 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -4,3 +4,4 @@ MD013: false # line-length: We don't fill paragaraphs/limit line length MD034: false # no-bare-urls - We use pymdownx.magiclink which allows bare urls MD046: false # code-block-style - This doesn't play well with material's admonitions) 
MD024: false # no-duplicate-heading - We use duplicate headings in the changelog. +MD033: false # no-inline-html - Too strict. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2167cebf7a..a0d51c0a09 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: hooks: - id: tox name: tox - entry: tox + entry: tox run-parallel language: system types: [python] pass_filenames: false diff --git a/.vscode/extensions.json b/.vscode/extensions.json index ef9593158b..55edb2c0f0 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -6,6 +6,7 @@ "ms-python.python", "ms-python.isort", "ms-python.flake8", + "ms-python.mypy-type-checker", "ms-python.black-formatter", "esbenp.prettier-vscode", "njpwerner.autodocstring", // https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring diff --git a/.vscode/launch.recommended.json b/.vscode/launch.recommended.json index fd6f050916..da2885969c 100644 --- a/.vscode/launch.recommended.json +++ b/.vscode/launch.recommended.json @@ -2,7 +2,6 @@ // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - // If the VS Code "Run and Debug" button, respecively launch selector are not visible, see this answer: // https://stackoverflow.com/a/74245823 // @@ -41,7 +40,7 @@ "${input:testPathOrId}" ], "cwd": "${workspaceFolder}" - }, + }, { "name": "Launch fill --until Shanghai", "type": "python", @@ -80,7 +79,11 @@ "type": "python", "request": "launch", "module": "pytest", - "args": ["-c", "pytest.ini", "--test-help"], + "args": [ + "-c", + "pytest.ini", + "--test-help" + ], "cwd": "/home/dtopz/code/github/danceratopz/execution-spec-tests" }, ], @@ -118,15 +121,15 @@ "id": "fork", "description": "Which fork do you want to use?", "options": [ - "Frontier", - "Homestead", + "Frontier", + "Homestead", "Byzantium", "Constantinople", "ConstantinopleFix", "Istanbul", "Berlin", "London", - "Merge", + "Paris", "Shanghai", "Cancun", ], @@ -139,4 +142,4 @@ "default": "test_" } ] -} +} \ No newline at end of file diff --git a/.vscode/settings.recommended.json b/.vscode/settings.recommended.json index 0ab21cf06c..08d60b4320 100644 --- a/.vscode/settings.recommended.json +++ b/.vscode/settings.recommended.json @@ -18,8 +18,6 @@ } }, "python.analysis.autoFormatStrings": true, - "python.linting.mypyEnabled": true, - "python.linting.flake8Enabled": true, "python.testing.promptToConfigure": false, "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, diff --git a/README.md b/README.md index 8f92e53bd0..96314bc1e3 100644 --- a/README.md +++ b/README.md @@ -68,10 +68,11 @@ The following transition tools are supported by the framework: ### Upcoming EIP Development -Generally, specific `t8n` implementations and branches must be used when developing tests for upcoming EIPs (last updated 2023-09-07): +Generally, specific `t8n` implementations and branches must be used when developing tests for upcoming EIPs. -- Cancun related EIPs (4844, 4788, 1153, 6780) - [marioevz/go-ethereum@cancun-t8n](https://github.com/marioevz/go-ethereum/tree/cancun-t8n) -- EOF tests - [ethereum/evmone@master](https://github.com/ethereum/evmone) +We use named reference tags to point to the specific version of the `t8n` implementation that needs to be used fill the tests. 
+ +All current tags, their t8n implementation and branch they point to, are listed in [evm-config.yaml](evm-config.yaml). ## Getting Started @@ -83,7 +84,7 @@ The following requires a Python 3.10, 3.11 or 3.12 installation. This guide installs stable versions of the required external (go-ethereum) `evm` and `solc` executables and will only enable generation of test fixtures for features deployed to mainnet. In order to generate fixtures for features under active development, you can follow the steps below and then follow the [additional steps in the online doc](https://ethereum.github.io/execution-spec-tests/getting_started/executing_tests_dev_fork/). -1. Ensure go-ethereum's `evm` tool and `solc` ([0.8.20](https://github.com/ethereum/solidity/releases/tag/v0.8.20) or [0.8.21](https://github.com/ethereum/solidity/releases/tag/v0.8.21)) are in your path. Either build the required versions, or alternatively: +1. Ensure go-ethereum's `evm` tool and `solc` ([0.8.20](https://github.com/ethereum/solidity/releases/tag/v0.8.20), [0.8.21](https://github.com/ethereum/solidity/releases/tag/v0.8.21), [0.8.22](https://github.com/ethereum/solidity/releases/tag/v0.8.22), [0.8.23](https://github.com/ethereum/solidity/releases/tag/v0.8.23) supported) are in your path. Either build the required versions, or alternatively: ```console sudo add-apt-repository -y ppa:ethereum/ethereum @@ -132,12 +133,14 @@ This guide installs stable versions of the required external (go-ethereum) `evm` 2. The corresponding fixture file has been generated: ```console - head fixtures/berlin/eip2930_access_list/acl/access_list.json + head fixtures/blockchain_tests/berlin/eip2930_access_list/acl/access_list.json ``` ## Usage -See the [online documentation](https://ethereum.github.io/execution-spec-tests/) for further help with working with this codebase: +More information on how to obtain and consume the [released test fixtures](https://github.com/ethereum/execution-spec-tests/releases) can be found in the [documentation](https://ethereum.github.io/execution-spec-tests/main/consuming_tests/). + +For further help with working with this codebase, see the [online documentation](https://ethereum.github.io/execution-spec-tests/): 1. Learn [useful command-line flags](https://ethereum.github.io/execution-spec-tests/getting_started/executing_tests_command_line/). 2. [Execute tests for features under development](https://ethereum.github.io/execution-spec-tests/getting_started/executing_tests_dev_fork/) via the `--from=FORK1` and `--until=FORK2` flags. diff --git a/converted-ethereum-tests.txt b/converted-ethereum-tests.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 98ffd9d81c..4e10de2e21 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -4,12 +4,183 @@ Test fixtures for use by clients are available for each release on the [Github r **Key:** โœจ = New, ๐Ÿž = Fixed, ๐Ÿ”€ = Changed, ๐Ÿ’ฅ = Breaking change. -## ๐Ÿ”œ [Unreleased - v1.0.6](https://github.com/ethereum/execution-spec-tests/releases/tag/v1.0.6) - 2023-xx-xx +## ๐Ÿ”œ [Unreleased](https://github.com/ethereum/execution-spec-tests/releases/tag/v-Unreleased) - 2024-xx-xx ### ๐Ÿงช Test Cases ### ๐Ÿ› ๏ธ Framework +- ๐Ÿž Fix incorrect `!=` operator for `FixedSizeBytes` ([#477](https://github.com/ethereum/execution-spec-tests/pull/477)). 
+- โœจ Add Macro enum that represents byte sequence of Op instructions ([#457](https://github.com/ethereum/execution-spec-tests/pull/457)) + +### ๐Ÿ”ง EVM Tools + +### ๐Ÿ“‹ Misc + +- ๐Ÿž Fix CI by using Golang 1.21 in Github Actions to build geth ([#484](https://github.com/ethereum/execution-spec-tests/pull/484)). + +## ๐Ÿ”œ [v2.1.1](https://github.com/ethereum/execution-spec-tests/releases/tag/v2.1.1) - 2024-03-09 + +### ๐Ÿงช Test Cases + +- ๐Ÿž Dynamic create2 collision from different transactions same block ([#430](https://github.com/ethereum/execution-spec-tests/pull/430)). +- ๐Ÿž Fix beacon root contract deployment tests so the account in the pre-alloc is not empty ([#425](https://github.com/ethereum/execution-spec-tests/pull/425)). +- ๐Ÿ”€ All beacon root contract tests are now contained in tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py, and all state tests have been converted back to blockchain tests format ([#449](https://github.com/ethereum/execution-spec-tests/pull/449)) + +### ๐Ÿ› ๏ธ Framework + +- โœจ Add Prague to forks ([#419](https://github.com/ethereum/execution-spec-tests/pull/419)). +- โœจ Improve handling of the argument passed to `solc --evm-version` when compiling Yul code ([#418](https://github.com/ethereum/execution-spec-tests/pull/418)). +- ๐Ÿž Fix `fill -m yul_test` which failed to filter tests that are (dynamically) marked as a yul test ([#418](https://github.com/ethereum/execution-spec-tests/pull/418)). +- ๐Ÿ”€ Helper methods `to_address`, `to_hash` and `to_hash_bytes` have been deprecated in favor of `Address` and `Hash`, which are automatically detected as opcode parameters and pushed to the stack in the resulting bytecode ([#422](https://github.com/ethereum/execution-spec-tests/pull/422)). +- โœจ `Opcodes` enum now contains docstrings with each opcode description, including parameters and return values, which show up in many development environments ([#424](https://github.com/ethereum/execution-spec-tests/pull/424)) @ThreeHrSleep. +- ๐Ÿ”€ Locally calculate state root for the genesis blocks in the blockchain tests instead of calling t8n ([#450](https://github.com/ethereum/execution-spec-tests/pull/450)). +- ๐Ÿž Fix bug that causes an exception during test collection because the fork parameter contains `None` ([#452](https://github.com/ethereum/execution-spec-tests/pull/452)). +- โœจ The `_info` field in the test fixtures now contains a `hash` field, which is the hash of the test fixture, and a `hasher` script has been added which prints and performs calculations on top of the hashes of all fixtures (see `hasher -h`) ([#454](https://github.com/ethereum/execution-spec-tests/pull/454)). +- โœจ Adds an optional `verify_sync` field to hive blockchain tests (EngineAPI). When set to true a second client attempts to sync to the first client that executed the tests ([#431](https://github.com/ethereum/execution-spec-tests/pull/431)). +- ๐Ÿž Fix manually setting the gas limit in the genesis test env for post genesis blocks in blockchain tests ([#472](https://github.com/ethereum/execution-spec-tests/pull/472)). + +### ๐Ÿ”ง EVM Tools + +### ๐Ÿ“‹ Misc + +- ๐Ÿž Fix deprecation warnings due to outdated config in recommended VS Code project settings ([#420](https://github.com/ethereum/execution-spec-tests/pull/420)). +- ๐Ÿž Fix typo in the selfdestruct revert tests module ([#421](https://github.com/ethereum/execution-spec-tests/pull/421)). 
+ +## [v2.1.0](https://github.com/ethereum/execution-spec-tests/releases/tag/v2.1.0) - 2024-01-29: ๐Ÿ๐Ÿ–๏ธ Cancun + +Release [v2.1.0](https://github.com/ethereum/execution-spec-tests/releases/tag/v2.1.0) primarily fixes a small bug introduced within the previous release where transition forks are used within the new `StateTest` format. This was highlighted by @chfast within #405 (https://github.com/ethereum/execution-spec-tests/issues/405), where the fork name `ShanghaiToCancunAtTime15k` was found within state tests. + +### ๐Ÿงช Test Cases + +- โœจ [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Adds `test_blob_gas_subtraction_tx()` verifying the blob gas fee is subtracted from the sender before executing the blob tx ([#407](https://github.com/ethereum/execution-spec-tests/pull/407)). + +### ๐Ÿ› ๏ธ Framework + +- ๐Ÿž State tests generated with transition forks no longer use the transition fork name in the fixture output, instead they use the actual enabled fork according to the state test's block number and timestamp ([#406](https://github.com/ethereum/execution-spec-tests/pull/406)). + +### ๐Ÿ“‹ Misc + +- โœจ Use `run-parallel` and shared wheel packages for `tox` ([#408](https://github.com/ethereum/execution-spec-tests/pull/408)). + +## [v2.0.0](https://github.com/ethereum/execution-spec-tests/releases/tag/v2.0.0) - 2024-01-25: ๐Ÿ๐Ÿ–๏ธ Cancun + +Release [v2.0.0](https://github.com/ethereum/execution-spec-tests/releases/tag/v2.0.0) contains many important framework changes, including introduction of the `StateTest` format, and some additional Cancun and other test coverage. + +Due to changes in the framework, there is a breaking change in the directory structure in the release tarball, please see the dedicated "๐Ÿ’ฅ Breaking Changes" section below for more information. + +### ๐Ÿงช Test Cases + +- โœจ [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Add `test_sufficient_balance_blob_tx()` and `test_sufficient_balance_blob_tx_pre_fund_tx()` ([#379](https://github.com/ethereum/execution-spec-tests/pull/379)). +- โœจ [EIP-6780](https://eips.ethereum.org/EIPS/eip-6780): Add a reentrancy suicide revert test ([#372](https://github.com/ethereum/execution-spec-tests/pull/372)). +- โœจ [EIP-1153](https://eips.ethereum.org/EIPS/eip-1153): Add `test_run_until_out_of_gas()` for transient storage opcodes ([#401](https://github.com/ethereum/execution-spec-tests/pull/401)). +- โœจ [EIP-198](https://eips.ethereum.org/EIPS/eip-198): Add tests for the MODEXP precompile ([#364](https://github.com/ethereum/execution-spec-tests/pull/364)). +- โœจ Tests for nested `CALL` and `CALLCODE` gas consumption with a positive value transfer (previously lacking coverage) ([#371](https://github.com/ethereum/execution-spec-tests/pull/371)). +- ๐Ÿž [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Fixed `test_invalid_tx_max_fee_per_blob_gas()` to account for extra gas required in the case where the account is incorrectly deduced the balance as if it had the correct block blob gas fee ([#370](https://github.com/ethereum/execution-spec-tests/pull/370)). +- ๐Ÿž [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Fixed `test_insufficient_balance_blob_tx()` to correctly calculate the minimum balance required for the accounts ([#379](https://github.com/ethereum/execution-spec-tests/pull/379)). +- ๐Ÿž [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Fix and enable `test_invalid_blob_tx_contract_creation` ([#379](https://github.com/ethereum/execution-spec-tests/pull/379)). 
+- ๐Ÿ”€ Convert all eligible `BlockchainTest`s to `StateTest`s (and additionally generate corresponding `BlockchainTest`s) ([#368](https://github.com/ethereum/execution-spec-tests/pull/368), [#370](https://github.com/ethereum/execution-spec-tests/pull/370)). + +### ๐Ÿ› ๏ธ Framework + +- โœจ Add `StateTest` fixture format generation; `StateTests` now generate a `StateTest` and a corresponding `BlockchainTest` test fixture, previously only `BlockchainTest` fixtures were generated ([#368](https://github.com/ethereum/execution-spec-tests/pull/368)). +- โœจ Add `StateTestOnly` fixture format is now available and its only difference with `StateTest` is that it does not produce a `BlockchainTest` ([#368](https://github.com/ethereum/execution-spec-tests/pull/368)). +- โœจ Add `evm_bytes_to_python` command-line utility which converts EVM bytecode to Python Opcodes ([#357](https://github.com/ethereum/execution-spec-tests/pull/357)). +- โœจ Fork objects used to write tests can now be compared using the `>`, `>=`, `<`, `<=` operators, to check for a fork being newer than, newer than or equal, older than, older than or equal, respectively when compared against other fork ([#367](https://github.com/ethereum/execution-spec-tests/pull/367)). +- โœจ Add [solc 0.8.23](https://github.com/ethereum/solidity/releases/tag/v0.8.23) support ([#373](https://github.com/ethereum/execution-spec-tests/pull/373)). +- โœจ Add framework unit tests for post state exception verification ([#350](https://github.com/ethereum/execution-spec-tests/pull/350)). +- โœจ Add a helper class `ethereum_test_tools.TestParameterGroup` to define test parameters as dataclasses and auto-generate test IDs ([#364](https://github.com/ethereum/execution-spec-tests/pull/364)). +- โœจ Add a `--single-fixture-per-file` flag to generate one fixture JSON file per test case ([#331](https://github.com/ethereum/execution-spec-tests/pull/331)). +- ๐Ÿž Storage type iterator is now fixed ([#369](https://github.com/ethereum/execution-spec-tests/pull/369)). +- ๐Ÿž Fix type coercion in `FixtureHeader.join()` ([#398](https://github.com/ethereum/execution-spec-tests/pull/398)). +- ๐Ÿ”€ Locally calculate the transactions list's root instead of using the one returned by t8n when producing BlockchainTests ([#353](https://github.com/ethereum/execution-spec-tests/pull/353)). +- ๐Ÿ”€ Change custom exception classes to dataclasses to improve testability ([#386](https://github.com/ethereum/execution-spec-tests/pull/386)). +- ๐Ÿ”€ Update fork name from "Merge" to "Paris" used within the framework and tests ([#363](https://github.com/ethereum/execution-spec-tests/pull/363)). +- ๐Ÿ’ฅ Replace `=` with `_` in pytest node ids and test fixture names ([#342](https://github.com/ethereum/execution-spec-tests/pull/342)). +- ๐Ÿ’ฅ The `StateTest`, spec format used to write tests, is now limited to a single transaction per test ([#361](https://github.com/ethereum/execution-spec-tests/pull/361)). +- ๐Ÿ’ฅ Tests must now use `BlockException` and `TransactionException` to define the expected exception of a given test, which can be used to test whether the client is hitting the proper exception when processing the block or transaction ([#384](https://github.com/ethereum/execution-spec-tests/pull/384)). +- ๐Ÿ’ฅ `fill`: Remove the `--enable-hive` flag; now all test types are generated by default ([#358](https://github.com/ethereum/execution-spec-tests/pull/358)). 
+- ๐Ÿ’ฅ Rename test fixtures names to match the corresponding pytest node ID as generated using `fill` ([#342](https://github.com/ethereum/execution-spec-tests/pull/342)). + +### ๐Ÿ“‹ Misc + +- โœจ Docs: Add a ["Consuming Tests"](https://ethereum.github.io/execution-spec-tests/main/consuming_tests/) section to the docs, where each test fixture format is described, along with the steps to consume them, and the description of the structures used in each format ([#375](https://github.com/ethereum/execution-spec-tests/pull/375)). +- ๐Ÿ”€ Docs: Update `t8n` tool branch to fill tests for development features in the [readme](https://github.com/ethereum/execution-spec-test) ([#338](https://github.com/ethereum/execution-spec-tests/pull/338)). +- ๐Ÿ”€ Filling tool: Updated the default filling tool (`t8n`) to go-ethereum@master ([#368](https://github.com/ethereum/execution-spec-tests/pull/368)). +- ๐Ÿž Docs: Fix error banner in online docs due to mermaid syntax error ([#398](https://github.com/ethereum/execution-spec-tests/pull/398)). +- ๐Ÿž Docs: Fix incorrectly formatted nested lists in online doc ([#403](https://github.com/ethereum/execution-spec-tests/pull/403)). + +### ๐Ÿ’ฅ Breaking Changes + +A concrete example of the test name renaming and change in directory structure is provided below. + +1. Fixture output, including release tarballs, now contain subdirectories for different test types: + + 1. `blockchain_tests`: Contains `BlockchainTest` formatted tests + 2. `blockchain_tests_hive`: Contains `BlockchainTest` with Engine API call directives for use in hive + 3. `state_tests`: Contains `StateTest` formatted tests + +2. `StateTest`, spec format used to write tests, is now limited to a single transaction per test. +3. In this release the pytest node ID is now used for fixture names (previously only the test parameters were used), this should not be breaking. However, `=` in both node IDs (and therefore fixture names) have been replaced with `_`, which may break tooling that depends on the `=` character. +4. Produced `blockchain_tests` fixtures and their corresponding `blockchain_tests_hive` fixtures now contain the named exceptions `BlockException` and `TransactionException` as strings in the `expectException` and `validationError` fields, respectively. These exceptions can be used to test whether the client is hitting the proper exception when processing an invalid block. + + Blockchain test: + + ```json + "blocks": [ + { + ... + "expectException": "TransactionException.INSUFFICIENT_ACCOUNT_FUNDS", + ... + } + ... + ] + ``` + + Blockchain hive test: + + ```json + "engineNewPayloads": [ + { + ... + "validationError": "TransactionException.INSUFFICIENT_ACCOUNT_FUNDS", + ... + } + ... + ] + ``` + +#### Renaming and Release Tarball Directory Structure Change Example + +The fixture renaming provides a more consistent naming scheme between the pytest node ID and fixture name and allows the fixture name to be provided directly to pytest 5on the command line to execute individual tests in isolation, e.g. `pytest tests/frontier/opcodes/test_dup.py::test_dup[fork_Frontier]`. + +1. Pytest node ID example: + + 1. Previous node ID: `tests/frontier/opcodes/test_dup.py::test_dup[fork=Frontier]`. + 2. New node ID: `tests/frontier/opcodes/test_dup.py::test_dup[fork_Frontier]`. + +2. Fixture name example: + + 1. Previous fixture name: `000-fork=Frontier` + 2. New fixture name: `tests/frontier/opcodes/test_dup.py::test_dup[fork_Frontier]` (now the same as the pytest node ID). + +3. 
Fixture JSON file name example (within the release tarball): + + 1. Previous fixture file name: `fixtures/frontier/opcodes/dup/dup.json` (`BlockChainTest` format). + 2. New fixture file names (all present within the release tarball): + + - `fixtures/state_tests/frontier/opcodes/dup/dup.json` (`StateTest` format). + - `fixtures/blockchain_tests/frontier/opcodes/dup/dup.json` (`BlockChainTest` format). + - `fixtures/blockchain_tests_hive/frontier/opcodes/dup/dup.json` (a blockchain test in `HiveFixture` format). + +## [v1.0.6](https://github.com/ethereum/execution-spec-tests/releases/tag/v1.0.6) - 2023-10-19: ๐Ÿ๐Ÿ–๏ธ Cancun Devnet 10 + +### ๐Ÿงช Test Cases + +- ๐Ÿ”€ [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Update KZG point evaluation test vectors to use data from the official KZG setup and Mainnet Trusted Setup ([#336](https://github.com/ethereum/execution-spec-tests/pull/336)). + +### ๐Ÿ› ๏ธ Framework + - ๐Ÿ”€ Fixtures: Add a non-RLP format field (`rlp_decoded`) to invalid blocks ([#322](https://github.com/ethereum/execution-spec-tests/pull/322)). - ๐Ÿ”€ Spec: Refactor state and blockchain spec ([#307](https://github.com/ethereum/execution-spec-tests/pull/307)). @@ -23,6 +194,12 @@ Test fixtures for use by clients are available for each release on the [Github r - โœจ Tooling: Add Python 3.12 support ([#309](https://github.com/ethereum/execution-spec-tests/pull/309)). - โœจ Process: Added a Github pull request template ([#308](https://github.com/ethereum/execution-spec-tests/pull/308)). - โœจ Docs: Changelog updated post release ([#321](https://github.com/ethereum/execution-spec-tests/pull/321)). +- โœจ Docs: Add [a section explaining execution-spec-tests release artifacts](https://ethereum.github.io/execution-spec-tests/main/getting_started/using_fixtures/) ([#334](https://github.com/ethereum/execution-spec-tests/pull/334)). +- ๐Ÿ”€ T8N Tool: Branch used to generate the tests for Cancun is now [lightclient/go-ethereum@devnet-10](https://github.com/lightclient/go-ethereum/tree/devnet-10) ([#336](https://github.com/ethereum/execution-spec-tests/pull/336)) + +### ๐Ÿ’ฅ Breaking Change + +- Fixtures now use the Mainnet Trusted Setup merged on [consensus-specs#3521](https://github.com/ethereum/consensus-specs/pull/3521) ([#336](https://github.com/ethereum/execution-spec-tests/pull/336)) ## [v1.0.5](https://github.com/ethereum/execution-spec-tests/releases/tag/v1.0.5) - 2023-09-26: ๐Ÿ๐Ÿ–๏ธ Cancun Devnet 9 Release 3 diff --git a/docs/consuming_tests/blockchain_test.md b/docs/consuming_tests/blockchain_test.md new file mode 100644 index 0000000000..46a3d18b06 --- /dev/null +++ b/docs/consuming_tests/blockchain_test.md @@ -0,0 +1,303 @@ +# Blockchain Tests + +The Blockchain Test fixture format tests are included in the fixtures subdirectory `blockchain_tests`. + +These are produced by the `StateTest` and `BlockchainTest` test specs. + +## Description + +The blockchain test fixture format is used to test block validation and the consensus rules of the Ethereum blockchain. + +It does so by defining a pre-execution state, a series of blocks, and a post-execution state, verifying that, after all the blocks have been processed, appended if valid or rejected if invalid, the result is the expected post-execution state. + +A single JSON fixture file is composed of a JSON object where each key-value pair is a different [`Fixture`](#fixture) test object, with the key string representing the test name. + +The JSON file path plus the test name are used as the unique test identifier. 
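+
+An illustrative sketch of this top-level layout is shown below; the field names are those defined in the [Structures](#structures) section and the values are elided or merely illustrative:
+
+```json
+{
+  "tests/frontier/opcodes/test_dup.py::test_dup[fork_Frontier]": {
+    "network": "Frontier",
+    "genesisBlockHeader": { "...": "..." },
+    "genesisRLP": "0x...",
+    "blocks": [ { "...": "..." } ],
+    "lastblockhash": "0x...",
+    "pre": { "...": "..." },
+    "post": { "...": "..." },
+    "sealEngine": "NoProof"
+  }
+}
+```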
+ +## Consumption + +For each [`Fixture`](#fixture) test object in the JSON fixture file, perform the following steps: + +1. Use [`network`](#-network-fork) to configure the execution fork schedule according to the [`Fork`](./common_types.md#fork) type definition. +2. Use [`pre`](#-pre-alloc) as the starting state allocation of the execution environment for the test and calculate the genesis state root. +3. Decode [`genesisRLP`](#-genesisrlp-bytes) to obtain the genesis block header, if the block cannot be decoded, fail the test. +4. Compare the genesis block header with [`genesisBlockHeader`](#-genesisblockheader-fixtureheader), if any field does not match, fail the test. +5. Compare the state root calculated on step 2 with the state root in the genesis block header, if they do not match, fail the test. +6. Set the genesis block as the current head of the chain. +7. If [`blocks`](#-blocks-listfixtureblockinvalidfixtureblock) contains at least one block, perform the following steps for each [`FixtureBlock`](#fixtureblock) or [`InvalidFixtureBlock`](#invalidfixtureblock): + + 1. Determine whether the current block is valid or invalid: + + 1. If the [`expectException`](#expectexception-str) field is not present, it is valid, and object must be decoded as a [`FixtureBlock`](#fixtureblock). + 2. If the [`expectException`](#expectexception-str) field is present, it is invalid, and object must be decoded as a [`InvalidFixtureBlock`](#invalidfixtureblock). + + 2. Attempt to decode field [`rlp`](#-rlp-bytes) as the current block + 1. If the block cannot be decoded: + - If an rlp decoding exception is not expected for the current block, fail the test. + - If an rlp decoding error is expected, pass the test (Note: A block with an expected exception will be the last block in the fixture). + 2. If the block can be decoded, proceed to the next step. + + 3. Attempt to apply the current decoded block on top of the current head of the chain + 1. If the block cannot be applied: + - If an exception is expected on the current block and it matches the exception obtained upon execution, pass the test. (Note: A block with an expected exception will be the last block in the fixture) + - If an exception is not expected on the current block, fail the test + 2. If the block can be applied: + - If an exception is expected on the current block, fail the test + - If an exception is not expected on the current block, set the decoded block as the current head of the chain and proceed to the next block until you reach the last block in the fixture. + +8. Compare the hash of the current head of the chain against [`lastblockhash`](#-lastblockhash-hash), if they do not match, fail the test. +9. Compare all accounts and the fields described in [`post`](#-post-alloc) against the current state, if any do not match, fail the test. + +## Structures + +### `Fixture` + +#### - `network`: [`Fork`](./common_types.md#fork) + +Fork configuration for the test. + +#### - `pre`: [`Alloc`](./common_types.md#alloc-mappingaddressaccount) + +Starting account allocation for the test. State root calculated from this allocation must match the one in the genesis block. + +#### - `genesisRLP`: [`Bytes`](./common_types.md#bytes) + +RLP serialized version of the genesis block. + +#### - `genesisBlockHeader`: [`FixtureHeader`](#fixtureheader) + +Genesis block header. 
+ +#### - `blocks`: [`List`](./common_types.md#list)`[`[`FixtureBlock`](#fixtureblock)` | `[`InvalidFixtureBlock`](#invalidfixtureblock)`]` + +List of blocks to be processed after the genesis block. + +#### - `lastblockhash`: [`Hash`](./common_types.md#hash) + +Hash of the last valid block, or the genesis block hash if the list of blocks is empty, or contains a single invalid block. + +#### - `post`: [`Alloc`](./common_types.md#alloc-mappingaddressaccount) + +Account allocation for verification after all the blocks have been processed. + +#### - `sealEngine`: `str` + +Deprecated: Seal engine used to mine the blocks. + +### `FixtureHeader` + +#### - `parentHash`: [`Hash`](./common_types.md#hash) + +Hash of the parent block. + +#### - `uncleHash`: [`Hash`](./common_types.md#hash) + +Hash of the uncle block list. + +#### - `coinbase`: [`Address`](./common_types.md#address) + +Address of the account that will receive the rewards for building the block. + +#### - `stateRoot`: [`Hash`](./common_types.md#hash) + +Root hash of the state trie. + +#### - `transactionsTrie`: [`Hash`](./common_types.md#hash) + +Root hash of the transactions trie. + +#### - `receiptTrie`: [`Hash`](./common_types.md#hash) + +Root hash of the receipts trie. + +#### - `bloom`: [`Bloom`](./common_types.md#bloom) + +Bloom filter composed of the logs of all the transactions in the block. + +#### - `difficulty`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Difficulty of the block. + +#### - `number`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Number of the block. + +#### - `gasLimit`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Total gas limit of the block. + +#### - `gasUsed`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Total gas used by all the transactions in the block. + +#### - `timestamp`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Timestamp of the block. + +#### - `extraData`: [`Bytes`](./common_types.md#bytes) + +Extra data of the block. + +#### - `mixHash`: [`Hash`](./common_types.md#hash) + +Mix hash or PrevRandao of the block. + +#### - `nonce`: [`HeaderNonce`](./common_types.md#headernonce) + +Nonce of the block. + +#### - `hash`: [`Hash`](./common_types.md#hash) + +Hash of the block. + +#### - `baseFeePerGas`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) `(fork: London)` + +Base fee per gas of the block. + +#### - `withdrawalsRoot`: [`Hash`](./common_types.md#hash) `(fork: Shanghai)` + +Root hash of the withdrawals trie. + +#### - `blobGasUsed`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) `(fork: Cancun)` + +Total blob gas used by all the transactions in the block. + +#### - `excessBlobGas`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) `(fork: Cancun)` + +Excess blob gas of the block used to calculate the blob fee per gas for this block. + +#### - `parentBeaconBlockRoot`: [`Hash`](./common_types.md#hash) `(fork: Cancun)` + +Root hash of the parent beacon block. + +### `FixtureBlock` + +#### - `rlp`: [`Bytes`](./common_types.md#bytes) + +RLP serialized version of the block. Field is only optional when embedded in a [`InvalidFixtureBlock`](#invalidfixtureblock) as the [`rlp_decoded`](#rlp_decoded-optionalfixtureblock) field. + +#### - `blockHeader`: [`FixtureHeader`](#fixtureheader) + +Decoded block header fields included in the block RLP. + +#### - `blocknumber`: [`Number`](./common_types.md#number) + +Block number. 
+ +#### - `transactions`: [`List`](./common_types.md#list)`[`[`FixtureTransaction`](#fixturetransaction)`]` + +List of decoded transactions included in the block RLP. + +#### - `uncleHeaders`: [`List`](./common_types.md#list)`[`[`FixtureHeader`](#fixturetransaction)`]` + +List of uncle headers included in the block RLP. An empty list post merge. + +#### - `withdrawals`: [`Optional`](./common_types.md#optional)`[`[`List`](./common_types.md#list)`[`[`FixtureWithdrawal`](#fixturewithdrawal)`]]` `(fork: Shanghai)` + +Optional list of withdrawals included in the block RLP. + +### `InvalidFixtureBlock` + +#### - `expectException`: [`TransactionException`](./exceptions.md#transactionexception)` | `[`BlockException`](./exceptions.md#blockexception) + +Expected exception that invalidates the block. + +#### - `rlp`: [`Bytes`](./common_types.md#bytes) + +RLP serialized version of the block. + +#### - `rlp_decoded`: [`Optional`](./common_types.md#optional)`[`[`FixtureBlock`](#fixtureblock)`]` + +Decoded block attributes included in the block RLP. + +### `FixtureTransaction` + +#### - `type`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Transaction type. + +#### - `chainId`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Chain ID of the transaction. + +#### - `nonce`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Nonce of the account that sends the transaction + +#### - `gasPrice`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Gas price for the transaction (Transaction types 0 & 1) + +#### - `maxPriorityFeePerGas`: [`HexNumber`](./common_types.md#hexnumber) `(fork: London)` + +Max priority fee per gas to pay (Transaction types 2 & 3) + +#### - `maxFeePerGas`: [`HexNumber`](./common_types.md#hexnumber) `(fork: London)` + +Max base fee per gas to pay (Transaction types 2 & 3) + +#### - `gasLimit`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Gas limit of the transaction + +#### - `to`: [`Address`](./common_types.md#address)`| null` + +Destination address of the transaction, or `null` to create a contract + +#### - `value`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Value of the transaction + +#### - `data`: [`Bytes`](./common_types.md#bytes) + +Data bytes of the transaction + +#### - `accessList`: [`List`](./common_types.md#list)`[`[`Mapping`](./common_types.md#mapping)`[`[`Address`](./common_types.md#address)`,`[`List`](./common_types.md#list)`[`[`Hash`](./common_types.md#hash)`]]]` `(fork: Berlin)` + +Account access lists (Transaction types 1, 2 & 3) + +#### - `maxFeePerBlobGas`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) `(fork: Cancun)` + +Max fee per blob gas to pay (Transaction type 3) + +#### - `blobVersionedHashes`: [`List`](./common_types.md#list)`[`[`Hash`](./common_types.md#hash)`]` `(fork: Cancun)` + +Max fee per blob gas to pay (Transaction type 3) + +#### - `v`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +V value of the transaction signature + +#### - `r`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +R value of the transaction signature + +#### - `s`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +S value of the transaction signature + +#### - `sender`: [`Address`](./common_types.md#address) + +Sender address of the transaction + +#### - `secretKey`: [`Hash`](./common_types.md#hash) + +Private key that must be used to sign the transaction + +### `FixtureWithdrawal` + +#### - `index`: 
[`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Index of the withdrawal + +#### - `validatorIndex`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Withdrawing validator index + +#### - `address`: [`Address`](./common_types.md#address) + +Address to withdraw to + +#### - `amount`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Amount of the withdrawal diff --git a/docs/consuming_tests/blockchain_test_hive.md b/docs/consuming_tests/blockchain_test_hive.md new file mode 100644 index 0000000000..f3905efdab --- /dev/null +++ b/docs/consuming_tests/blockchain_test_hive.md @@ -0,0 +1,191 @@ +# Blockchain Hive Tests + +The Blockchain Hive Test fixture format tests are included in the fixtures subdirectory `blockchain_tests_hive`, and use Engine API directives instead of the usual BlockchainTest format. + +These are produced by the `StateTest` and `BlockchainTest` test specs. + +## Description + +The Blockchain Hive Test fixture format is used to test block validation and the consensus rules of the Ethereum blockchain, when a block is delivered through the Engine API as a `engine_newPayloadVX` directive. + +It does so by defining a pre-execution state, a series of blocks as `engine_newPayloadVX` directives, and a post-execution state, verifying that, after all the blocks have been processed, appended if valid or rejected if invalid, the result is the expected post-execution state. + +A single JSON fixture file is composed of a JSON object where each key-value pair is a different [`HiveFixture`](#hivefixture) test object, with the key string representing the test name. + +The JSON file path plus the test name are used as the unique test identifier. + +## Consumption + +For each [`HiveFixture`](#hivefixture) test object in the JSON fixture file, perform the following steps: + +1. Start a full node using: + + - [`network`](#-network-fork) to configure the execution fork schedule according to the [`Fork`](./common_types.md#fork) type definition. + - [`pre`](#-pre-alloc) as the starting state allocation of the execution environment for the test and calculate the genesis state root. + - [`genesisBlockHeader`](#-genesisblockheader-fixtureheader) as the genesis block header. + +2. Verify the head of the chain is the genesis block, and the state root matches the one calculated on step 1, otherwise fail the test. + +3. For each [`FixtureEngineNewPayload`](#fixtureenginenewpayload) in [`engineNewPayloads`](#-enginenewpayloads-listfixtureenginenewpayload): + + 1. Deliver the payload using the `engine_newPayloadVX` directive, using: + - [`version`](#-version-number) as the version of the directive. + - [`executionPayload`](#-executionpayload-fixtureexecutionpayload) as the payload. + - [`blob_versioned_hashes`](#-blob_versioned_hashes-optionallisthash-fork-cancun), if present, as the list of hashes of the versioned blobs that are part of the execution payload. + - [`parentBeaconBlockRoot`](#-parentbeaconblockroot-optionalhash-fork-cancun), if present, as the hash of the parent beacon block root. + 2. If [`errorCode`](#-errorcode-optionalnumber) is present: + - Verify the directive returns an error, and the error code matches the one in [`errorCode`](#-errorcode-optionalnumber), otherwise fail the test. + - Proceed to the next payload. + 3. 
If [`valid`](#-valid-bool) is `false`, verify that the directive returns `status` field of [PayloadStatusV1](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#payloadstatusv1) as `INVALID`, otherwise fail the test. + 4. If [`valid`](#-valid-bool) is `true`, verify that the directive returns `status` field of [PayloadStatusV1](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#payloadstatusv1) as `VALID`, otherwise fail the test. + +## Structures + +### `HiveFixture` + +#### - `network`: [`Fork`](./common_types.md#fork) + +Fork configuration for the test. + +#### - `genesisBlockHeader`: [`FixtureHeader`](./blockchain_test.md#fixtureheader) + +Genesis block header. + +#### - `engineNewPayloads`: [`List`](./common_types.md#list)`[`[`FixtureEngineNewPayload`](#fixtureenginenewpayload)`]` + +List of `engine_newPayloadVX` directives to be processed after the genesis block. + +#### - `engineFcuVersion`: [`Number`](./common_types.md#number) + +Version of the `engine_forkchoiceUpdatedVX` directive to use to set the head of the chain. + +#### - `pre`: [`Alloc`](./common_types.md#alloc-mappingaddressaccount) + +Starting account allocation for the test. State root calculated from this allocation must match the one in the genesis block. + +#### - `post`: [`Alloc`](./common_types.md#alloc-mappingaddressaccount) + +Account allocation for verification after all the blocks have been processed. + +### `FixtureEngineNewPayload` + +#### - `executionPayload`: [`FixtureExecutionPayload`](#fixtureexecutionpayload) + +Execution payload. + +#### - `blob_versioned_hashes`: [`Optional`](./common_types.md#optional)`[`[`List`](./common_types.md#list)`[`[`Hash`](./common_types.md#hash)`]]` `(fork: Cancun)` + +List of hashes of the versioned blobs that are part of the execution payload. +They can mismatch the hashes of the versioned blobs in the execution payload, for negative-testing reasons. + +#### - `parentBeaconBlockRoot`: [`Optional`](./common_types.md#optional)`[`[`Hash`](./common_types.md#hash)`]` `(fork: Cancun)` + +Hash of the parent beacon block root. + +#### - `validationError`: [`TransactionException`](./exceptions.md#transactionexception)` | `[`BlockException`](./exceptions.md#blockexception) + +Validation error expected when executing the payload. + +When the payload is valid, this field is not present, and a `VALID` status is +expected in the `status` field of +[PayloadStatusV1](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#payloadstatusv1). + +If this field is present, the `status` field of +[PayloadStatusV1](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#payloadstatusv1) +is expected to be `INVALID`. + +#### - `version`: [`Number`](./common_types.md#number) + +Version of the `engine_newPayloadVX` directive to use to deliver the payload. + +#### - `errorCode`: [`Optional`](./common_types.md#optional)`[`[`Number`](./common_types.md#number)`]` + +Error code to be returned by the `engine_newPayloadVX` directive. + +### `FixtureExecutionPayload` + +#### - `parentHash`: [`Hash`](./common_types.md#hash) + +Hash of the parent block. + +#### - `feeRecipient`: [`Address`](./common_types.md#address) + +Address of the account that will receive the rewards for building the block. + +#### - `stateRoot`: [`Hash`](./common_types.md#hash) + +Root hash of the state trie. + +#### - `receiptsRoot`: [`Hash`](./common_types.md#hash) + +Root hash of the receipts trie. 
+ +#### - `logsBloom`: [`Bloom`](./common_types.md#bloom) + +Bloom filter composed of the logs of all the transactions in the block. + +#### - `blockNumber`: [`HexNumber`](./common_types.md#hexnumber) + +Number of the block. + +#### - `gasLimit`: [`HexNumber`](./common_types.md#hexnumber) + +Total gas limit of the block. + +#### - `gasUsed`: [`HexNumber`](./common_types.md#hexnumber) + +Total gas used by all the transactions in the block. + +#### - `timestamp`: [`HexNumber`](./common_types.md#hexnumber) + +Timestamp of the block. + +#### - `extraData`: [`Bytes`](./common_types.md#bytes) + +Extra data of the block. + +#### - `prevRandao`: [`Hash`](./common_types.md#hash) + +PrevRandao of the block. + +#### - `blockHash`: [`Hash`](./common_types.md#hash) + +Hash of the block. + +#### - `transactions`: [`List`](./common_types.md#list)`[`[`Bytes`](./common_types.md#bytes)`]` + +List of transactions in the block, in serialized format. + +#### - `withdrawals`: [`List`](./common_types.md#list)`[`[`FixtureWithdrawal`](#fixturewithdrawal)`]` + +List of withdrawals in the block. + +#### - `baseFeePerGas`: [`HexNumber`](./common_types.md#hexnumber) `(fork: London)` + +Base fee per gas of the block. + +#### - `blobGasUsed`: [`HexNumber`](./common_types.md#hexnumber) `(fork: Cancun)` + +Total blob gas used by all the transactions in the block. + +#### - `excessBlobGas`: [`HexNumber`](./common_types.md#hexnumber) `(fork: Cancun)` + +Excess blob gas of the block used to calculate the blob fee per gas for this block. + +### `FixtureWithdrawal` + +#### - `index`: [`HexNumber`](./common_types.md#hexnumber) + +Index of the withdrawal + +#### - `validatorIndex`: [`HexNumber`](./common_types.md#hexnumber) + +Withdrawing validator index + +#### - `address`: [`Address`](./common_types.md#address) + +Address to withdraw to + +#### - `amount`: [`HexNumber`](./common_types.md#hexnumber) + +Amount of the withdrawal diff --git a/docs/consuming_tests/common_types.md b/docs/consuming_tests/common_types.md new file mode 100644 index 0000000000..176175551e --- /dev/null +++ b/docs/consuming_tests/common_types.md @@ -0,0 +1,339 @@ +# Common Types + +## Basic Types + +### `Address` + +[Bytes](#bytes) of a 20-byte fixed length. + +### `Bloom` + +[Bytes](#bytes) of a 256-byte fixed length. + +### `Bytes` + +Hexadecimal representation of binary data of any length encoded as a JSON string, with a "0x" prefix. + +### `EmptyAddress` + +An empty JSON string `""`, used to represent an empty address. E.g. in the `to` field of a transaction when it is a contract creation. + +### `Hash` + +[Bytes](#bytes) of a 32-byte fixed length. + +### `HeaderNonce` + +[Bytes](#bytes) of a 8-byte fixed length. + +### `HexNumber` + +Hexadecimal number with "0x" prefix encoded as a JSON string. + +### `List` + +A JSON array where each element is a specific type, also defined in this document. +E.g. `List[Address]` is a JSON array where each element is an Ethereum address. + +### `Mapping` + +A JSON object where the keys and values are specific types, also defined in this document. +E.g. `Mapping[Address, Account]` is a JSON object where the keys are Ethereum addresses, and the values are Ethereum accounts. + +### `Number` + +Decimal number encoded as a JSON string. + +### `Optional` + +Marks a field as optional, meaning that the field can be missing from the JSON object. + +### `ZeroPaddedHexNumber` + +Hexadecimal number with "0x" prefix encoded as a JSON string, with a single zero used to pad odd number of digits, and zero represented as "0x00". 
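+
+For illustration, the same quantities encoded with the number types defined above are shown below (the keys in this sketch are placeholders and are not part of any fixture format):
+
+```json
+{
+  "fiveHundredAsNumber": "500",
+  "fiveHundredAsHexNumber": "0x1f4",
+  "fiveHundredAsZeroPaddedHexNumber": "0x01f4",
+  "zeroAsZeroPaddedHexNumber": "0x00"
+}
+```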
+ +## Composite Types + +### `Storage`: [`Mapping`](#mapping)`[`[`Hash`](#hash)`,`[`Hash`](#hash)`]` + +Storage represented as a JSON object, where the keys and values are represented with the [`Hash`](#hash) type. + +### `Account` + +An Ethereum account represented as a JSON object with the following fields: + +#### - `balance`: [`ZeroPaddedHexNumber`](#zeropaddedhexnumber) + +Balance of the account. + +#### - `nonce`: [`ZeroPaddedHexNumber`](#zeropaddedhexnumber) + +Nonce of the account. + +#### - `code`: [`Bytes`](#bytes) + +Code of the account. + +#### - `storage`: [`Storage`](#storage-mappinghashhash) + +Storage of the account. + +### `Alloc`: [`Mapping`](#mapping)`[`[`Address`](#address)`,`[`Account`](#account)`]` + +State allocation represented as a JSON object, where the keys are the addresses of the accounts, and the values are the accounts. + +## Fork + +Fork type is represented as a JSON string that can be set to one of the following values: + +### `"Frontier"` + +- Chain ID: `0x00` + +### `"Homestead"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` + +### `"Byzantium"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` + +### `"Constantinople"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` + +### `"ConstantinopleFix"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` + +### `"Istanbul"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` + +### `"MuirGlacier"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` + +### `"Berlin"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` + +### `"BerlinToLondonAt5"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x05` + +### `"London"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` + +### `"ArrowGlacier"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: 
`0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` + +### `"GrayGlacier"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` +- Gray Glacier Block: `0x00` + +### `"Merge"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` +- Gray Glacier Block: `0x00` +- Terminal Total Difficulty: `0x00` + +### `"MergeToShanghaiAtTime15k"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` +- Gray Glacier Block: `0x00` +- Terminal Total Difficulty: `0x00` +- Shanghai Time: `0x3a98` + +### `"Shanghai"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` +- Gray Glacier Block: `0x00` +- Terminal Total Difficulty: `0x00` +- Shanghai Time: `0x00` + +### `"ShanghaiToCancunAtTime15k"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` +- Gray Glacier Block: `0x00` +- Terminal Total Difficulty: `0x00` +- Shanghai Time: `0x0` +- Cancun Time: `0x3a98` + +### `"Cancun"` + +- Chain ID: `0x01` +- Homestead Block: `0x00` +- EIP150 Block: `0x00` +- EIP155 Block: `0x00` +- EIP158 Block: `0x00` +- DAO Fork Block: `0x00` +- Byzantium Block: `0x00` +- Constantinople Block: `0x00` +- Constantinople Fix Block: `0x00` +- Istanbul Block: `0x00` +- Muir Glacier Block: `0x00` +- Berlin Block: `0x00` +- London Block: `0x00` +- Arrow Glacier Block: `0x00` +- Gray Glacier Block: `0x00` +- Terminal Total Difficulty: `0x00` +- Shanghai Time: `0x00` +- Cancun Time: `0x00` diff --git a/docs/consuming_tests/exceptions.md b/docs/consuming_tests/exceptions.md new file mode 100644 index 0000000000..76dcec5672 --- /dev/null +++ b/docs/consuming_tests/exceptions.md @@ -0,0 +1,22 @@ +# Exceptions + +Exception types are represented as a JSON string in the test fixtures. 
+ +The exception converted into a string is composed of the exception type name, +followed by a period, followed by the specific exception name. + +For example, the exception `INSUFFICIENT_ACCOUNT_FUNDS` of type +`TransactionException` is represented as +`"TransactionException.INSUFFICIENT_ACCOUNT_FUNDS"`. + +The JSON string can contain multiple exception types, separated by the `|` +character, denoting that the transaction or block can throw either one of +the exceptions. + +## `TransactionException` + +::: ethereum_test_tools.TransactionException + +## `BlockException` + +::: ethereum_test_tools.BlockException diff --git a/docs/consuming_tests/index.md b/docs/consuming_tests/index.md new file mode 100644 index 0000000000..e511e54f78 --- /dev/null +++ b/docs/consuming_tests/index.md @@ -0,0 +1,65 @@ +# Consuming Tests (Fixtures) Generated by execution-spec-tests + +@ethereum/execution-spec-tests generates JSON test fixtures in different formats that can be consumed by execution clients either directly or via Hive: + +| Format | Consumed by the client | Location in `.tar.gz` release | +| --- | --- | --- | +| [State Tests](./state_test.md) | directly via a `statetest`-like command
(e.g., [go-ethereum/cmd/evm/staterunner.go](https://github.com/ethereum/go-ethereum/blob/509a64ffb9405942396276ae111d06f9bded9221/cmd/evm/staterunner.go#L35)) | `./fixtures/state_tests/` | +| [Blockchain Tests](./blockchain_test.md) | directly via a `blocktest`-like command
(e.g., [go-ethereum/cmd/evm/blockrunner.go](https://github.com/ethereum/go-ethereum/blob/509a64ffb9405942396276ae111d06f9bded9221/cmd/evm/blockrunner.go#L39)) | `./fixtures/blockchain_tests/` | +| [Blockchain Hive Tests](./blockchain_test_hive.md) | in the [Hive `pyspec` simulator](https://github.com/ethereum/hive/tree/master/simulators/ethereum/pyspec#readme) via the Engine API and other RPC endpoints | `./fixtures/blockchain_tests_hive/` | + +Here's a top-level comparison of the different methods of consuming tests: + +| Consumed via | Scope | Pros | Cons | +| --- | --- | --- | --- | +| `statetest` or blocktest-like command | Module test | - Fast feedback loop
- Less complex | - Smaller coverage scope
- Requires a dedicated interface to the client EVM to consume the JSON fixtures and execute tests | +| `hive --sim ethereum/pyspec` | System test / Integration test | - Wider Coverage Scope
- Tests more of the client stack | - Slower feedback loop
- Harder to debug
- Post-Merge forks only (requires the Engine API) | + +!!! note "Running `blocktest`, `statetest`, directly within the execution-spec-tests framework" + + It's possible to execute `evm blocktest` directly within the execution-spec-tests framework. This is intended to verify fixture generation, see [Debugging `t8n` Tools](../getting_started/debugging_t8n_tools.md). + +!!! note "Generating test fixtures using a `t8n` tool via `fill` is not considered to be the actual test" + + The `fill` command uses `t8n` tools to generate fixtures. Whilst this will provide basic sanity checking of EVM behavior and a sub-set of post conditions are typically checked within test cases, it is not considered the actual test. The actual test is the execution of the fixture against the EVM which will check the entire post allocation and typically use different code paths than `t8n` commands. + +## Release Formats + +The @ethereum/execution-spec-tests repository provides [releases](https://github.com/ethereum/execution-spec-tests/releases) of fixtures in various formats (as of 2023-10-16): + +| Release Artifact | Consumer | Fork/feature scope | +| ------------------------------ | -------- | ------------------ | +| `fixtures.tar.gz` | Clients | All tests until the last stable fork ("must pass") | +| `fixtures_develop.tar.gz` | Clients | All tests until the last development fork | + +## Obtaining the Most Recent Release Artifacts + +Artifacts can be downloaded directly from [the release page](https://github.com/ethereum/execution-spec-tests/releases). The following script demonstrates how the most recent release version of a specific artifact can be downloaded using the Github API: + +```bash +#!/bin/bash + +# requires jq +# sudo apt install jq + +# The following two artifacts are intended for consumption by clients: +# - fixtures.tar.gz: Generated up to the last deployed fork. +# - fixtures_develop.tar.gz: Generated up to and including the latest dev fork. +# As of Oct 2023, dev is Cancun, deployed is Shanghai. + +ARTIFACT="fixtures_develop.tar.gz" + +OWNER="ethereum" +REPO="execution-spec-tests" + +DOWNLOAD_URL=$(curl -s https://api.github.com/repos/$OWNER/$REPO/releases/latest \ + | jq -r '.assets[] | select(.name=="'$ARTIFACT'").browser_download_url') + +# Sanity check for the download URL: contains a version tag prefixed with "v" +if [[ "$DOWNLOAD_URL" =~ v[0-9]+\.[0-9]+\.[0-9]+ ]]; then + curl -LO $DOWNLOAD_URL +else + echo "Error: URL does not contain a valid version tag (URL: ${DOWNLOAD_URL})." + exit 1 +fi +``` diff --git a/docs/consuming_tests/state_test.md b/docs/consuming_tests/state_test.md new file mode 100644 index 0000000000..2505c344d2 --- /dev/null +++ b/docs/consuming_tests/state_test.md @@ -0,0 +1,186 @@ +# State Tests + +The State Test fixture format tests are included in the fixtures subdirectory `state_tests`. + +These are produced by the `StateTest` and `StateTestOnly` test specs. + +## Description + +The state test fixture format is used to test the state transition function of the Ethereum Virtual Machine (EVM). + +It does so by defining a transaction, a pre-execution state, and a post-execution state, and verifying that the transaction execution results in the expected post-execution state. + +A single JSON fixture file is composed of a JSON object where each key-value pair is a different [`Fixture`](#fixture) test object, with the key string representing the test name. + +The JSON file path plus the test name are used as the unique test identifier. 
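+For illustration, the following heavily abbreviated sketch (with a hypothetical test name and `...` placeholders for omitted content) shows the top-level layout of a state test fixture file:
+
+```json
+{
+    "tests/example/test_example.py::test_example[fork_Cancun]": {
+        "env": { "...": "..." },
+        "pre": { "...": "..." },
+        "transaction": { "...": "..." },
+        "post": {
+            "Cancun": [
+                { "indexes": { "data": 0, "gas": 0, "value": 0 }, "...": "..." }
+            ]
+        }
+    }
+}
+```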
+ +As opposed to other fixture formats, the state test fixture format could contain multiple test vectors per test object, each represented by an element in the mapping of lists of the `post` field. + +However tests generated by the `execution-spec-tests` repository do **not** use this feature, as every single test object contains only a single test vector. + +## Consumption + +For each [`Fixture`](#-fixture) test object in the JSON fixture file, perform the following steps: + +1. Use [`pre`](#-pre-alloc) as the starting state allocation of the execution environment for the test. +2. Use [`env`](#-env-fixtureenvironment) to configure the current execution environment. +3. For each [`Fork`](./common_types.md#fork) key of [`post`](#-post-mappingforklist-fixtureforkpost) in the test, and for each of the elements of the list of [`FixtureForkPost`](#fixtureforkpost) values: + + 1. Configure the execution fork schedule according to the current [`Fork`](./common_types.md#fork) key. + 2. Using the [`indexes`](#-indexes-fixtureforkpostindexes) values, and the [`transaction`](#-transaction-fixturetransaction) object, decode the transaction to be executed. + 3. If the serialized version of the decoded transaction does not match [`txbytes`](#-txbytes-bytes), fail the test. + 4. Attempt to apply the transaction using the current execution environment: + + 1. If the transaction could not be applied to the current execution context: + - If [`expectException`](#-expectexception-str) is empty, fail the test. + - If [`expectException`](#-expectexception-str) is not empty, revert the state to the pre-state. + 2. If the transaction could be applied to the current execution context: + - If [`expectException`](#-expectexception-str) is not empty, fail the test. + + 5. Compare the resulting post-state root with the expected post-state root contained in the [`hash`](#-hash-hash) field of the current [`FixtureForkPost`](#fixtureforkpost), and fail the test if they do not match. + 6. Compare the resulting logs hash with the expected logs contained in the [`logs`](#-logs-hash) field of the current [`FixtureForkPost`](#fixtureforkpost), and fail the test if they do not match. + +## Structures + +### `Fixture` + +#### - `env`: [`FixtureEnvironment`](#fixtureenvironment) + +Execution environment description for the test. + +#### - `pre`: [`Alloc`](./common_types.md#alloc-mappingaddressaccount) + +Starting account allocation for the test. + +#### - `transaction`: [`FixtureTransaction`](#fixturetransaction) + +Transaction to be executed. + +#### - `post`: [`Mapping`](./common_types.md#mapping)`(`[`Fork`](./common_types.md#fork)`,`[`List`](./common_types.md#list)`[` [`FixtureForkPost`](#fixtureforkpost) `])` + +Mapping of lists of post for verification per fork, where each element represents a single possible outcome of the transaction execution after being applied to the `pre`. + +### `FixtureEnvironment` + +#### - `currentCoinbase`: [`Address`](./common_types.md#address) + +The address of the account that will receive the rewards for building the block. + +#### - `currentGasLimit`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Total gas limit of the block where the transaction is executed. + +#### - `currentNumber`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Number of the block where the transaction is executed. + +#### - `currentDifficulty`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Difficulty of the block where the transaction is executed. 
+ +#### - `currentTimestamp`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Timestamp of the block where the transaction is executed. + +#### - `currentBaseFee`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) `(fork: London)` + +Base fee of the block where the transaction is executed. + +#### - `currentRandom`: [`Hash`](./common_types.md#hash) `(fork: Paris)` + +Randao value of the block where the transaction is executed. + +#### - `currentExcessBlobGas`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) `(fork: Cancun)` + +Excess blob gas of the block where the transaction is executed. + +### `FixtureTransaction` + +#### - `nonce`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Nonce of the account that sends the transaction + +#### - `gasPrice`: [`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber) + +Gas price for the transaction (Transaction types 0 & 1) + +#### - `maxPriorityFeePerGas`: [`HexNumber`](./common_types.md#hexnumber) + +Max priority fee per gas to pay (Transaction types 2 & 3) + +#### - `maxFeePerGas`: [`HexNumber`](./common_types.md#hexnumber) + +Max base fee per gas to pay (Transaction types 2 & 3) + +#### - `gasLimit`: [`List`](./common_types.md#list)`[`[`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber)`]` + +List of gas limits used on each indexed test combination + +#### - `to`: [`Address`](./common_types.md#address)` | `[`EmptyAddress`](./common_types.md#emptyaddress) + +Destination address of the transaction, or an empty string to create a contract + +#### - `value`: [`List`](./common_types.md#list)`[`[`ZeroPaddedHexNumber`](./common_types.md#zeropaddedhexnumber)`]` + +List of values used on each indexed test combination + +#### - `data`: [`List`](./common_types.md#list)`[`[`Bytes`](./common_types.md#bytes)`]` + +List of data bytes used on each indexed test combination + +#### - `accessLists`: [`List`](./common_types.md#list)`[`[`List`](./common_types.md#list)`[`[`Mapping`](./common_types.md#mapping)`[`[`Address`](./common_types.md#address)`,`[`List`](./common_types.md#list)`[`[`Hash`](./common_types.md#hash)`]]]]` `(fork: Berlin)` + +List of account access lists used on each indexed test combination (Transaction types 1, 2 & 3) + +#### - `maxFeePerBlobGas`: [`HexNumber`](./common_types.md#hexnumber) `(fork: Cancun)` + +Max fee per blob gas to pay (Transaction type 3) + +#### - `blobVersionedHashes`: [`List`](./common_types.md#list)`[`[`Hash`](./common_types.md#hash)`]` `(fork: Cancun)` + +List of blob versioned hashes the transaction includes (Transaction type 3) + +#### - `sender`: [`Address`](./common_types.md#address) + +Sender address of the transaction + +#### - `secretKey`: [`Hash`](./common_types.md#hash) + +Private key that must be used to sign the transaction + +### `FixtureForkPost` + +#### - `indexes`: [`FixtureForkPostIndexes`](#fixtureforkpostindexes) + +Transaction field indexes that must be used to obtain the transaction to be executed + +#### - `txbytes`: [`Bytes`](./common_types.md#bytes) + +Serialized bytes version of the [`FixtureTransaction`](#fixturetransaction) that was executed to produce this post-state + +#### - `hash`: [`Hash`](./common_types.md#hash) + +Expected state root value that results of applying the transaction to the pre-state + +#### - `logs`: [`Hash`](./common_types.md#hash) + +Hash of the RLP representation of the state logs result of applying the transaction to the pre-state +(TODO: double-check this.) 
+ +#### - `expectException`: [`TransactionException`](./exceptions.md#transactionexception) + +Exception that is expected to be thrown by the transaction execution (Field is missing if the transaction is expected to succeed) + +### `FixtureForkPostIndexes` + +#### - `data`: `int` + +Index of the data field in the transaction + +#### - `gas`: `int` + +Index of the gas limit field in the transaction + +#### - `value`: `int` + +Index of the value field in the transaction diff --git a/docs/getting_started/debugging_t8n_tools.md b/docs/getting_started/debugging_t8n_tools.md index 888308a79c..718c54c820 100644 --- a/docs/getting_started/debugging_t8n_tools.md +++ b/docs/getting_started/debugging_t8n_tools.md @@ -14,64 +14,45 @@ In particular, a script `t8n.sh` is generated for each call to the `t8n` command For example, running: ```console -fill tests/berlin/eip2930_access_list/ --fork Berlin \ +fill tests/berlin/eip2930_access_list/ --fork Berlin -m blockchain_test \ --evm-dump-dir=/tmp/evm-dump ``` will produce the directory structure: ```text -๐Ÿ“ /tmp/evm-dump/ -โ””โ”€โ”€ ๐Ÿ“ berlin__eip2930_access_list__test_acl__test_access_list - โ”œโ”€โ”€ ๐Ÿ“ fork_Berlin - โ”‚ โ”œโ”€โ”€ ๐Ÿ“ 0 - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ args.py - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“ input - โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ alloc.json - โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ env.json - โ”‚ โ”‚ โ”‚ โ””โ”€โ”€ ๐Ÿ“„ txs.json - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“ output - โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ alloc.json - โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ result.json - โ”‚ โ”‚ โ”‚ โ””โ”€โ”€ ๐Ÿ“„ txs.rlp - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ returncode.txt - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ stderr.txt - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ stdin.txt - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ stdout.txt - โ”‚ โ”‚ โ””โ”€โ”€ ๐Ÿ“„ t8n.sh - โ”‚ โ””โ”€โ”€ ๐Ÿ“ 1 - โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ args.py - โ”‚ โ”œโ”€โ”€ ๐Ÿ“ input - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ alloc.json - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ env.json - โ”‚ โ”‚ โ””โ”€โ”€ ๐Ÿ“„ txs.json - โ”‚ โ”œโ”€โ”€ ๐Ÿ“ output - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ alloc.json - โ”‚ โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ result.json - โ”‚ โ”‚ โ””โ”€โ”€ ๐Ÿ“„ txs.rlp - โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ returncode.txt - โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ stderr.txt - โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ stdin.txt - โ”‚ โ”œโ”€โ”€ ๐Ÿ“„ stdout.txt - โ”‚ โ””โ”€โ”€ ๐Ÿ“„ t8n.sh - โ””โ”€โ”€ ๐Ÿ“„ access_list.json +๐Ÿ“‚ /tmp/evm-dump +โ””โ”€โ”€ ๐Ÿ“‚ berlin__eip2930_access_list__test_acl__test_access_list + โ””โ”€โ”€ ๐Ÿ“‚ fork_Berlin_blockchain_test + โ””โ”€โ”€ ๐Ÿ“‚ 0 + ย ย  โ”œโ”€โ”€ ๐Ÿ“„ args.py + ย ย  โ”œโ”€โ”€ ๐Ÿ“‚ input + ย ย  โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“„ alloc.json + ย ย  โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“„ env.json + ย ย  โ”‚ย ย  โ””โ”€โ”€ ๐Ÿ“„ txs.json + ย ย  โ”œโ”€โ”€ ๐Ÿ“‚ output + ย ย  โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“„ alloc.json + ย ย  โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“„ result.json + ย ย  โ”‚ย ย  โ””โ”€โ”€ ๐Ÿ“„ txs.rlp + ย ย  โ”œโ”€โ”€ ๐Ÿ“„ returncode.txt + ย ย  โ”œโ”€โ”€ ๐Ÿ“„ stderr.txt + ย ย  โ”œโ”€โ”€ ๐Ÿ“„ stdin.txt + ย ย  โ”œโ”€โ”€ ๐Ÿ“„ stdout.txt + ย ย  โ””โ”€โ”€ ๐Ÿ“„ t8n.sh ``` -where the directories `0` and `1` correspond to the different calls made to the `t8n` tool executed during the test: - -- `0` corresponds to the call used to calculate the state root of the test's initial alloc (which is why it has an empty transaction list). -- `1` corresponds to the call used to execute the first transaction or block from the test. +where the directory `0` is the starting index of the different calls made to the `t8n` tool executed during the test, and since the test only contains one block, there is only one directory present. -Note, there may be more directories present `2`, `3`, `4`,... if the test executes more transactions/blocks. 
+Note, there may be more directories present `1`, `2`, `3`,... if the test executes more blocks. Each directory contains files containing information corresponding to the call, for example, the `args.py` file contains the arguments passed to the `t8n` command and the `output/alloc.json` file contains the output of the `t8n` command's `--output-alloc` flag. ### The `t8n.sh` Script -The `t8n.sh` script written to the debug directory can be used to reproduce a specific call made to the `t8n` command during the test session. For example, if a Besu `t8n-server` has been started on port `3001`, the request made by the test for first transaction can be reproduced as: +The `t8n.sh` script written to the debug directory can be used to reproduce a specific call made to the `t8n` command during the test session. For example, if a Besu `t8n-server` has been started on port `3001`, the request made by the test for first block can be reproduced as: ```console -/tmp/besu/test_access_list_fork_Berlin/1/t8n.sh 3001 +/tmp/besu/test_access_list_fork_Berlin/0/t8n.sh 3001 ``` which writes the response the from the `t8n-server` to the console output: @@ -110,7 +91,7 @@ The `--verify-fixtures` flag can be used to run go-ethereum's `evm blocktest` co For example, running: ```console -fill tests/berlin/eip2930_access_list/ --fork Berlin \ +fill tests/berlin/eip2930_access_list/ --fork Berlin -m blockchain_test \ --evm-dump-dir==/tmp/evm-dump \ --evm-bin=../evmone/build/bin/evmone-t8n \ --verify-fixtures-bin=../go-ethereum/build/bin/evm \ @@ -122,8 +103,8 @@ will additionally run the `evm blocktest` command on every JSON fixture file and ```text ๐Ÿ“‚ /tmp/evm-dump โ””โ”€โ”€ ๐Ÿ“‚ berlin__eip2930_access_list__test_acl__test_access_list - โ”œโ”€โ”€ ๐Ÿ“„ access_list.json - โ”œโ”€โ”€ ๐Ÿ“‚ fork_Berlin + โ”œโ”€โ”€ ๐Ÿ“„ fixtures.json + โ”œโ”€โ”€ ๐Ÿ“‚ fork_Berlin_blockchain_test โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“‚ 0 โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“„ args.py โ”‚ย ย  โ”‚ย ย  โ”œโ”€โ”€ ๐Ÿ“‚ input @@ -171,13 +152,22 @@ where the `verify_fixtures.sh` script can be used to reproduce the `evm blocktes --evm-dump-dir=/tmp/evm-dump ``` +5. Additionally use `--single-fixture-per-file` to improve the granularity of the reporting of the `evm blocktest` command by writing the fixture generated by each parametrized test case to its own file. + + ```console + fill --evm-bin=../evmone/build/bin/evmone-t8n \ + --verify-fixtures-bin=../go-ethereum/build/bin/evm \ + --evm-dump-dir=/tmp/evm-dump \ + --single-fixture-per-file + ``` + !!! note "Execution scope of `evm blocktest`" - Note, that `evm blocktest` is not executed per parametrized test case, but rather per test function. This is because each fixture JSON file contains all the parametrized test cases for one test function. + Note, by default, that `evm blocktest` is not executed per parametrized test case, but rather per test function. This is because each fixture JSON file contains fixtures for all the parametrized test cases for one test function. This means only one error will be reported, even if multiple fixtures fail within one fixture file. - Additionally, it is executed once for all test functions in one module only after all the test cases in the module have been executed and only report the first fixtures from the first failing test function[^1]. + Additionally, it is only executed after all the test cases in the module have been executed[^1] and will only report the first failing test fixture in all files, even if there are multiple failing fixture files. 
- This means that the feedback is not as granular as for test case execution. To improve granularity, and get feedback per fork, for example, the `--fork` flag can be used to only execute test cases for one particular fork. + This means, by default, that the feedback is not as granular as for test case execution. To improve granularity, and get feedback per parametrized test case use `--single-fixture-per-file`. [^1]: This limitation is required to enable support of the [`pytest-xdist` plugin](https://github.com/pytest-dev/pytest-xdist) for concurrent test execution across multiple CPUs. To achieve this we use the we apply the `--dist loadscope` xdist flag in our `pytest.ini`. diff --git a/docs/getting_started/executing_tests_command_line.md b/docs/getting_started/executing_tests_command_line.md index 4627cd08f7..1293659c43 100644 --- a/docs/getting_started/executing_tests_command_line.md +++ b/docs/getting_started/executing_tests_command_line.md @@ -71,7 +71,7 @@ fill tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py::test_warm_coinb or, for a test function and specific parameter combination: ```console -fill tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py::test_warm_coinbase_gas_usage[fork=Merge-DELEGATECALL] +fill tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py::test_warm_coinbase_gas_usage[fork_Paris-DELEGATECALL] ``` ## Execution for Development Forks @@ -111,12 +111,13 @@ fill --test-help Output: -```console +```text usage: fill [-h] [--evm-bin EVM_BIN] [--traces] [--verify-fixtures] [--verify-fixtures-bin VERIFY_FIXTURES_BIN] [--solc-bin SOLC_BIN] [--filler-path FILLER_PATH] [--output OUTPUT] [--flat-output] - [--enable-hive] [--evm-dump-dir EVM_DUMP_DIR] [--forks] [--fork FORK] - [--from FROM] [--until UNTIL] [--test-help] + [--single-fixture-per-file] [--enable-hive] + [--evm-dump-dir EVM_DUMP_DIR] [--forks] [--fork FORK] [--from FROM] + [--until UNTIL] [--test-help] options: -h, --help show this help message and exit @@ -127,11 +128,11 @@ Arguments defining evm executable behavior: --traces Collect traces of the execution information from the transition tool. --verify-fixtures Verify generated fixture JSON files using geth's evm - blocktest command. By default, the same evm binary as for - the t8n tool is used. A different (geth) evm binary may - be specified via --verify-fixtures-bin, this must be - specified if filling with a non-geth t8n tool that does - not support blocktest. + blocktest command. By default, the same evm binary as + for the t8n tool is used. A different (geth) evm binary + may be specified via --verify-fixtures-bin, this must + be specified if filling with a non-geth t8n tool that + does not support blocktest. --verify-fixtures-bin VERIFY_FIXTURES_BIN Path to an evm executable that provides the `blocktest` command. Default: The first (geth) 'evm' entry in PATH. @@ -145,9 +146,12 @@ Arguments defining filler location and output: Path to filler directives --output OUTPUT Directory to store the generated test fixtures. Can be deleted. - --flat-output Output each test case in the directory without the folder - structure. - --enable-hive Output test fixtures with the hive-specific properties. + --flat-output Output each test case in the directory without the + folder structure. + --single-fixture-per-file + Don't group fixtures in JSON files by test function; + write each fixture to its own file. This can be used to + increase the granularity of --verify-fixtures. 
Arguments defining debug behavior: --evm-dump-dir EVM_DUMP_DIR, --t8n-dump-dir EVM_DUMP_DIR @@ -164,4 +168,5 @@ Arguments related to running execution-spec-tests: and exit. Exit: After displaying help. + ``` diff --git a/docs/getting_started/quick_start.md b/docs/getting_started/quick_start.md index b34bf19dee..037908c312 100644 --- a/docs/getting_started/quick_start.md +++ b/docs/getting_started/quick_start.md @@ -7,7 +7,7 @@ The following requires a Python 3.10, 3.11 or 3.12 installation. -1. Ensure `go-ethereum`'s `evm` tool and `solc` ([0.8.20](https://github.com/ethereum/solidity/releases/tag/v0.8.20) or [0.8.21](https://github.com/ethereum/solidity/releases/tag/v0.8.21)) are in your path. Either build the required versions, or alternatively: +1. Ensure `go-ethereum`'s `evm` tool and `solc` ([0.8.20](https://github.com/ethereum/solidity/releases/tag/v0.8.20), [0.8.21](https://github.com/ethereum/solidity/releases/tag/v0.8.21), [0.8.22](https://github.com/ethereum/solidity/releases/tag/v0.8.22), [0.8.23](https://github.com/ethereum/solidity/releases/tag/v0.8.23) supported) are in your path. Either build the required versions, or alternatively: === "Ubuntu" @@ -84,7 +84,7 @@ The following requires a Python 3.10, 3.11 or 3.12 installation. 2. The corresponding fixture file has been generated: ```console - head fixtures/berlin/eip2930_access_list/acl/access_list.json + head fixtures/blockchain_tests/berlin/eip2930_access_list/acl/access_list.json ``` ## Next Steps diff --git a/docs/getting_started/repository_overview.md b/docs/getting_started/repository_overview.md index de26dd190d..ac571a7364 100644 --- a/docs/getting_started/repository_overview.md +++ b/docs/getting_started/repository_overview.md @@ -10,8 +10,9 @@ The most relevant folders and files in the repo are: โ”‚ โ”œโ”€โ”€ ๐Ÿ“ vm/ โ”‚ โ””โ”€โ”€ ๐Ÿ“ ... โ”œโ”€โ•ด๐Ÿ“ fixtures/ # default fixture output dir -โ”‚ โ”œโ”€โ”€ ๐Ÿ“ eips/ -โ”‚ โ”œโ”€โ”€ ๐Ÿ“ vm/ +โ”‚ โ”œโ”€โ”€ ๐Ÿ“ blockchain_tests/ +โ”‚ โ”œโ”€โ”€ ๐Ÿ“ blockchain_tests_hive/ +โ”‚ โ”œโ”€โ”€ ๐Ÿ“ state_tests/ โ”‚ โ””โ”€โ”€ ๐Ÿ“ ... โ”œโ”€โ•ด๐Ÿ“ src/ # library & framework packages โ”‚ โ”œโ”€โ”€ ๐Ÿ“ ethereum_test_fork/ diff --git a/docs/index.md b/docs/index.md index 515f051eaf..f92361d3f7 100644 --- a/docs/index.md +++ b/docs/index.md @@ -18,7 +18,7 @@ flowchart LR style G stroke:#F9A825,stroke-width:2px style H stroke:#F9A825,stroke-width:2px - subgraph ethereum/go-ethereum, ... + subgraph ethereum/go-ethereum C[evm t8n\nexternal executable] end @@ -56,6 +56,8 @@ The generated test fixtures can be used: 1. Directly by client teams' test frameworks, and, 2. In the integration tests executed in the @ethereum/hive framework. +More information on how to use and download the [released test fixtures](https://github.com/ethereum/execution-spec-tests/releases) can be found [here](consuming_tests/index.md). 
+ ## Transition Tool Support The following transition tools are supported by the framework: diff --git a/docs/navigation.md b/docs/navigation.md index 7c85aa99da..3a2d7baae0 100644 --- a/docs/navigation.md +++ b/docs/navigation.md @@ -15,8 +15,15 @@ * [Writing a New Test](writing_tests/writing_a_new_test.md) * [Referencing an EIP Spec Version](writing_tests/reference_specification.md) * [Verifying Changes Locally](writing_tests/verifying_changes.md) + * [Exception Tests](writing_tests/exception_tests.md) * Tutorials * [State Transition Tests](tutorials/state_transition.md) + * [Consuming Tests](consuming_tests/index.md) + * [State Tests](consuming_tests/state_test.md) + * [Blockchain Tests](consuming_tests/blockchain_test.md) + * [Blockchain Hive Tests](consuming_tests/blockchain_test_hive.md) + * [Common Types](consuming_tests/common_types.md) + * [Exceptions](consuming_tests/exceptions.md) * [Getting Help](getting_help/index.md) * [Developer Doc](dev/index.md) * [Documentation](dev/docs.md) diff --git a/docs/tutorials/state_transition.md b/docs/tutorials/state_transition.md index 001745016f..97f3e7af40 100644 --- a/docs/tutorials/state_transition.md +++ b/docs/tutorials/state_transition.md @@ -1,6 +1,6 @@ # State Transition Tests -This tutorial teaches you to create a state transition execution specification test. These tests verify that a blockchain, starting from a defined pre-state, will reach a specified post-state after executing a set of specific transactions. +This tutorial teaches you to create a state transition execution specification test. These tests verify that a starting pre-state will reach a specified post-state after executing a single transaction. ## Pre-requisites @@ -13,7 +13,7 @@ Before proceeding with this tutorial, it is assumed that you have prior knowledg ## Example Tests -The most effective method of learning how to write tests is to study a couple of straightforward examples. In this tutorial we will go over the [Yul](https://github.com/ethereum/execution-spec-tests/blob/main/tests/example/test_yul_example.py#L17) state test. +The most effective method of learning how to write tests is to study a couple of straightforward examples. In this tutorial we will go over the [Yul](https://github.com/ethereum/execution-spec-tests/blob/main/tests/homestead/yul/test_yul_example.py#L19) state test. ### Yul Test @@ -62,7 +62,7 @@ In this case, the decorator is a custom [pytest fixture](https://docs.pytest.org and to execute it for a specific fork range, we can provide the `--from` and `--until` command-line arguments: ```console - fill -k test_yul --from London --until Merge + fill -k test_yul --from London --until Paris ``` ```python @@ -83,7 +83,7 @@ The function definition ends when there is a line that is no longer indented. As env = Environment() ``` -This line specifies that `env` is an [`Environment`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/types.py#L445) object, and that we just use the default parameters. +This line specifies that `env` is an [`Environment`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/types.py#L878) object, and that we just use the default parameters. If necessary we can modify the environment to have different block gas limits, block numbers, etc. In most tests the defaults are good enough. 
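+For example, a minimal sketch (note: the exact `Environment` keyword-argument names used here are an assumption and may differ between framework versions; check the `Environment` definition linked above):
+
+```python
+env = Environment(
+    gas_limit=100_000_000,  # assumed field name for the block gas limit
+    number=5,               # assumed field name for the block number
+    timestamp=1_000,        # assumed field name for the block timestamp
+)
+```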
@@ -102,7 +102,7 @@ It is a [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dict "0x1000000000000000000000000000000000000000": Account( ``` -The keys of the dictionary are addresses (as strings), and the values are [`Account`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/types.py#L264) objects. +The keys of the dictionary are addresses (as strings), and the values are [`Account`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/types.py#L517) objects. You can read more about address fields [in the static test documentation](https://ethereum-tests.readthedocs.io/en/latest/test_filler/state_filler.html#address-fields). ```python @@ -145,7 +145,7 @@ Generally for execution spec tests the `sstore` instruction acts as a high-level } ``` -[`TestAddress`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/constants.py#L8) is an address for which the test filler has the private key. +[`TestAddress`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/constants.py#L7) is an address for which the test filler has the private key. This means that the test runner can issue a transaction as that contract. Of course, this address also needs a balance to be able to issue transactions. @@ -163,7 +163,7 @@ Of course, this address also needs a balance to be able to issue transactions. ) ``` -With the pre-state specified, we can add a description for the [`Transaction`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/types.py#L516). +With the pre-state specified, we can add a description for the [`Transaction`](https://github.com/ethereum/execution-spec-tests/blob/main/src/ethereum_test_tools/common/types.py#L1155). For more information, [see the static test documentation](https://ethereum-tests.readthedocs.io/en/latest/test_filler/state_filler.html#transaction) #### Post State @@ -185,10 +185,10 @@ In this case, we look at the storage of the contract we called and add to it wha #### State Test ```python - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) ``` -This line calls the wrapper to the `StateTest` object that provides all the objects required (for example, the fork parameter) in order to fill the test, generate the test fixtures and write them to file (by default, `./fixtures/example/yul_example/test_yul.json`). +This line calls the wrapper to the `StateTest` object that provides all the objects required (for example, the fork parameter) in order to fill the test, generate the test fixtures and write them to file (by default, `./fixtures/_tests/example/yul_example/test_yul.json`). ## Conclusion diff --git a/docs/tutorials/state_transition_bad_opcode.md b/docs/tutorials/state_transition_bad_opcode.md index 89a3e4b128..e770562a14 100644 --- a/docs/tutorials/state_transition_bad_opcode.md +++ b/docs/tutorials/state_transition_bad_opcode.md @@ -74,7 +74,7 @@ We don't know what will be the fork names after Shanghai, so it is easiest to sp We don't need to worry about forks prior to London because the decorator for this test says it is only valid from London. ```python - if fork in {"london", "merge"} and opc==0x5F: + if fork in {"london", "paris"} and opc==0x5F: ``` Python has a [set data structure](https://docs.python.org/3/tutorial/datastructures.html#sets). @@ -267,7 +267,7 @@ Over the entire for loop, it yields 255 different tests. 
yield StateTest( env=env, pre=pre, - txs=[tx], + tx=tx, post=(post_valid if opc_valid(opc) else post_invalid), ) ``` diff --git a/docs/writing_tests/exception_tests.md b/docs/writing_tests/exception_tests.md new file mode 100644 index 0000000000..a30756c2f2 --- /dev/null +++ b/docs/writing_tests/exception_tests.md @@ -0,0 +1,34 @@ +# Exception Tests + +Exception tests are a special type of test that verifies that an invalid transaction or an invalid block is correctly rejected with the expected error. + +## Creating an Exception Test + +To test for an exception, the test can use either of the following types from the `ethereum_test_tools` library: + +1. [`TransactionException`](../consuming_tests/exceptions.md#transactionexception): To be added to the `error` field of the `Transaction` object, and to the `exception` field of the `Block` object that includes the transaction; this exception type is used when a transaction is invalid, and therefore when included in a block, the block is expected to be invalid too. This is different from valid transactions where an exception during EVM execution is expected (e.g., a revert or out-of-gas), which can be included in valid blocks. + +   For an example, see [`eip3860_initcode.test_initcode.test_contract_creating_tx`](../tests/shanghai/eip3860_initcode/test_initcode/index.md#tests.shanghai.eip3860_initcode.test_initcode.test_contract_creating_tx), which raises `TransactionException.INITCODE_SIZE_EXCEEDED` in the case that the initcode size exceeds the maximum allowed size. + +2. [`BlockException`](../consuming_tests/exceptions.md#blockexception): To be added to the `exception` field of the `Block` object; this exception type is used when a block is expected to be invalid, but the exception is related to a block property, e.g., an invalid value of the block header. + +   For an example, see [`eip4844_blobs.test_excess_blob_gas.test_invalid_static_excess_blob_gas`](../tests/cancun/eip4844_blobs/test_excess_blob_gas/index.md#tests.cancun.eip4844_blobs.test_excess_blob_gas.test_invalid_static_excess_blob_gas), which raises `BlockException.INCORRECT_EXCESS_BLOB_GAS` in the case that the `excessBlobGas` remains unchanged + but the parent blobs included are not `TARGET_BLOBS_PER_BLOCK`. + +Although exceptions can be combined with the `|` operator to indicate that a test vector can throw either one of multiple exceptions, ideally the tester should aim to use only one exception per test vector, and only use multiple exceptions in the rare instance when it is not possible to know which exception will be thrown because it depends on the client implementation. + +## Adding a New Exception + +If a test requires a new exception because none of the existing ones is suitable, a new exception can be added to either the [`TransactionException`](../consuming_tests/exceptions.md#transactionexception) or [`BlockException`](../consuming_tests/exceptions.md#blockexception) class. + +The new exception should be added as a new enum value, and the attribute's docstring should describe the exception. + +The name of the exception should be unique and not used by any other exception. + +## Test Runner Behavior on Exception Tests + +When an exception is added to a test vector, the test runner must check that the transaction or block is rejected with the expected exception. + +The test runner must map the exception key to the corresponding error string that is expected to be returned by the client.
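+As a purely illustrative sketch of such a mapping (the client-side error strings below are hypothetical; every client reports its own strings):
+
+```python
+from ethereum_test_tools import BlockException, TransactionException
+
+# Hypothetical mapping from fixture exception keys to one client's error strings.
+CLIENT_ERROR_STRINGS = {
+    TransactionException.INITCODE_SIZE_EXCEEDED: "max initcode size exceeded",
+    BlockException.INCORRECT_EXCESS_BLOB_GAS: "invalid excessBlobGas",
+}
+
+
+def expected_error_string(exception: TransactionException | BlockException) -> str:
+    """Return the error string the client is expected to report for a fixture exception."""
+    return CLIENT_ERROR_STRINGS[exception]
+```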
+ +Exception mappings are particularly important in blockchain tests because the block can be invalid for multiple reasons, and the client returning a different error can mean that a verification in the client is faulty. diff --git a/docs/writing_tests/verifying_changes.md b/docs/writing_tests/verifying_changes.md index 24a5211e96..04dd7ae6a3 100644 --- a/docs/writing_tests/verifying_changes.md +++ b/docs/writing_tests/verifying_changes.md @@ -19,6 +19,12 @@ pip install tox Run tox, as executed in GitHub Actions, with: +```console +tox run-parallel +``` + +or, with sequential test environment execution and verbose output: + ```console tox ``` @@ -82,3 +88,38 @@ Verify: ```console tox -e docs ``` + +### Verifying Fixture Changes + +When writing a PR that modifies either the framework or test cases, it is important to verify that the changes do not cause any issues with the existing test cases. + +All filled fixtures contain a `hash` field in the `_info` object, which is the hash of the JSON string of the fixture. This hash can be used to verify that the fixture has not changed. + +The `hasher` command can be used to bulk-verify the hashes of all fixtures in a directory. + +It has the following options: + +| Flag | Description | +|--------------|-------------| +| `--files` / `-f` | Prints a single combined hash for each JSON fixture file recursively contained in a directory. | +| `--tests` / `-t` | Prints the hash of every single test vector in every JSON fixture file recursively contained in a directory. | +| `--root` / `-r` | Prints a single combined hash for all JSON fixture files recursively contained in a directory. | + +For a quick comparison between two fixture directories, the `--root` option can be used; if the output matches, the fixtures in the directories are identical: + +```console +hasher --root fixtures/ +hasher --root fixtures_new/ +``` + +If the output does not match, the `--files` option can be used to identify which files are different: + +```console +diff <(hasher --files fixtures/) <(hasher --files fixtures_new/) +``` + +And the `--tests` option can be used for an even more granular comparison: + +```console +diff <(hasher --tests fixtures/) <(hasher --tests fixtures_new/) +``` diff --git a/evm-config.yaml b/evm-config.yaml index b07bc928ed..5aef7c9e77 100644 --- a/evm-config.yaml +++ b/evm-config.yaml @@ -4,5 +4,5 @@ main: ref: master develop: impl: geth - repo: marioevz/go-ethereum - ref: cancun-t8n \ No newline at end of file + repo: ethereum/go-ethereum + ref: master \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 548421e3b0..512522bb88 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -81,8 +81,8 @@ markdown_extensions: - pymdownx.caret - pymdownx.details - pymdownx.emoji: - emoji_index: !!python/name:materialx.emoji.twemoji - emoji_generator: !!python/name:materialx.emoji.to_svg + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg - pymdownx.highlight - pymdownx.inlinehilite - pymdownx.keys diff --git a/pytest-framework.ini b/pytest-framework.ini index faaf929262..323b0a4b79 100644 --- a/pytest-framework.ini +++ b/pytest-framework.ini @@ -6,4 +6,5 @@ python_files= testpaths = src addopts = - -p pytester \ No newline at end of file + -p pytester + --ignore=src/pytest_plugins/test_filler/test_filler.py \ No newline at end of file diff --git a/pytest.ini b/pytest.ini index c6992ac390..42e18627b9 100644 --- a/pytest.ini +++ b/pytest.ini @@ -10,3 +10,4 @@ addopts
= -p pytest_plugins.test_help.test_help -m "not eip_version_check" --dist loadscope + --ignore tests/cancun/eip4844_blobs/point_evaluation_vectors/ diff --git a/setup.cfg b/setup.cfg index 91b2764489..b4e34369bb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -50,6 +50,8 @@ console_scripts = pyspelling_soft_fail = entry_points.pyspelling_soft_fail:main markdownlintcli2_soft_fail = entry_points.markdownlintcli2_soft_fail:main create_whitelist_for_flake8_spelling = entry_points.create_whitelist_for_flake8_spelling:main + evm_bytes_to_python = entry_points.evm_bytes_to_python:main + hasher = entry_points.hasher:main [options.extras_require] test = diff --git a/src/entry_points/evm_bytes_to_python.py b/src/entry_points/evm_bytes_to_python.py new file mode 100644 index 0000000000..8239146289 --- /dev/null +++ b/src/entry_points/evm_bytes_to_python.py @@ -0,0 +1,58 @@ +""" +Define an entry point wrapper for pytest. +""" + +import sys +from typing import Any, List, Optional + +from ethereum_test_tools import Macro +from ethereum_test_tools import Opcodes as Op + + +def process_evm_bytes(evm_bytes_hex_string: Any) -> str: # noqa: D103 + if evm_bytes_hex_string.startswith("0x"): + evm_bytes_hex_string = evm_bytes_hex_string[2:] + + evm_bytes = bytearray(bytes.fromhex(evm_bytes_hex_string)) + + opcodes_strings: List[str] = [] + + while evm_bytes: + opcode_byte = evm_bytes.pop(0) + + opcode: Optional[Op] = None + for op in Op: + if not isinstance(op, Macro) and op.int() == opcode_byte: + opcode = op + break + + if opcode is None: + raise ValueError(f"Unknown opcode: {opcode_byte}") + + if opcode.data_portion_length > 0: + data_portion = evm_bytes[: opcode.data_portion_length] + evm_bytes = evm_bytes[opcode.data_portion_length :] + opcodes_strings.append(f'Op.{opcode._name_}("0x{data_portion.hex()}")') + else: + opcodes_strings.append(f"Op.{opcode._name_}") + + return " + ".join(opcodes_strings) + + +def print_help(): # noqa: D103 + print("Usage: evm_bytes_to_python ") + + +def main(): # noqa: D103 + if len(sys.argv) != 2: + print_help() + sys.exit(1) + if sys.argv[1] in ["-h", "--help"]: + print_help() + sys.exit(0) + evm_bytes_hex_string = sys.argv[1] + print(process_evm_bytes(evm_bytes_hex_string)) + + +if __name__ == "__main__": + main() diff --git a/src/entry_points/hasher.py b/src/entry_points/hasher.py new file mode 100644 index 0000000000..54030357a7 --- /dev/null +++ b/src/entry_points/hasher.py @@ -0,0 +1,135 @@ +""" +Simple CLI tool to hash a directory of JSON fixtures. +""" + +import argparse +import hashlib +import json +from dataclasses import dataclass, field +from enum import IntEnum, auto +from pathlib import Path +from typing import Dict, List, Optional + + +class HashableItemType(IntEnum): + """ + Represents the type of a hashable item. + """ + + FOLDER = 0 + FILE = auto() + TEST = auto() + + +@dataclass(kw_only=True) +class HashableItem: + """ + Represents an item that can be hashed containing other items that can be hashed as well. + """ + + type: HashableItemType + parents: List[str] = field(default_factory=list) + root: Optional[bytes] = None + items: Optional[Dict[str, "HashableItem"]] = None + + def hash(self) -> bytes: + """ + Return the hash of the item. 
+ """ + if self.root is not None: + return self.root + if self.items is None: + raise ValueError("No items to hash") + all_hash_bytes = b"" + for _, item in sorted(self.items.items()): + item_hash_bytes = item.hash() + all_hash_bytes += item_hash_bytes + return hashlib.sha256(all_hash_bytes).digest() + + def print( + self, *, name: str, level: int = 0, print_type: Optional[HashableItemType] = None + ) -> None: + """ + Print the hash of the item and sub-items. + """ + next_level = level + print_name = name + if level == 0 and self.parents: + separator = "::" if self.type == HashableItemType.TEST else "/" + print_name = f"{'/'.join(self.parents)}{separator}{name}" + if print_type is None or self.type >= print_type: + next_level += 1 + print(f"{' ' * level}{print_name}: 0x{self.hash().hex()}") + + if self.items is not None: + for key, item in sorted(self.items.items()): + item.print(name=key, level=next_level, print_type=print_type) + + @classmethod + def from_json_file(cls, *, file_path: Path, parents: List[str]) -> "HashableItem": + """ + Create a hashable item from a JSON file. + """ + items = {} + with file_path.open("r") as f: + data = json.load(f) + for key, item in sorted(data.items()): + assert isinstance(item, dict), f"Expected dict, got {type(item)}" + assert "_info" in item, f"Expected _info in {key}" + assert "hash" in item["_info"], f"Expected hash in {key}" + assert isinstance( + item["_info"]["hash"], str + ), f"Expected hash to be a string in {key}, got {type(item['_info']['hash'])}" + item_hash_bytes = bytes.fromhex(item["_info"]["hash"][2:]) + items[key] = cls( + type=HashableItemType.TEST, + root=item_hash_bytes, + parents=parents + [file_path.name], + ) + return cls(type=HashableItemType.FILE, items=items, parents=parents) + + @classmethod + def from_folder(cls, *, folder_path: Path, parents: List[str] = []) -> "HashableItem": + """ + Create a hashable item from a folder. + """ + items = {} + for file_path in sorted(folder_path.iterdir()): + if file_path.is_file() and file_path.suffix == ".json": + item = cls.from_json_file( + file_path=file_path, parents=parents + [folder_path.name] + ) + items[file_path.name] = item + elif file_path.is_dir(): + item = cls.from_folder(folder_path=file_path, parents=parents + [folder_path.name]) + items[file_path.name] = item + return cls(type=HashableItemType.FOLDER, items=items, parents=parents) + + +def main() -> None: + """ + Main function. + """ + parser = argparse.ArgumentParser(description="Hash folders of JSON fixtures.") + + parser.add_argument("folder_path", type=Path, help="The path to the JSON fixtures directory") + parser.add_argument("--files", "-f", action="store_true", help="Print hash of files") + parser.add_argument("--tests", "-t", action="store_true", help="Print hash of tests") + parser.add_argument("--root", "-r", action="store_true", help="Only print hash of root folder") + + args = parser.parse_args() + + item = HashableItem.from_folder(folder_path=args.folder_path) + + if args.root: + print(f"0x{item.hash().hex()}") + return + + print_type: Optional[HashableItemType] = None + + if args.files: + print_type = HashableItemType.FILE + elif args.tests: + print_type = HashableItemType.TEST + + item.print(name=args.folder_path.name, print_type=print_type) diff --git a/src/entry_points/tests/__init__.py b/src/entry_points/tests/__init__.py new file mode 100644 index 0000000000..6a7a6059f9 --- /dev/null +++ b/src/entry_points/tests/__init__.py @@ -0,0 +1,3 @@ +""" +Basic pytest applications `entry_points` unit tests. 
+""" diff --git a/src/entry_points/tests/test_evm_bytes_to_python.py b/src/entry_points/tests/test_evm_bytes_to_python.py new file mode 100644 index 0000000000..529c31dd6b --- /dev/null +++ b/src/entry_points/tests/test_evm_bytes_to_python.py @@ -0,0 +1,56 @@ +""" +Test suite for `entry_points.evm_bytes_to_python` module. +""" + +import pytest +from evm_bytes_to_python import process_evm_bytes + +from ethereum_test_tools import Macro +from ethereum_test_tools import Opcodes as Op + +basic_vector = [ + "0x60008080808061AAAA612d5ff1600055", + 'Op.PUSH1("0x00") + Op.DUP1 + Op.DUP1 + Op.DUP1 + Op.DUP1 + Op.PUSH2("0xaaaa") + Op.PUSH2("0x2d5f") + Op.CALL + Op.PUSH1("0x00") + Op.SSTORE', # noqa: E501 +] +complex_vector = [ + "0x7fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebf5f527fc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedf6020527fe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff60405260786040356020355f35608a565b5f515f55602051600155604051600255005b5e56", # noqa: E501 + 'Op.PUSH32("0xa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebf") + Op.PUSH0 + Op.MSTORE + Op.PUSH32("0xc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedf") + Op.PUSH1("0x20") + Op.MSTORE + Op.PUSH32("0xe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff") + Op.PUSH1("0x40") + Op.MSTORE + Op.PUSH1("0x78") + Op.PUSH1("0x40") + Op.CALLDATALOAD + Op.PUSH1("0x20") + Op.CALLDATALOAD + Op.PUSH0 + Op.CALLDATALOAD + Op.PUSH1("0x8a") + Op.JUMP + Op.JUMPDEST + Op.PUSH0 + Op.MLOAD + Op.PUSH0 + Op.SSTORE + Op.PUSH1("0x20") + Op.MLOAD + Op.PUSH1("0x01") + Op.SSTORE + Op.PUSH1("0x40") + Op.MLOAD + Op.PUSH1("0x02") + Op.SSTORE + Op.STOP + Op.JUMPDEST + Op.MCOPY + Op.JUMP', # noqa: E501 +] + + +@pytest.mark.parametrize( + "evm_bytes, python_opcodes", + [ + (basic_vector[0], basic_vector[1]), + (basic_vector[0][2:], basic_vector[1]), # no "0x" prefix + (complex_vector[0], complex_vector[1]), + (complex_vector[0][2:], complex_vector[1]), # no "0x" prefix + ], +) +def test_evm_bytes_to_python(evm_bytes, python_opcodes): + """Test evm_bytes_to_python using the basic and complex vectors""" + assert process_evm_bytes(evm_bytes) == python_opcodes + + +@pytest.mark.parametrize("opcode", [op for op in Op if not isinstance(op, Macro)]) +def test_individual_opcodes(opcode): + """Test each opcode individually""" + if opcode.data_portion_length > 0: + expected_output = f'Op.{opcode._name_}("0x")' + else: + expected_output = f"Op.{opcode._name_}" + + bytecode = opcode.int().to_bytes(1, byteorder="big").hex() + assert process_evm_bytes("0x" + bytecode) == expected_output + + +def test_invalid_opcode(): + """Invalid hex string""" + with pytest.raises(ValueError): + process_evm_bytes("0xZZ") + + +def test_unknown_opcode(): + """Opcode not defined in Op""" + with pytest.raises(ValueError): + process_evm_bytes("0x0F") diff --git a/src/ethereum_test_forks/__init__.py b/src/ethereum_test_forks/__init__.py index 20831ca42c..29c76a0496 100644 --- a/src/ethereum_test_forks/__init__.py +++ b/src/ethereum_test_forks/__init__.py @@ -15,24 +15,26 @@ Homestead, Istanbul, London, - Merge, MuirGlacier, + Paris, Shanghai, ) from .forks.transition import ( BerlinToLondonAt5, - MergeToShanghaiAtTime15k, + ParisToShanghaiAtTime15k, ShanghaiToCancunAtTime15k, ) from .helpers import ( InvalidForkError, forks_from, forks_from_until, + get_closest_fork_with_solc_support, get_deployed_forks, get_development_forks, get_forks, + get_forks_with_solc_support, + get_forks_without_solc_support, 
get_transition_forks, - is_fork, transition_fork_from_to, transition_fork_to, ) @@ -52,8 +54,8 @@ "InvalidForkError", "Istanbul", "London", - "Merge", - "MergeToShanghaiAtTime15k", + "Paris", + "ParisToShanghaiAtTime15k", "MuirGlacier", "Shanghai", "ShanghaiToCancunAtTime15k", @@ -64,7 +66,9 @@ "get_deployed_forks", "get_development_forks", "get_forks", - "is_fork", + "get_forks_with_solc_support", + "get_forks_without_solc_support", + "get_closest_fork_with_solc_support", "transition_fork_from_to", "transition_fork_to", ] diff --git a/src/ethereum_test_forks/base_fork.py b/src/ethereum_test_forks/base_fork.py index 2ddfbe13c2..c8823fed33 100644 --- a/src/ethereum_test_forks/base_fork.py +++ b/src/ethereum_test_forks/base_fork.py @@ -1,8 +1,11 @@ """ Abstract base class for Ethereum forks """ + from abc import ABC, ABCMeta, abstractmethod -from typing import Any, List, Mapping, Optional, Protocol, Type +from typing import Any, ClassVar, List, Mapping, Optional, Protocol, Type + +from semver import Version from .base_decorators import prefer_transition_to_method @@ -36,6 +39,30 @@ def __repr__(cls) -> str: """ return cls.name() + def __gt__(cls, other: "BaseForkMeta") -> bool: + """ + Compare if a fork is newer than some other fork. + """ + return cls != other and other.__subclasscheck__(cls) + + def __ge__(cls, other: "BaseForkMeta") -> bool: + """ + Compare if a fork is newer than or equal to some other fork. + """ + return other.__subclasscheck__(cls) + + def __lt__(cls, other: "BaseForkMeta") -> bool: + """ + Compare if a fork is older than some other fork. + """ + return cls != other and cls.__subclasscheck__(other) + + def __le__(cls, other: "BaseForkMeta") -> bool: + """ + Compare if a fork is older than or equal to some other fork. + """ + return cls.__subclasscheck__(other) + class BaseFork(ABC, metaclass=BaseForkMeta): """ @@ -44,13 +71,26 @@ class BaseFork(ABC, metaclass=BaseForkMeta): Must contain all the methods used by every fork. """ - @classmethod - @abstractmethod - def fork(cls, block_number: int = 0, timestamp: int = 0) -> str: + _transition_tool_name: ClassVar[Optional[str]] = None + _blockchain_test_network_name: ClassVar[Optional[str]] = None + _solc_name: ClassVar[Optional[str]] = None + _ignore: ClassVar[bool] = False + + def __init_subclass__( + cls, + *, + transition_tool_name: Optional[str] = None, + blockchain_test_network_name: Optional[str] = None, + solc_name: Optional[str] = None, + ignore: bool = False, + ) -> None: """ - Returns fork name as it's meant to be passed to the transition tool for execution. + Initializes the new fork with values that don't carry over to subclass forks. """ - pass + cls._transition_tool_name = transition_tool_name + cls._blockchain_test_network_name = blockchain_test_network_name + cls._solc_name = solc_name + cls._ignore = ignore # Header information abstract methods @classmethod @@ -109,6 +149,14 @@ def header_beacon_root_required(cls, block_number: int, timestamp: int) -> bool: """ pass + @classmethod + @abstractmethod + def blob_gas_per_blob(cls, block_number: int, timestamp: int) -> int: + """ + Returns the amount of blob gas used per blob for a given fork. 
+ """ + pass + @classmethod @abstractmethod def get_reward(cls, block_number: int = 0, timestamp: int = 0) -> int: @@ -136,9 +184,21 @@ def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[int]: @classmethod @prefer_transition_to_method @abstractmethod - def pre_allocation(cls, block_number: int = 0, timestamp: int = 0) -> Mapping: + def pre_allocation(cls) -> Mapping: """ - Returns required pre-allocation of accounts. + Returns required pre-allocation of accounts for any kind of test. + + This method must always call the `fork_to` method when transitioning, because the + allocation can only be set at genesis, and thus cannot be changed at transition time. + """ + pass + + @classmethod + @prefer_transition_to_method + @abstractmethod + def pre_allocation_blockchain(cls) -> Mapping: + """ + Returns required pre-allocation of accounts for any blockchain tests. This method must always call the `fork_to` method when transitioning, because the allocation can only be set at genesis, and thus cannot be changed at transition time. @@ -192,6 +252,47 @@ def name(cls) -> str: """ return cls.__name__ + @classmethod + def fork_at(cls, block_number: int = 0, timestamp: int = 0) -> Type["BaseFork"]: + """ + Returns the fork at the given block number and timestamp. + Useful only for transition forks, and it's a no-op for normal forks. + """ + return cls + + @classmethod + @abstractmethod + def transition_tool_name(cls, block_number: int = 0, timestamp: int = 0) -> str: + """ + Returns fork name as it's meant to be passed to the transition tool for execution. + """ + pass + + @classmethod + @abstractmethod + def solc_name(cls) -> str: + """ + Returns fork name as it's meant to be passed to the solc compiler. + """ + pass + + @classmethod + @abstractmethod + def solc_min_version(cls) -> Version: + """ + Returns the minimum version of solc that supports this fork. + """ + pass + + @classmethod + def blockchain_test_network_name(cls) -> str: + """ + Returns the network configuration name to be used in BlockchainTests for this fork. + """ + if cls._blockchain_test_network_name is not None: + return cls._blockchain_test_network_name + return cls.name() + @classmethod def is_deployed(cls) -> bool: """ @@ -202,6 +303,13 @@ def is_deployed(cls) -> bool: """ return True + @classmethod + def ignore(cls) -> bool: + """ + Returns whether the fork should be ignored during test generation. + """ + return cls._ignore + # Fork Type Fork = Type[BaseFork] diff --git a/src/ethereum_test_forks/forks/forks.py b/src/ethereum_test_forks/forks/forks.py index c13b41438a..ac1b9329fc 100644 --- a/src/ethereum_test_forks/forks/forks.py +++ b/src/ethereum_test_forks/forks/forks.py @@ -1,24 +1,45 @@ """ All Ethereum fork class definitions. """ + from typing import List, Mapping, Optional +from semver import Version + from ..base_fork import BaseFork # All forks must be listed here !!! in the order they were introduced !!! -class Frontier(BaseFork): +class Frontier(BaseFork, solc_name="homestead"): """ Frontier fork """ @classmethod - def fork(cls, block_number: int = 0, timestamp: int = 0) -> str: + def transition_tool_name(cls, block_number: int = 0, timestamp: int = 0) -> str: """ Returns fork name as it's meant to be passed to the transition tool for execution. """ + if cls._transition_tool_name is not None: + return cls._transition_tool_name return cls.name() + @classmethod + def solc_name(cls) -> str: + """ + Returns fork name as it's meant to be passed to the solc compiler. 
+ """ + if cls._solc_name is not None: + return cls._solc_name + return cls.name().lower() + + @classmethod + def solc_min_version(cls) -> Version: + """ + Returns the minimum version of solc that supports this fork. + """ + return Version.parse("0.8.20") + @classmethod def header_base_fee_required(cls, block_number: int = 0, timestamp: int = 0) -> bool: """ @@ -61,6 +82,13 @@ def header_blob_gas_used_required(cls, block_number: int = 0, timestamp: int = 0 """ return False + @classmethod + def blob_gas_per_blob(cls, block_number: int, timestamp: int) -> int: + """ + Returns the amount of blob gas used per blob for a given fork. + """ + return 0 + @classmethod def engine_new_payload_version( cls, block_number: int = 0, timestamp: int = 0 @@ -123,7 +151,16 @@ def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[int]: return [] @classmethod - def pre_allocation(cls, block_number: int = 0, timestamp: int = 0) -> Mapping: + def pre_allocation(cls) -> Mapping: + """ + Returns whether the fork expects pre-allocation of accounts + + Frontier does not require pre-allocated accounts + """ + return {} + + @classmethod + def pre_allocation_blockchain(cls) -> Mapping: """ Returns whether the fork expects pre-allocation of accounts @@ -182,7 +219,7 @@ def get_reward(cls, block_number: int = 0, timestamp: int = 0) -> int: return 2_000_000_000_000_000_000 -class ConstantinopleFix(Constantinople): +class ConstantinopleFix(Constantinople, solc_name="constantinople"): """ Constantinople Fix fork """ @@ -204,7 +241,7 @@ def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[int]: # Glacier forks skipped, unless explicitly specified -class MuirGlacier(Istanbul): +class MuirGlacier(Istanbul, solc_name="istanbul", ignore=True): """ Muir Glacier fork """ @@ -246,7 +283,7 @@ def tx_types(cls, block_number: int = 0, timestamp: int = 0) -> List[int]: # Glacier forks skipped, unless explicitly specified -class ArrowGlacier(London): +class ArrowGlacier(London, solc_name="london", ignore=True): """ Arrow Glacier fork """ @@ -254,7 +291,7 @@ class ArrowGlacier(London): pass -class GrayGlacier(ArrowGlacier): +class GrayGlacier(ArrowGlacier, solc_name="london", ignore=True): """ Gray Glacier fork """ @@ -262,29 +299,33 @@ class GrayGlacier(ArrowGlacier): pass -class Merge(London): +class Paris( + London, + transition_tool_name="Merge", + blockchain_test_network_name="Merge", +): """ - Merge fork + Paris (Merge) fork """ @classmethod def header_prev_randao_required(cls, block_number: int = 0, timestamp: int = 0) -> bool: """ - Prev Randao is required starting from Merge. + Prev Randao is required starting from Paris. """ return True @classmethod def header_zero_difficulty_required(cls, block_number: int = 0, timestamp: int = 0) -> bool: """ - Zero difficulty is required starting from Merge. + Zero difficulty is required starting from Paris. """ return True @classmethod def get_reward(cls, block_number: int = 0, timestamp: int = 0) -> int: """ - Merge updates the reward to 0. + Paris updates the reward to 0. 
""" return 0 @@ -293,12 +334,12 @@ def engine_new_payload_version( cls, block_number: int = 0, timestamp: int = 0 ) -> Optional[int]: """ - Starting at the merge, payloads can be sent through the engine API + Starting at Paris, payloads can be sent through the engine API """ return 1 -class Shanghai(Merge): +class Shanghai(Paris): """ Shanghai fork """ @@ -333,6 +374,13 @@ def is_deployed(cls): """ return False + @classmethod + def solc_min_version(cls) -> Version: + """ + Returns the minimum version of solc that supports this fork. + """ + return Version.parse("0.8.24") + @classmethod def header_excess_blob_gas_required(cls, block_number: int = 0, timestamp: int = 0) -> bool: """ @@ -354,6 +402,13 @@ def header_beacon_root_required(cls, block_number: int = 0, timestamp: int = 0) """ return True + @classmethod + def blob_gas_per_blob(cls, block_number: int, timestamp: int) -> int: + """ + Blobs are enabled started from Cancun. + """ + return 2**17 + @classmethod def tx_types(cls, block_number: int = 0, timestamp: int = 0) -> List[int]: """ @@ -369,9 +424,10 @@ def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[int]: return [0xA] + super(Cancun, cls).precompiles(block_number, timestamp) @classmethod - def pre_allocation(cls, block_number: int = 0, timestamp: int = 0) -> Mapping: + def pre_allocation_blockchain(cls) -> Mapping: """ - Cancun requires pre-allocation of the beacon root contract for EIP-4788 + Cancun requires pre-allocation of the beacon root contract for EIP-4788 on blockchain + type tests """ new_allocation = { 0x000F3DF6D732807EF1319FB7B8BB8522D0BEAC02: { @@ -381,7 +437,7 @@ def pre_allocation(cls, block_number: int = 0, timestamp: int = 0) -> Mapping: "5ffd5b62001fff42064281555f359062001fff015500", } } - return new_allocation | super(Cancun, cls).pre_allocation(block_number, timestamp) + return new_allocation | super(Cancun, cls).pre_allocation_blockchain() @classmethod def engine_new_payload_version( @@ -405,3 +461,24 @@ def engine_new_payload_beacon_root(cls, block_number: int = 0, timestamp: int = Starting at Cancun, payloads must have a parent beacon block root. """ return True + + +class Prague(Cancun): + """ + Prague fork + """ + + @classmethod + def is_deployed(cls) -> bool: + """ + Flags that the fork has not been deployed to mainnet; it is under active + development. + """ + return False + + @classmethod + def solc_min_version(cls) -> Version: + """ + Returns the minimum version of solc that supports this fork. + """ + return Version.parse("1.0.0") # set a high version; currently unknown diff --git a/src/ethereum_test_forks/forks/transition.py b/src/ethereum_test_forks/forks/transition.py index 3180440233..b32b4e7252 100644 --- a/src/ethereum_test_forks/forks/transition.py +++ b/src/ethereum_test_forks/forks/transition.py @@ -2,7 +2,7 @@ List of all transition fork definitions. 
""" from ..transition_base_fork import transition_fork -from .forks import Berlin, Cancun, London, Merge, Shanghai +from .forks import Berlin, Cancun, London, Paris, Shanghai # Transition Forks @@ -16,9 +16,9 @@ class BerlinToLondonAt5(Berlin): @transition_fork(to_fork=Shanghai, at_timestamp=15_000) -class MergeToShanghaiAtTime15k(Merge): +class ParisToShanghaiAtTime15k(Paris, blockchain_test_network_name="ParisToShanghaiAtTime15k"): """ - Merge to Shanghai transition at Timestamp 15k + Paris to Shanghai transition at Timestamp 15k """ pass diff --git a/src/ethereum_test_forks/helpers.py b/src/ethereum_test_forks/helpers.py index 6705bc9553..62da80568d 100644 --- a/src/ethereum_test_forks/helpers.py +++ b/src/ethereum_test_forks/helpers.py @@ -1,7 +1,9 @@ """ Helper methods to resolve forks during test filling """ -from typing import List +from typing import List, Optional + +from semver import Version from .base_fork import BaseFork, Fork from .forks import forks, transition @@ -33,7 +35,7 @@ def get_forks() -> List[Fork]: return all_forks -def get_deployed_forks(): +def get_deployed_forks() -> List[Fork]: """ Returns a list of all the fork classes implemented by `ethereum_test_forks` that have been deployed to mainnet, chronologically ordered by deployment. @@ -41,7 +43,7 @@ def get_deployed_forks(): return [fork for fork in get_forks() if fork.is_deployed()] -def get_development_forks(): +def get_development_forks() -> List[Fork]: """ Returns a list of all the fork classes implemented by `ethereum_test_forks` that have been not yet deployed to mainnet and are currently under @@ -57,6 +59,34 @@ def get_parent_fork(fork: Fork) -> Fork: return fork.__base__ +def get_forks_with_solc_support(solc_version: Version) -> List[Fork]: + """ + Returns a list of all fork classes that are supported by solc. + """ + return [fork for fork in get_forks() if solc_version >= fork.solc_min_version()] + + +def get_forks_without_solc_support(solc_version: Version) -> List[Fork]: + """ + Returns a list of all fork classes that aren't supported by solc. + """ + return [fork for fork in get_forks() if solc_version < fork.solc_min_version()] + + +def get_closest_fork_with_solc_support(fork: Fork, solc_version: Version) -> Optional[Fork]: + """ + Returns the closest fork, potentially the provided fork itself, that has + solc support. + """ + if fork is BaseFork: + return None + return ( + fork + if solc_version >= fork.solc_min_version() + else get_closest_fork_with_solc_support(get_parent_fork(fork), solc_version) + ) + + def get_transition_forks() -> List[Fork]: """ Returns all the transition forks @@ -135,18 +165,3 @@ def forks_from(fork: Fork, deployed_only: bool = True) -> List[Fork]: else: latest_fork = get_forks()[-1] return forks_from_until(fork, latest_fork) - - -def is_fork(fork: Fork, which: Fork) -> bool: - """ - Returns `True` if `fork` is `which` or beyond, `False otherwise. 
- """ - prev_fork = fork - - while prev_fork != BaseFork: - if prev_fork == which: - return True - - prev_fork = prev_fork.__base__ - - return False diff --git a/src/ethereum_test_forks/tests/test_forks.py b/src/ethereum_test_forks/tests/test_forks.py index 61cddd1ea1..a9dfcb200d 100644 --- a/src/ethereum_test_forks/tests/test_forks.py +++ b/src/ethereum_test_forks/tests/test_forks.py @@ -4,16 +4,19 @@ from typing import Mapping, cast +from semver import Version + from ..base_fork import Fork -from ..forks.forks import Berlin, Cancun, Frontier, London, Merge, Shanghai -from ..forks.transition import BerlinToLondonAt5, MergeToShanghaiAtTime15k +from ..forks.forks import Berlin, Cancun, Frontier, London, Paris, Prague, Shanghai +from ..forks.transition import BerlinToLondonAt5, ParisToShanghaiAtTime15k from ..helpers import ( forks_from, forks_from_until, + get_closest_fork_with_solc_support, get_deployed_forks, get_development_forks, get_forks, - is_fork, + get_forks_with_solc_support, transition_fork_from_to, transition_fork_to, ) @@ -21,8 +24,8 @@ FIRST_DEPLOYED = Frontier LAST_DEPLOYED = Shanghai -LAST_DEVELOPMENT = Cancun -DEVELOPMENT_FORKS = [Cancun] +LAST_DEVELOPMENT = Prague +DEVELOPMENT_FORKS = [Cancun, Prague] def test_transition_forks(): @@ -30,36 +33,43 @@ def test_transition_forks(): Test transition fork utilities. """ assert transition_fork_from_to(Berlin, London) == BerlinToLondonAt5 - assert transition_fork_from_to(Berlin, Merge) is None - assert transition_fork_to(Shanghai) == [MergeToShanghaiAtTime15k] + assert transition_fork_from_to(Berlin, Paris) is None + assert transition_fork_to(Shanghai) == [ParisToShanghaiAtTime15k] # Test forks transitioned to and from assert BerlinToLondonAt5.transitions_to() == London assert BerlinToLondonAt5.transitions_from() == Berlin - assert BerlinToLondonAt5.fork(4, 0) == "Berlin" - assert BerlinToLondonAt5.fork(5, 0) == "London" + assert BerlinToLondonAt5.transition_tool_name(4, 0) == "Berlin" + assert BerlinToLondonAt5.transition_tool_name(5, 0) == "London" # Default values of transition forks is the transition block - assert BerlinToLondonAt5.fork() == "London" + assert BerlinToLondonAt5.transition_tool_name() == "London" - assert MergeToShanghaiAtTime15k.fork(0, 14_999) == "Merge" - assert MergeToShanghaiAtTime15k.fork(0, 15_000) == "Shanghai" - assert MergeToShanghaiAtTime15k.fork() == "Shanghai" + assert ParisToShanghaiAtTime15k.transition_tool_name(0, 14_999) == "Merge" + assert ParisToShanghaiAtTime15k.transition_tool_name(0, 15_000) == "Shanghai" + assert ParisToShanghaiAtTime15k.transition_tool_name() == "Shanghai" assert BerlinToLondonAt5.header_base_fee_required(4, 0) is False assert BerlinToLondonAt5.header_base_fee_required(5, 0) is True - assert MergeToShanghaiAtTime15k.header_withdrawals_required(0, 14_999) is False - assert MergeToShanghaiAtTime15k.header_withdrawals_required(0, 15_000) is True + assert ParisToShanghaiAtTime15k.header_withdrawals_required(0, 14_999) is False + assert ParisToShanghaiAtTime15k.header_withdrawals_required(0, 15_000) is True + + assert ParisToShanghaiAtTime15k.engine_new_payload_version(0, 14_999) == 1 + assert ParisToShanghaiAtTime15k.engine_new_payload_version(0, 15_000) == 2 - assert MergeToShanghaiAtTime15k.engine_new_payload_version(0, 14_999) == 1 - assert MergeToShanghaiAtTime15k.engine_new_payload_version(0, 15_000) == 2 + assert BerlinToLondonAt5.fork_at(4, 0) == Berlin + assert BerlinToLondonAt5.fork_at(5, 0) == London + assert ParisToShanghaiAtTime15k.fork_at(0, 14_999) == Paris + 
assert ParisToShanghaiAtTime15k.fork_at(0, 15_000) == Shanghai + assert ParisToShanghaiAtTime15k.fork_at() == Paris + assert ParisToShanghaiAtTime15k.fork_at(10_000_000, 14_999) == Paris def test_forks_from(): # noqa: D103 - assert forks_from(Merge) == [Merge, LAST_DEPLOYED] - assert forks_from(Merge, deployed_only=True) == [Merge, LAST_DEPLOYED] - assert forks_from(Merge, deployed_only=False) == [Merge, LAST_DEPLOYED] + DEVELOPMENT_FORKS + assert forks_from(Paris) == [Paris, LAST_DEPLOYED] + assert forks_from(Paris, deployed_only=True) == [Paris, LAST_DEPLOYED] + assert forks_from(Paris, deployed_only=False) == [Paris, LAST_DEPLOYED] + DEVELOPMENT_FORKS def test_forks(): @@ -68,35 +78,72 @@ def test_forks(): """ assert forks_from_until(Berlin, Berlin) == [Berlin] assert forks_from_until(Berlin, London) == [Berlin, London] - assert forks_from_until(Berlin, Merge) == [ + assert forks_from_until(Berlin, Paris) == [ Berlin, London, - Merge, + Paris, ] # Test fork names assert London.name() == "London" - assert MergeToShanghaiAtTime15k.name() == "MergeToShanghaiAtTime15k" + assert ParisToShanghaiAtTime15k.name() == "ParisToShanghaiAtTime15k" assert f"{London}" == "London" - assert f"{MergeToShanghaiAtTime15k}" == "MergeToShanghaiAtTime15k" + assert f"{ParisToShanghaiAtTime15k}" == "ParisToShanghaiAtTime15k" + + # Merge name will be changed to paris, but we need to check the inheriting fork name is still + # the default + assert Paris.transition_tool_name() == "Merge" + assert Shanghai.transition_tool_name() == "Shanghai" + assert Paris.blockchain_test_network_name() == "Merge" + assert Shanghai.blockchain_test_network_name() == "Shanghai" + assert ParisToShanghaiAtTime15k.blockchain_test_network_name() == "ParisToShanghaiAtTime15k" # Test some fork properties assert Berlin.header_base_fee_required(0, 0) is False assert London.header_base_fee_required(0, 0) is True - assert Merge.header_base_fee_required(0, 0) is True + assert Paris.header_base_fee_required(0, 0) is True # Default values of normal forks if the genesis block - assert Merge.header_base_fee_required() is True + assert Paris.header_base_fee_required() is True # Transition forks too assert cast(Fork, BerlinToLondonAt5).header_base_fee_required(4, 0) is False assert cast(Fork, BerlinToLondonAt5).header_base_fee_required(5, 0) is True - assert cast(Fork, MergeToShanghaiAtTime15k).header_withdrawals_required(0, 14_999) is False - assert cast(Fork, MergeToShanghaiAtTime15k).header_withdrawals_required(0, 15_000) is True - assert cast(Fork, MergeToShanghaiAtTime15k).header_withdrawals_required() is True - - assert is_fork(Berlin, Berlin) is True - assert is_fork(London, Berlin) is True - assert is_fork(Berlin, Merge) is False + assert cast(Fork, ParisToShanghaiAtTime15k).header_withdrawals_required(0, 14_999) is False + assert cast(Fork, ParisToShanghaiAtTime15k).header_withdrawals_required(0, 15_000) is True + assert cast(Fork, ParisToShanghaiAtTime15k).header_withdrawals_required() is True + + # Test fork comparison + assert Paris > Berlin + assert not Berlin > Paris + assert Berlin < Paris + assert not Paris < Berlin + + assert Paris >= Berlin + assert not Berlin >= Paris + assert Berlin <= Paris + assert not Paris <= Berlin + + assert London > Berlin + assert not Berlin > London + assert Berlin < London + assert not London < Berlin + + assert London >= Berlin + assert not Berlin >= London + assert Berlin <= London + assert not London <= Berlin + + assert Berlin >= Berlin + assert Berlin <= Berlin + assert not Berlin > Berlin + assert 
not Berlin < Berlin + + fork = Berlin + assert fork >= Berlin + assert fork <= Berlin + assert not fork > Berlin + assert not fork < Berlin + assert fork == Berlin def test_get_forks(): # noqa: D103 @@ -121,7 +168,7 @@ class PrePreAllocFork(Shanghai): """ @classmethod - def pre_allocation(cls, block_number: int = 0, timestamp: int = 0) -> Mapping: + def pre_allocation(cls) -> Mapping: """ Return some starting point for allocation. """ @@ -134,13 +181,11 @@ class PreAllocFork(PrePreAllocFork): """ @classmethod - def pre_allocation(cls, block_number: int = 0, timestamp: int = 0) -> Mapping: + def pre_allocation(cls) -> Mapping: """ Add allocation to the pre-existing one from previous fork. """ - return {"test2": "test2"} | super(PreAllocFork, cls).pre_allocation( - block_number, timestamp - ) + return {"test2": "test2"} | super(PreAllocFork, cls).pre_allocation() @transition_fork(to_fork=PreAllocFork, at_timestamp=15_000) @@ -159,7 +204,7 @@ def test_pre_alloc(): "test": "test", "test2": "test2", } - assert PreAllocTransitionFork.pre_allocation(block_number=0, timestamp=0) == { + assert PreAllocTransitionFork.pre_allocation() == { "test": "test", "test2": "test2", } @@ -171,3 +216,15 @@ def test_precompiles(): def test_tx_types(): Cancun.tx_types() == list(range(4)) + + +def test_solc_versioning(): + assert len(get_forks_with_solc_support(Version.parse("0.8.20"))) == 13 + assert len(get_forks_with_solc_support(Version.parse("0.8.24"))) > 13 + + +def test_closest_fork_supported_by_solc(): + assert get_closest_fork_with_solc_support(Paris, Version.parse("0.8.20")) == Paris + assert get_closest_fork_with_solc_support(Cancun, Version.parse("0.8.20")) == Shanghai + assert get_closest_fork_with_solc_support(Cancun, Version.parse("0.8.24")) == Cancun + assert get_closest_fork_with_solc_support(Prague, Version.parse("0.8.24")) == Cancun diff --git a/src/ethereum_test_forks/transition_base_fork.py b/src/ethereum_test_forks/transition_base_fork.py index dddc72b0d7..e067d89f6e 100644 --- a/src/ethereum_test_forks/transition_base_fork.py +++ b/src/ethereum_test_forks/transition_base_fork.py @@ -1,9 +1,11 @@ """ Base objects used to define transition forks. 
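To make the intent of the fork-ordering operators and the new solc-support helpers concrete, a small usage sketch; the expected values are taken directly from the tests above, and it assumes the repository package and semver are installed:

from semver import Version

from ethereum_test_forks.forks.forks import Berlin, Cancun, Paris, Shanghai
from ethereum_test_forks.helpers import (
    get_closest_fork_with_solc_support,
    get_forks_with_solc_support,
)

# Forks are now ordered, so feature gates can be written as plain comparisons.
assert Paris > Berlin and Berlin < Paris and not Berlin >= Paris

# A given solc release only supports forks up to their solc_min_version();
# unsupported forks are filtered out or mapped to the closest supported ancestor.
solc = Version.parse("0.8.20")
assert Cancun not in get_forks_with_solc_support(solc)
assert get_closest_fork_with_solc_support(Cancun, solc) == Shanghai
assert get_closest_fork_with_solc_support(Paris, solc) == Paris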
""" -from typing import List, Type -from .base_fork import BaseFork, Fork, ForkAttribute +from inspect import signature +from typing import Callable, List, Type + +from .base_fork import BaseFork, Fork ALWAYS_TRANSITIONED_BLOCK_NUMBER = 10_000 ALWAYS_TRANSITIONED_BLOCK_TIMESTAMP = 10_000_000 @@ -46,27 +48,43 @@ def decorator(cls) -> Type[TransitionBaseClass]: from_fork = cls.__bases__[0] assert issubclass(from_fork, BaseFork) - class NewTransitionClass(cls, TransitionBaseClass, BaseFork): # type: ignore + class NewTransitionClass( + cls, # type: ignore + TransitionBaseClass, + BaseFork, + transition_tool_name=cls._transition_tool_name, + blockchain_test_network_name=cls._blockchain_test_network_name, + solc_name=cls._solc_name, + ignore=cls._ignore, + ): pass NewTransitionClass.name = lambda: transition_name # type: ignore def make_transition_method( - base_method: ForkAttribute, - from_fork_method: ForkAttribute, - to_fork_method: ForkAttribute, + base_method: Callable, + from_fork_method: Callable, + to_fork_method: Callable, ): + base_method_parameters = signature(base_method).parameters + def transition_method( cls, block_number: int = ALWAYS_TRANSITIONED_BLOCK_NUMBER, timestamp: int = ALWAYS_TRANSITIONED_BLOCK_TIMESTAMP, ): + kwargs = {} + if "block_number" in base_method_parameters: + kwargs["block_number"] = block_number + if "timestamp" in base_method_parameters: + kwargs["timestamp"] = timestamp + if getattr(base_method, "__prefer_transition_to_method__", False): - return to_fork_method(block_number=block_number, timestamp=timestamp) + return to_fork_method(**kwargs) return ( - to_fork_method(block_number=block_number, timestamp=timestamp) + to_fork_method(**kwargs) if block_number >= at_block and timestamp >= at_timestamp - else from_fork_method(block_number=block_number, timestamp=timestamp) + else from_fork_method(**kwargs) ) return classmethod(transition_method) @@ -84,6 +102,9 @@ def transition_method( NewTransitionClass.transitions_to = lambda: to_fork # type: ignore NewTransitionClass.transitions_from = lambda: from_fork # type: ignore + NewTransitionClass.fork_at = lambda block_number=0, timestamp=0: ( # type: ignore + to_fork if block_number >= at_block and timestamp >= at_timestamp else from_fork + ) return NewTransitionClass diff --git a/src/ethereum_test_tools/__init__.py b/src/ethereum_test_tools/__init__.py index 23edbc7f9f..639744e8e4 100644 --- a/src/ethereum_test_tools/__init__.py +++ b/src/ethereum_test_tools/__init__.py @@ -17,20 +17,17 @@ from .common import ( AccessList, Account, + Address, Auto, - Block, EngineAPIError, Environment, - Fixture, - FixtureEngineNewPayload, - Header, - HistoryStorageAddress, - HiveFixture, + Hash, JSONEncoder, Removable, Storage, TestAddress, TestAddress2, + TestParameterGroup, TestPrivateKey, TestPrivateKey2, Transaction, @@ -42,46 +39,52 @@ copy_opcode_cost, cost_memory_bytes, eip_2028_transaction_data_cost, - to_address, - to_hash, - to_hash_bytes, + transaction_list_root, ) -from .filling.fill import fill_test +from .exceptions import BlockException, ExceptionList, ExceptionType, TransactionException from .reference_spec import ReferenceSpec, ReferenceSpecTypes from .spec import ( + SPEC_TYPES, + BaseFixture, BaseTest, - BaseTestConfig, BlockchainTest, BlockchainTestFiller, + FixtureCollector, StateTest, StateTestFiller, + TestInfo, ) -from .vm import Opcode, OpcodeCallArg, Opcodes +from .spec.blockchain.types import Block, Header +from .vm import Macro, Opcode, OpcodeCallArg, Opcodes __all__ = ( + "SPEC_TYPES", "AccessList", 
"Account", + "Address", "Auto", + "BaseFixture", "BaseTest", - "BaseTestConfig", "Block", "BlockchainTest", "BlockchainTestFiller", - "Case", + "BlockException", "CalldataCase", + "Case", "Code", "CodeGasMeasure", "Conditional", "EngineAPIError", "Environment", - "Fixture", - "FixtureEngineNewPayload", + "ExceptionList", + "ExceptionType", + "FixtureCollector", + "Hash", "Header", - "HistoryStorageAddress", - "HiveFixture", "Initcode", "JSONEncoder", "Opcode", + "Macro", "OpcodeCallArg", "Opcodes", "ReferenceSpec", @@ -93,9 +96,12 @@ "Switch", "TestAddress", "TestAddress2", + "TestInfo", + "TestParameterGroup", "TestPrivateKey", "TestPrivateKey2", "Transaction", + "TransactionException", "Withdrawal", "Yul", "YulCompiler", @@ -107,8 +113,5 @@ "cost_memory_bytes", "eip_2028_transaction_data_cost", "eip_2028_transaction_data_cost", - "fill_test", - "to_address", - "to_hash_bytes", - "to_hash", + "transaction_list_root", ) diff --git a/src/ethereum_test_tools/code/yul.py b/src/ethereum_test_tools/code/yul.py index ca4fa3356b..80343516d5 100644 --- a/src/ethereum_test_tools/code/yul.py +++ b/src/ethereum_test_tools/code/yul.py @@ -7,7 +7,7 @@ from pathlib import Path from shutil import which from subprocess import PIPE, run -from typing import Mapping, Optional, Sized, SupportsBytes, Tuple, Type, Union +from typing import Optional, Sized, SupportsBytes, Tuple, Type, Union from semver import Version @@ -33,13 +33,7 @@ def get_evm_version_from_fork(fork: Fork | None): """ if not fork: return None - fork_to_evm_version_map: Mapping[str, str] = { - "Merge": "paris", - "ConstantinopleFix": "constantinople", - } - if fork.name() in fork_to_evm_version_map: - return fork_to_evm_version_map[fork.name()] - return fork.name().lower() + return fork.solc_name() class Yul(SupportsBytes, Sized): diff --git a/src/ethereum_test_tools/common/__init__.py b/src/ethereum_test_tools/common/__init__.py index 395088726f..e8d750d403 100644 --- a/src/ethereum_test_tools/common/__init__.py +++ b/src/ethereum_test_tools/common/__init__.py @@ -1,18 +1,28 @@ """ Common definitions and types. 
""" +from .base_types import ( + Address, + Bloom, + Bytes, + Hash, + HeaderNonce, + HexNumber, + Number, + ZeroPaddedHexNumber, +) from .constants import ( AddrAA, AddrBB, EmptyTrieRoot, EngineAPIError, - HistoryStorageAddress, TestAddress, TestAddress2, TestPrivateKey, TestPrivateKey2, ) from .helpers import ( + TestParameterGroup, add_kzg_version, ceiling_division, compute_create2_address, @@ -20,40 +30,23 @@ copy_opcode_cost, cost_memory_bytes, eip_2028_transaction_data_cost, - to_address, - to_hash, - to_hash_bytes, ) +from .json import to_json from .types import ( AccessList, Account, - Address, Alloc, Auto, - Block, - Bloom, - Bytes, Environment, - Fixture, - FixtureBlock, - FixtureEngineNewPayload, - FixtureHeader, - Hash, - Header, - HeaderNonce, - HiveFixture, - InvalidFixtureBlock, JSONEncoder, - Number, Removable, Storage, Transaction, Withdrawal, - ZeroPaddedHexNumber, alloc_to_accounts, serialize_transactions, str_or_none, - to_json, + transaction_list_root, withdrawals_root, ) @@ -65,28 +58,21 @@ "AddrBB", "Alloc", "Auto", - "Block", "Bloom", "Bytes", "EngineAPIError", "EmptyTrieRoot", "Environment", - "Fixture", - "FixtureBlock", - "FixtureEngineNewPayload", - "FixtureHeader", "Hash", - "Header", "HeaderNonce", - "HistoryStorageAddress", - "HiveFixture", - "InvalidFixtureBlock", + "HexNumber", "JSONEncoder", "Number", "Removable", "Storage", "TestAddress", "TestAddress2", + "TestParameterGroup", "TestPrivateKey", "TestPrivateKey2", "Transaction", @@ -102,9 +88,7 @@ "eip_2028_transaction_data_cost", "serialize_transactions", "str_or_none", - "to_address", - "to_hash_bytes", - "to_hash", "to_json", + "transaction_list_root", "withdrawals_root", ) diff --git a/src/ethereum_test_tools/common/base_types.py b/src/ethereum_test_tools/common/base_types.py new file mode 100644 index 0000000000..fcd7df10c8 --- /dev/null +++ b/src/ethereum_test_tools/common/base_types.py @@ -0,0 +1,224 @@ +""" +Basic type primitives used to define other types. +""" + +from typing import ClassVar, SupportsBytes, Type, TypeVar + +from .conversions import ( + BytesConvertible, + FixedSizeBytesConvertible, + NumberConvertible, + to_bytes, + to_fixed_size_bytes, + to_number, +) +from .json import JSONEncoder, SupportsJSON + +N = TypeVar("N", bound="Number") + + +class Number(int, SupportsJSON): + """ + Class that helps represent numbers in tests. + """ + + def __new__(cls, input: NumberConvertible | N): + """ + Creates a new Number object. + """ + return super(Number, cls).__new__(cls, to_number(input)) + + def __str__(self) -> str: + """ + Returns the string representation of the number. + """ + return str(int(self)) + + def __json__(self, encoder: JSONEncoder) -> str: + """ + Returns the JSON representation of the number. + """ + return str(self) + + def hex(self) -> str: + """ + Returns the hexadecimal representation of the number. + """ + return hex(self) + + @classmethod + def or_none(cls: Type[N], input: N | NumberConvertible | None) -> N | None: + """ + Converts the input to a Number while accepting None. + """ + if input is None: + return input + return cls(input) + + +class HexNumber(Number): + """ + Class that helps represent an hexadecimal numbers in tests. + """ + + def __str__(self) -> str: + """ + Returns the string representation of the number. + """ + return self.hex() + + +class ZeroPaddedHexNumber(HexNumber): + """ + Class that helps represent zero padded hexadecimal numbers in tests. + """ + + def hex(self) -> str: + """ + Returns the hexadecimal representation of the number. 
+ """ + if self == 0: + return "0x00" + hex_str = hex(self)[2:] + if len(hex_str) % 2 == 1: + return "0x0" + hex_str + return "0x" + hex_str + + +class Bytes(bytes, SupportsJSON): + """ + Class that helps represent bytes of variable length in tests. + """ + + def __new__(cls, input: BytesConvertible): + """ + Creates a new Bytes object. + """ + return super(Bytes, cls).__new__(cls, to_bytes(input)) + + def __hash__(self) -> int: + """ + Returns the hash of the bytes. + """ + return super(Bytes, self).__hash__() + + def __str__(self) -> str: + """ + Returns the hexadecimal representation of the bytes. + """ + return self.hex() + + def __json__(self, encoder: JSONEncoder) -> str: + """ + Returns the JSON representation of the bytes. + """ + return str(self) + + def hex(self, *args, **kwargs) -> str: + """ + Returns the hexadecimal representation of the bytes. + """ + return "0x" + super().hex(*args, **kwargs) + + @classmethod + def or_none(cls, input: "Bytes | BytesConvertible | None") -> "Bytes | None": + """ + Converts the input to a Bytes while accepting None. + """ + if input is None: + return input + return cls(input) + + +T = TypeVar("T", bound="FixedSizeBytes") + + +class FixedSizeBytes(Bytes): + """ + Class that helps represent bytes of fixed length in tests. + """ + + byte_length: ClassVar[int] + + def __class_getitem__(cls, length: int) -> Type["FixedSizeBytes"]: + """ + Creates a new FixedSizeBytes class with the given length. + """ + + class Sized(cls): # type: ignore + byte_length = length + + return Sized + + def __new__(cls, input: FixedSizeBytesConvertible | T): + """ + Creates a new FixedSizeBytes object. + """ + return super(FixedSizeBytes, cls).__new__(cls, to_fixed_size_bytes(input, cls.byte_length)) + + def __hash__(self) -> int: + """ + Returns the hash of the bytes. + """ + return super(FixedSizeBytes, self).__hash__() + + @classmethod + def or_none(cls: Type[T], input: T | FixedSizeBytesConvertible | None) -> T | None: + """ + Converts the input to a Fixed Size Bytes while accepting None. + """ + if input is None: + return input + return cls(input) + + def __eq__(self, other: object) -> bool: + """ + Compares two FixedSizeBytes objects to be equal. + """ + if not isinstance(other, FixedSizeBytes): + assert ( + isinstance(other, str) + or isinstance(other, int) + or isinstance(other, bytes) + or isinstance(other, SupportsBytes) + ) + other = self.__class__(other) + return super().__eq__(other) + + def __ne__(self, other: object) -> bool: + """ + Compares two FixedSizeBytes objects to be not equal. + """ + return not self.__eq__(other) + + +class Address(FixedSizeBytes[20]): # type: ignore + """ + Class that helps represent Ethereum addresses in tests. + """ + + pass + + +class Hash(FixedSizeBytes[32]): # type: ignore + """ + Class that helps represent hashes in tests. + """ + + pass + + +class Bloom(FixedSizeBytes[256]): # type: ignore + """ + Class that helps represent blooms in tests. + """ + + pass + + +class HeaderNonce(FixedSizeBytes[8]): # type: ignore + """ + Class that helps represent the header nonce in tests. 
+ """ + + pass diff --git a/src/ethereum_test_tools/common/constants.py b/src/ethereum_test_tools/common/constants.py index 06d06da0f3..14f3b0364e 100644 --- a/src/ethereum_test_tools/common/constants.py +++ b/src/ethereum_test_tools/common/constants.py @@ -4,23 +4,23 @@ from enum import IntEnum -TestAddress = "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" -TestAddress2 = "0x8a0a19589531694250d570040a0c4b74576919b8" +from .base_types import Address + +TestAddress = Address("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b") +TestAddress2 = Address("0x8a0a19589531694250d570040a0c4b74576919b8") TestPrivateKey = "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" TestPrivateKey2 = "0x9e7645d0cfd9c3a04eb7a9db59a4eb7d359f2e75c9164a9d6b9a7d54e1b6a36f" -AddrAA = "0x00000000000000000000000000000000000000aa" -AddrBB = "0x00000000000000000000000000000000000000bb" +AddrAA = Address(0xAA) +AddrBB = Address(0xBB) EmptyBloom = bytes([0] * 256) EmptyOmmersRoot = bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") EmptyTrieRoot = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") EmptyHash = bytes([0] * 32) EmptyNonce = bytes([0] * 8) -ZeroAddress = bytes([0] * 20) - -HistoryStorageAddress = "0x000000000000000000000000000000000000000b" +ZeroAddress = Address(0x00) class EngineAPIError(IntEnum): diff --git a/src/ethereum_test_tools/common/helpers.py b/src/ethereum_test_tools/common/helpers.py index 65a5f89f2a..f25207cd06 100644 --- a/src/ethereum_test_tools/common/helpers.py +++ b/src/ethereum_test_tools/common/helpers.py @@ -2,13 +2,14 @@ Helper functions/classes used to generate Ethereum tests. """ +from dataclasses import MISSING, dataclass, fields from typing import List, SupportsBytes from ethereum.crypto.hash import keccak256 from ethereum.rlp import encode +from .base_types import Address, Bytes, Hash from .conversions import BytesConvertible, FixedSizeBytesConvertible -from .types import Address, Bytes, Hash """ Helper functions @@ -23,25 +24,25 @@ def ceiling_division(a: int, b: int) -> int: return -(a // -b) -def compute_create_address(address: FixedSizeBytesConvertible, nonce: int) -> str: +def compute_create_address(address: FixedSizeBytesConvertible, nonce: int) -> Address: """ Compute address of the resulting contract created using a transaction or the `CREATE` opcode. """ nonce_bytes = bytes() if nonce == 0 else nonce.to_bytes(length=1, byteorder="big") hash = keccak256(encode([Address(address), nonce_bytes])) - return "0x" + hash[-20:].hex() + return Address(hash[-20:]) def compute_create2_address( address: FixedSizeBytesConvertible, salt: FixedSizeBytesConvertible, initcode: BytesConvertible -) -> str: +) -> Address: """ Compute address of the resulting contract created using the `CREATE2` opcode. """ hash = keccak256(b"\xff" + Address(address) + Hash(salt) + keccak256(Bytes(initcode))) - return "0x" + hash[-20:].hex() + return Address(hash[-20:]) def cost_memory_bytes(new_bytes: int, previous_bytes: int) -> int: @@ -84,27 +85,6 @@ def eip_2028_transaction_data_cost(data: BytesConvertible) -> int: return cost -def to_address(input: FixedSizeBytesConvertible) -> str: - """ - Converts an int or str into proper address 20-byte hex string. - """ - return str(Address(input)) - - -def to_hash_bytes(input: FixedSizeBytesConvertible) -> bytes: - """ - Converts an int or str into proper 32-byte hash. 
- """ - return bytes(Hash(input)) - - -def to_hash(input: FixedSizeBytesConvertible) -> str: - """ - Converts an int or str into proper 32-byte hash hex string. - """ - return str(Hash(input)) - - def add_kzg_version( b_hashes: List[bytes | SupportsBytes | int | str], kzg_version: int ) -> List[bytes]: @@ -115,8 +95,9 @@ def add_kzg_version( kzg_versioned_hashes = [] for hash in b_hashes: + hash = bytes(Hash(hash)) if isinstance(hash, int) or isinstance(hash, str): - kzg_versioned_hashes.append(kzg_version_hex + to_hash_bytes(hash)[1:]) + kzg_versioned_hashes.append(kzg_version_hex + hash[1:]) elif isinstance(hash, bytes) or isinstance(hash, SupportsBytes): if isinstance(hash, SupportsBytes): hash = bytes(hash) @@ -124,3 +105,30 @@ def add_kzg_version( else: raise TypeError("Blob hash must be either an integer, string or bytes") return kzg_versioned_hashes + + +@dataclass(kw_only=True, frozen=True, repr=False) +class TestParameterGroup: + """ + Base class for grouping test parameters in a dataclass. Provides a generic + __repr__ method to generate clean test ids, including only non-default + optional fields. + """ + + __test__ = False # explicitly prevent pytest collecting this class + + def __repr__(self): + """ + Generates a repr string, intended to be used as a test id, based on the class + name and the values of the non-default optional fields. + """ + class_name = self.__class__.__name__ + field_strings = [] + + for field in fields(self): + value = getattr(self, field.name) + # Include the field only if it is not optional or not set to its default value + if field.default is MISSING or field.default != value: + field_strings.append(f"{field.name}_{value}") + + return f"{class_name}_{'-'.join(field_strings)}" diff --git a/src/ethereum_test_tools/common/json.py b/src/ethereum_test_tools/common/json.py index 9503bad512..3e4e5956bf 100644 --- a/src/ethereum_test_tools/common/json.py +++ b/src/ethereum_test_tools/common/json.py @@ -1,6 +1,7 @@ """ JSON encoding and decoding for Ethereum types. """ + import json from abc import ABC, abstractmethod from dataclasses import dataclass @@ -112,8 +113,9 @@ def default(self, obj: Any) -> Any: for object_field in fields(obj): field_name = object_field.name metadata = object_field.metadata + if not metadata: + continue value = getattr(obj, field_name) - assert metadata is not None, f"Field {field_name} has no metadata" field_settings = metadata.get("json_encoder") assert isinstance(field_settings, self.Field), ( f"Field {field_name} has invalid json_encoder " f"metadata: {field_settings}" diff --git a/src/ethereum_test_tools/common/types.py b/src/ethereum_test_tools/common/types.py index f504c32f29..50a5910192 100644 --- a/src/ethereum_test_tools/common/types.py +++ b/src/ethereum_test_tools/common/types.py @@ -1,12 +1,12 @@ """ Useful types for generating Ethereum tests. 
""" + from copy import copy, deepcopy -from dataclasses import dataclass, fields, replace +from dataclasses import dataclass, fields from itertools import count from typing import ( Any, - Callable, ClassVar, Dict, Iterator, @@ -15,34 +15,31 @@ Optional, Sequence, SupportsBytes, - Tuple, Type, TypeAlias, - TypeVar, ) from coincurve.keys import PrivateKey, PublicKey from ethereum import rlp as eth_rlp -from ethereum.base_types import Uint +from ethereum.base_types import U256, Uint from ethereum.crypto.hash import keccak256 +from ethereum.frontier.fork_types import Account as FrontierAccount +from ethereum.frontier.state import State, set_account, set_storage, state_root from trie import HexaryTrie from ethereum_test_forks import Fork -from evm_transition_tool import TransitionTool -from ..reference_spec.reference_spec import ReferenceSpec -from .constants import AddrAA, EmptyOmmersRoot, EngineAPIError, TestPrivateKey +from ..exceptions import ExceptionList, TransactionException +from .base_types import Address, Bytes, Hash, HexNumber, Number, ZeroPaddedHexNumber +from .constants import TestPrivateKey from .conversions import ( BytesConvertible, FixedSizeBytesConvertible, NumberConvertible, int_or_none, str_or_none, - to_bytes, - to_fixed_size_bytes, - to_number, ) -from .json import JSONEncoder, SupportsJSON, field, to_json +from .json import JSONEncoder, SupportsJSON, field # Sentinel classes @@ -66,198 +63,15 @@ def __repr__(self) -> str: return "auto" -# Basic Types - - -N = TypeVar("N", bound="Number") - - -class Number(int, SupportsJSON): - """ - Class that helps represent numbers in tests. - """ - - def __new__(cls, input: NumberConvertible | N): - """ - Creates a new Number object. - """ - return super(Number, cls).__new__(cls, to_number(input)) - - def __str__(self) -> str: - """ - Returns the string representation of the number. - """ - return str(int(self)) - - def __json__(self, encoder: JSONEncoder) -> str: - """ - Returns the JSON representation of the number. - """ - return str(self) - - def hex(self) -> str: - """ - Returns the hexadecimal representation of the number. - """ - return hex(self) - - @classmethod - def or_none(cls: Type[N], input: N | NumberConvertible | None) -> N | None: - """ - Converts the input to a Number while accepting None. - """ - if input is None: - return input - return cls(input) - - -class HexNumber(Number): - """ - Class that helps represent an hexadecimal numbers in tests. - """ - - def __str__(self) -> str: - """ - Returns the string representation of the number. - """ - return self.hex() - - -class ZeroPaddedHexNumber(HexNumber): - """ - Class that helps represent zero padded hexadecimal numbers in tests. - """ - - def hex(self) -> str: - """ - Returns the hexadecimal representation of the number. - """ - if self == 0: - return "0x00" - hex_str = hex(self)[2:] - if len(hex_str) % 2 == 1: - return "0x0" + hex_str - return "0x" + hex_str - - -class Bytes(bytes, SupportsJSON): - """ - Class that helps represent bytes of variable length in tests. - """ - - def __new__(cls, input: BytesConvertible): - """ - Creates a new Bytes object. - """ - return super(Bytes, cls).__new__(cls, to_bytes(input)) - - def __str__(self) -> str: - """ - Returns the hexadecimal representation of the bytes. - """ - return self.hex() - - def __json__(self, encoder: JSONEncoder) -> str: - """ - Returns the JSON representation of the bytes. - """ - return str(self) - - def hex(self, *args, **kwargs) -> str: - """ - Returns the hexadecimal representation of the bytes. 
- """ - return "0x" + super().hex(*args, **kwargs) - - @classmethod - def or_none(cls, input: "Bytes | BytesConvertible | None") -> "Bytes | None": - """ - Converts the input to a Bytes while accepting None. - """ - if input is None: - return input - return cls(input) - - -T = TypeVar("T", bound="FixedSizeBytes") - - -class FixedSizeBytes(Bytes): - """ - Class that helps represent bytes of fixed length in tests. - """ - - byte_length: ClassVar[int] - - def __class_getitem__(cls, length: int) -> Type["FixedSizeBytes"]: - """ - Creates a new FixedSizeBytes class with the given length. - """ - - class Sized(cls): # type: ignore - byte_length = length - - return Sized - - def __new__(cls, input: FixedSizeBytesConvertible | T): - """ - Creates a new FixedSizeBytes object. - """ - return super(FixedSizeBytes, cls).__new__(cls, to_fixed_size_bytes(input, cls.byte_length)) - - @classmethod - def or_none(cls: Type[T], input: T | FixedSizeBytesConvertible | None) -> T | None: - """ - Converts the input to a Fixed Size Bytes while accepting None. - """ - if input is None: - return input - return cls(input) - - -class Address(FixedSizeBytes[20]): # type: ignore - """ - Class that helps represent Ethereum addresses in tests. - """ - - pass - - -class Hash(FixedSizeBytes[32]): # type: ignore - """ - Class that helps represent hashes in tests. - """ - - pass - - -class Bloom(FixedSizeBytes[256]): # type: ignore - """ - Class that helps represent blooms in tests. - """ - - pass - - -class HeaderNonce(FixedSizeBytes[8]): # type: ignore - """ - Class that helps represent the header nonce in tests. - """ - - pass - - MAX_STORAGE_KEY_VALUE = 2**256 - 1 MIN_STORAGE_KEY_VALUE = -(2**255) -class Storage(SupportsJSON): +class Storage(SupportsJSON, dict): """ Definition of a storage in pre or post state of a test """ - data: Dict[int, int] - current_slot: Iterator[int] StorageDictType: ClassVar[TypeAlias] = Dict[ @@ -267,6 +81,7 @@ class Storage(SupportsJSON): Dictionary type to be used when defining an input to initialize a storage. """ + @dataclass(kw_only=True) class InvalidType(Exception): """ Invalid type used when describing test's expected storage key or value. @@ -282,6 +97,7 @@ def __str__(self): """Print exception string""" return f"invalid type for key/value: {self.key_or_value}" + @dataclass(kw_only=True) class InvalidValue(Exception): """ Invalid value used when describing test's expected storage key or @@ -298,6 +114,7 @@ def __str__(self): """Print exception string""" return f"invalid value for key/value: {self.key_or_value}" + @dataclass(kw_only=True) class AmbiguousKeyValue(Exception): """ Key is represented twice in the storage. @@ -330,6 +147,7 @@ def __str__(self): s[{self.key_1}] = {self.val_1} and s[{self.key_2}] = {self.val_2} """ + @dataclass(kw_only=True) class MissingKey(Exception): """ Test expected to find a storage key set but key was missing. @@ -345,18 +163,19 @@ def __str__(self): """Print exception string""" return "key {0} not found in storage".format(Storage.key_value_to_string(self.key)) + @dataclass(kw_only=True) class KeyValueMismatch(Exception): """ Test expected a certain value in a storage key but value found was different. 
""" - address: str + address: Address key: int want: int got: int - def __init__(self, address: str, key: int, want: int, got: int, *args): + def __init__(self, address: Address, key: int, want: int, got: int, *args): super().__init__(args) self.address = address self.key = key @@ -384,10 +203,10 @@ def parse_key_value(input: str | int | bytes | SupportsBytes) -> int: elif isinstance(input, bytes) or isinstance(input, SupportsBytes): input = int.from_bytes(bytes(input), "big") else: - raise Storage.InvalidType(input) + raise Storage.InvalidType(key_or_value=input) if input > MAX_STORAGE_KEY_VALUE or input < MIN_STORAGE_KEY_VALUE: - raise Storage.InvalidValue(input) + raise Storage.InvalidValue(key_or_value=input) return input @staticmethod @@ -402,45 +221,43 @@ def key_value_to_string(value: int) -> str: hex_str = "0" + hex_str return "0x" + hex_str - def __init__(self, input: StorageDictType = {}, start_slot: int = 0): + def __init__(self, input: StorageDictType | "Storage" = {}, *, start_slot: int = 0): """ Initializes the storage using a given mapping which can have keys and values either as string or int. Strings must be valid decimal or hexadecimal (starting with 0x) numbers. """ - self.data = {} - for key in input: - value = Storage.parse_key_value(input[key]) - key = Storage.parse_key_value(key) - self.data[key] = value + super().__init__( + (Storage.parse_key_value(k), Storage.parse_key_value(v)) for k, v in input.items() + ) self.current_slot = count(start_slot) - def __len__(self) -> int: - """Returns number of elements in the storage""" - return len(self.data) - - def __contains__(self, key: str | int | bytes) -> bool: + def __contains__(self, key: object) -> bool: """Checks for an item in the storage""" - key = Storage.parse_key_value(key) - return key in self.data + assert ( + isinstance(key, str) + or isinstance(key, int) + or isinstance(key, bytes) + or isinstance(key, SupportsBytes) + ) + return super().__contains__(Storage.parse_key_value(key)) - def __getitem__(self, key: str | int | bytes) -> int: + def __getitem__(self, key: str | int | bytes | SupportsBytes) -> int: """Returns an item from the storage""" - key = Storage.parse_key_value(key) - if key not in self.data: - raise KeyError() - return self.data[key] + return super().__getitem__(Storage.parse_key_value(key)) - def __setitem__(self, key: str | int | bytes, value: str | int | bytes): # noqa: SC200 + def __setitem__( + self, key: str | int | bytes | SupportsBytes, value: str | int | bytes | SupportsBytes + ): # noqa: SC200 """Sets an item in the storage""" - self.data[Storage.parse_key_value(key)] = Storage.parse_key_value(value) + super().__setitem__(Storage.parse_key_value(key), Storage.parse_key_value(value)) - def __delitem__(self, key: str | int | bytes): + def __delitem__(self, key: str | int | bytes | SupportsBytes): """Deletes an item from the storage""" - del self.data[Storage.parse_key_value(key)] + super().__delitem__(Storage.parse_key_value(key)) - def store_next(self, value: str | int | bytes) -> int: + def store_next(self, value: str | int | bytes | SupportsBytes) -> int: """ Stores a value in the storage and returns the key where the value is stored. @@ -456,11 +273,13 @@ def __json__(self, encoder: JSONEncoder) -> Mapping[str, str]: hex string formatting. 
""" res: Dict[str, str] = {} - for key in self.data: + for key, value in self.items(): key_repr = Storage.key_value_to_string(key) - val_repr = Storage.key_value_to_string(self.data[key]) + val_repr = Storage.key_value_to_string(value) if key_repr in res and val_repr != res[key_repr]: - raise Storage.AmbiguousKeyValue(key_repr, res[key_repr], key, val_repr) + raise Storage.AmbiguousKeyValue( + key_1=key_repr, val_1=res[key_repr], key_2=key, val_2=val_repr + ) res[key_repr] = val_repr return res @@ -471,14 +290,14 @@ def contains(self, other: "Storage") -> bool: Used for comparison with test expected post state and alloc returned by the transition tool. """ - for key in other.data: - if key not in self.data: + for key in other: + if key not in self: return False - if self.data[key] != other.data[key]: + if self[key] != other[key]: return False return True - def must_contain(self, address: str, other: "Storage"): + def must_contain(self, address: Address, other: "Storage"): """ Succeeds only if self contains all keys with equal value as contained by second storage. @@ -486,31 +305,35 @@ def must_contain(self, address: str, other: "Storage"): by the transition tool. Raises detailed exception when a difference is found. """ - for key in other.data: - if key not in self.data: + for key in other: + if key not in self: # storage[key]==0 is equal to missing storage if other[key] != 0: - raise Storage.MissingKey(key) - elif self.data[key] != other.data[key]: - raise Storage.KeyValueMismatch(address, key, self.data[key], other.data[key]) + raise Storage.MissingKey(key=key) + elif self[key] != other[key]: + raise Storage.KeyValueMismatch( + address=address, key=key, want=self[key], got=other[key] + ) - def must_be_equal(self, address: str, other: "Storage"): + def must_be_equal(self, address: Address, other: "Storage"): """ Succeeds only if "self" is equal to "other" storage. """ # Test keys contained in both storage objects - for key in self.data.keys() & other.data.keys(): - if self.data[key] != other.data[key]: - raise Storage.KeyValueMismatch(address, key, self.data[key], other.data[key]) + for key in self.keys() & other.keys(): + if self[key] != other[key]: + raise Storage.KeyValueMismatch( + address=address, key=key, want=self[key], got=other[key] + ) # Test keys contained in either one of the storage objects - for key in self.data.keys() ^ other.data.keys(): - if key in self.data: - if self.data[key] != 0: - raise Storage.KeyValueMismatch(address, key, self.data[key], 0) + for key in self.keys() ^ other.keys(): + if key in self: + if self[key] != 0: + raise Storage.KeyValueMismatch(address=address, key=key, want=self[key], got=0) - elif other.data[key] != 0: - raise Storage.KeyValueMismatch(address, key, 0, other.data[key]) + elif other[key] != 0: + raise Storage.KeyValueMismatch(address=address, key=key, want=0, got=other[key]) @dataclass(kw_only=True) @@ -573,17 +396,18 @@ class Account: state. """ + @dataclass(kw_only=True) class NonceMismatch(Exception): """ Test expected a certain nonce value for an account but a different value was found. 
""" - address: str + address: Address want: int | None got: int | None - def __init__(self, address: str, want: int | None, got: int | None, *args): + def __init__(self, address: Address, want: int | None, got: int | None, *args): super().__init__(args) self.address = address self.want = want @@ -596,17 +420,18 @@ def __str__(self): + f"want {self.want}, got {self.got}" ) + @dataclass(kw_only=True) class BalanceMismatch(Exception): """ Test expected a certain balance for an account but a different value was found. """ - address: str + address: Address want: int | None got: int | None - def __init__(self, address: str, want: int | None, got: int | None, *args): + def __init__(self, address: Address, want: int | None, got: int | None, *args): super().__init__(args) self.address = address self.want = want @@ -619,17 +444,18 @@ def __str__(self): + f"want {self.want}, got {self.got}" ) + @dataclass(kw_only=True) class CodeMismatch(Exception): """ Test expected a certain bytecode for an account but a different one was found. """ - address: str + address: Address want: str | None got: str | None - def __init__(self, address: str, want: str | None, got: str | None, *args): + def __init__(self, address: Address, want: str | None, got: str | None, *args): super().__init__(args) self.address = address self.want = want @@ -642,7 +468,7 @@ def __str__(self): + f"want {self.want}, got {self.got}" ) - def check_alloc(self: "Account", address: str, alloc: dict): + def check_alloc(self: "Account", address: Address, alloc: dict): """ Checks the returned alloc against an expected account in post state. Raises exception on failure. @@ -684,6 +510,12 @@ def check_alloc(self: "Account", address: str, alloc: dict): actual_storage = Storage(alloc["storage"]) if "storage" in alloc else Storage({}) expected_storage.must_be_equal(address=address, other=actual_storage) + def has_empty_code(self: "Account") -> bool: + """ + Returns true if an account has no bytecode. + """ + return not self.code or Bytes(self.code) == b"" + def is_empty(self: "Account") -> bool: """ Returns true if an account deemed empty. 
@@ -691,7 +523,7 @@ def is_empty(self: "Account") -> bool: return ( (self.nonce == 0 or self.nonce is None) and (self.balance == 0 or self.balance is None) - and (not self.code and self.code is None) + and self.has_empty_code() and (not self.storage or self.storage == {} or self.storage is None) ) @@ -742,12 +574,11 @@ class Alloc(dict, Mapping[Address, Account], SupportsJSON): """ def __init__(self, d: Mapping[FixedSizeBytesConvertible, Account | Dict] = {}): - for address, account in d.items(): - address = Address(address) - assert address not in self, f"Duplicate address in alloc: {address}" - account = Account.from_dict(account) - #assert not account.is_empty(), f"Empty account: {account} for address: {address}" - self[address] = account + super().__init__( + (Address(address), Account.from_dict(account)) for address, account in d.items() + ) + if len(self) != len(d): + raise Exception("Duplicate addresses in alloc") @classmethod def merge(cls, alloc_1: "Alloc", alloc_2: "Alloc") -> "Alloc": @@ -757,10 +588,21 @@ def merge(cls, alloc_1: "Alloc", alloc_2: "Alloc") -> "Alloc": merged = alloc_1.copy() for address, other_account in alloc_2.items(): - merged[address] = Account.merge(merged.get(address, None), other_account) + merged_account = Account.merge(merged.get(address, None), other_account) + if merged_account.is_empty(): + if address in merged: + merged.pop(address, None) + else: + merged[address] = merged_account return Alloc(merged) + def empty_accounts(self) -> List[Address]: + """ + Returns a list of addresses of empty accounts. + """ + return [address for address, account in self.items() if account.is_empty()] + def __json__(self, encoder: JSONEncoder) -> Mapping[str, Any]: """ Returns the JSON representation of the allocation. @@ -769,6 +611,33 @@ def __json__(self, encoder: JSONEncoder) -> Mapping[str, Any]: {Address(address): Account.from_dict(account) for address, account in self.items()} ) + def state_root(self) -> bytes: + """ + Returns the state root of the allocation. + """ + state = State() + for address, account in self.items(): + set_account( + state=state, + address=address, + account=FrontierAccount( + nonce=Uint(Number(account.nonce)) if account.nonce is not None else Uint(0), + balance=( + U256(Number(account.balance)) if account.balance is not None else U256(0) + ), + code=Bytes(account.code) if account.code is not None else b"", + ), + ) + if account.storage is not None: + for key, value in account.storage.items(): + set_storage( + state=state, + address=address, + key=Hash(key), + value=U256(Number(value)), + ) + return state_root(state) + def alloc_to_accounts(got_alloc: Dict[str, Any]) -> Mapping[str, Account]: """ @@ -838,39 +707,6 @@ def withdrawals_root(withdrawals: List[Withdrawal]) -> bytes: return t.root_hash -@dataclass(kw_only=True) -class FixtureWithdrawal(Withdrawal): - """ - Structure to represent a single withdrawal of a validator's balance from - the beacon chain in the output fixture. - """ - - index: NumberConvertible = field( - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - validator: NumberConvertible = field( - json_encoder=JSONEncoder.Field( - name="validatorIndex", - cast_type=ZeroPaddedHexNumber, - ), - ) - amount: NumberConvertible = field( - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - - @classmethod - def from_withdrawal(cls, w: Withdrawal) -> "FixtureWithdrawal": - """ - Returns a FixtureWithdrawal from a Withdrawal. 
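The new Alloc helpers above can be exercised with a small allocation. A sketch only: it assumes Account.from_dict accepts these field names and that the ethereum.frontier.state helpers compute the root as wired up in state_root():

from ethereum_test_tools.common.types import Alloc

pre = Alloc(
    {
        "0x00000000000000000000000000000000000000aa": {"balance": 1, "code": "0x6042"},
        0xBB: {"nonce": 1},
    }
)
assert pre.empty_accounts() == []   # neither account is empty, so nothing to report
assert len(pre.state_root()) == 32  # Merkle state root of the allocation, 32 bytes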
- """ - kwargs = {field.name: getattr(w, field.name) for field in fields(w)} - return cls(**kwargs) - - DEFAULT_BASE_FEE = 7 @@ -1037,23 +873,6 @@ class Environment: ), ) - @staticmethod - def from_parent_header(parent: "FixtureHeader") -> "Environment": - """ - Instantiates a new environment with the provided header as parent. - """ - return Environment( - parent_difficulty=parent.difficulty, - parent_timestamp=parent.timestamp, - parent_base_fee=parent.base_fee, - parent_blob_gas_used=parent.blob_gas_used, - parent_excess_blob_gas=parent.excess_blob_gas, - parent_gas_used=parent.gas_used, - parent_gas_limit=parent.gas_limit, - parent_ommers_hash=parent.ommers_hash, - block_hashes={parent.number: parent.hash if parent.hash is not None else 0}, - ) - def parent_hash(self) -> bytes: """ Obtains the latest hash according to the highest block number in @@ -1065,22 +884,6 @@ def parent_hash(self) -> bytes: last_index = max([Number(k) for k in self.block_hashes.keys()]) return Hash(self.block_hashes[last_index]) - def apply_new_parent(self, new_parent: "FixtureHeader") -> "Environment": - """ - Applies a header as parent to a copy of this environment. - """ - env = copy(self) - env.parent_difficulty = new_parent.difficulty - env.parent_timestamp = new_parent.timestamp - env.parent_base_fee = new_parent.base_fee - env.parent_blob_gas_used = new_parent.blob_gas_used - env.parent_excess_blob_gas = new_parent.excess_blob_gas - env.parent_gas_used = new_parent.gas_used - env.parent_gas_limit = new_parent.gas_limit - env.parent_ommers_hash = new_parent.ommers_hash - env.block_hashes[new_parent.number] = new_parent.hash if new_parent.hash is not None else 0 - return env - def set_fork_requirements(self, fork: Fork, in_place: bool = False) -> "Environment": """ Fills the required fields in an environment depending on the fork. @@ -1103,6 +906,8 @@ def set_fork_requirements(self, fork: Fork, in_place: bool = False) -> "Environm if fork.header_zero_difficulty_required(number, timestamp): res.difficulty = 0 + elif res.difficulty is None and res.parent_difficulty is None: + res.difficulty = 0x20000 if ( fork.header_excess_blob_gas_required(number, timestamp) @@ -1208,8 +1013,8 @@ class Transaction: cast_type=HexNumber, ), ) - to: Optional[FixedSizeBytesConvertible] = field( - default=AddrAA, + to: Optional[FixedSizeBytesConvertible | Address] = field( + default=Address(0xAA), json_encoder=JSONEncoder.Field( cast_type=Address, ), @@ -1310,7 +1115,13 @@ class Transaction: skip=True, ), ) - error: Optional[str] = field( + error: Optional[TransactionException | ExceptionList] = field( + default=None, + json_encoder=JSONEncoder.Field( + skip=True, + ), + ) + rlp: Optional[bytes] = field( default=None, json_encoder=JSONEncoder.Field( skip=True, @@ -1379,7 +1190,7 @@ def __post_init__(self) -> None: if self.ty >= 2 and self.max_priority_fee_per_gas is None: self.max_priority_fee_per_gas = 0 - def with_error(self, error: str) -> "Transaction": + def with_error(self, error: TransactionException | ExceptionList) -> "Transaction": """ Create a copy of the transaction with an added error. """ @@ -1546,6 +1357,9 @@ def serialized_bytes(self) -> bytes: Returns bytes of the serialized representation of the transaction, which is almost always RLP encoding. 
""" + if self.rlp is not None: + return self.rlp + if self.ty is None: raise ValueError("ty must be set for all tx types") @@ -1675,7 +1489,7 @@ def signature_bytes(self) -> bytes: + bytes([v]) ) - def with_signature_and_sender(self) -> "Transaction": + def with_signature_and_sender(self, *, keep_secret_key: bool = False) -> "Transaction": """ Returns a signed version of the transaction using the private key. """ @@ -1718,11 +1532,22 @@ def with_signature_and_sender(self) -> "Transaction": else: # not protected tx.v += 27 - # Remove the secret key because otherwise we might attempt to sign again (?) - tx.secret_key = None + # Remove the secret key if requested + if not keep_secret_key: + tx.secret_key = None return tx +def transaction_list_root(input_txs: List[Transaction] | None) -> Hash: + """ + Returns the transactions root of a list of transactions. + """ + t = HexaryTrie(db={}) + for i, tx in enumerate(input_txs or []): + t.set(eth_rlp.encode(Uint(i)), tx.serialized_bytes()) + return Hash(t.root_hash) + + def transaction_list_to_serializable_list(input_txs: List[Transaction] | None) -> List[Any]: """ Returns the transaction list as a list of serializable objects. @@ -1765,1085 +1590,3 @@ def blob_versioned_hashes_from_transactions( versioned_hashes.extend(tx.blob_versioned_hashes) return versioned_hashes - - -@dataclass -class FixtureTransaction(Transaction): - """ - Representation of an Ethereum transaction within a test Fixture. - """ - - ty: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="type", - cast_type=ZeroPaddedHexNumber, - ), - ) - """ - Transaction type value. - """ - chain_id: int = field( - default=1, - json_encoder=JSONEncoder.Field( - name="chainId", - cast_type=ZeroPaddedHexNumber, - ), - ) - nonce: int = field( - default=0, - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - gas_price: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="gasPrice", - cast_type=ZeroPaddedHexNumber, - ), - ) - max_priority_fee_per_gas: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="maxPriorityFeePerGas", - cast_type=ZeroPaddedHexNumber, - ), - ) - max_fee_per_gas: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="maxFeePerGas", - cast_type=ZeroPaddedHexNumber, - ), - ) - gas_limit: int = field( - default=21000, - json_encoder=JSONEncoder.Field( - name="gasLimit", - cast_type=ZeroPaddedHexNumber, - ), - ) - to: Optional[FixedSizeBytesConvertible] = field( - default=AddrAA, - json_encoder=JSONEncoder.Field( - cast_type=Address, - default_value_skip_cast="", - ), - ) - value: int = field( - default=0, - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - data: BytesConvertible = field( - default_factory=bytes, - json_encoder=JSONEncoder.Field( - cast_type=Bytes, - ), - ) - max_fee_per_blob_gas: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="maxFeePerBlobGas", - cast_type=ZeroPaddedHexNumber, - ), - ) - v: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - r: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - s: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - cast_type=ZeroPaddedHexNumber, - ), - ) - - @classmethod - def from_transaction(cls, tx: Transaction) -> "FixtureTransaction": - """ - Returns a FixtureTransaction from a 
Transaction. - """ - kwargs = {field.name: getattr(tx, field.name) for field in fields(tx)} - return cls(**kwargs) - - -@dataclass(kw_only=True) -class Header: - """ - Header type used to describe block header properties in test specs. - """ - - parent_hash: Optional[FixedSizeBytesConvertible] = None - ommers_hash: Optional[FixedSizeBytesConvertible] = None - coinbase: Optional[FixedSizeBytesConvertible] = None - state_root: Optional[FixedSizeBytesConvertible] = None - transactions_root: Optional[FixedSizeBytesConvertible] = None - receipt_root: Optional[FixedSizeBytesConvertible] = None - bloom: Optional[FixedSizeBytesConvertible] = None - difficulty: Optional[NumberConvertible] = None - number: Optional[NumberConvertible] = None - gas_limit: Optional[NumberConvertible] = None - gas_used: Optional[NumberConvertible] = None - timestamp: Optional[NumberConvertible] = None - extra_data: Optional[BytesConvertible] = None - mix_digest: Optional[FixedSizeBytesConvertible] = None - nonce: Optional[FixedSizeBytesConvertible] = None - base_fee: Optional[NumberConvertible | Removable] = None - withdrawals_root: Optional[FixedSizeBytesConvertible | Removable] = None - blob_gas_used: Optional[NumberConvertible | Removable] = None - excess_blob_gas: Optional[NumberConvertible | Removable] = None - beacon_root: Optional[FixedSizeBytesConvertible | Removable] = None - hash: Optional[FixedSizeBytesConvertible] = None - - REMOVE_FIELD: ClassVar[Removable] = Removable() - """ - Sentinel object used to specify that a header field should be removed. - """ - EMPTY_FIELD: ClassVar[Removable] = Removable() - """ - Sentinel object used to specify that a header field must be empty during verification. - """ - - -@dataclass(kw_only=True) -class HeaderFieldSource: - """ - Block header field metadata specifying the source used to populate the field when collecting - the block header from different sources, and to validate it. - """ - - required: bool = True - """ - Whether the field is required or not, regardless of the fork. - """ - fork_requirement_check: Optional[str] = None - """ - Name of the method to call to check if the field is required for the current fork. - """ - default: Optional[Any] = None - """ - Default value for the field if no value was provided by either the transition tool or the - environment - """ - parse_type: Optional[Callable] = None - """ - The type or function to use to parse the field to before initializing the object. - """ - source_environment: Optional[str] = None - """ - Name of the field in the environment object, which can be a callable. - """ - source_transition_tool: Optional[str] = None - """ - Name of the field in the transition tool result dictionary. - """ - - def collect( - self, - *, - target: Dict[str, Any], - field_name: str, - fork: Fork, - number: int, - timestamp: int, - transition_tool_result: Dict[str, Any], - environment: Environment, - ) -> None: - """ - Collects the field from the different sources according to the - metadata description. 
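The new `transaction_list_root` helper introduced above derives the `transactionsTrie` value locally by inserting each transaction's serialized bytes into a hexary trie keyed by its RLP-encoded index; later in this diff the block filler uses it to overwrite the root reported by the transition tool. A small usage sketch, assuming the module layout from this diff and a throwaway test secret key:

```python
from ethereum_test_tools.common.types import Transaction, transaction_list_root

# A well-known throwaway test key (assumption: accepted here as a hex string).
TEST_KEY = "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"

# Sign two simple transfers and compute the transactions root a block header
# carrying them would be expected to contain.
txs = [
    Transaction(nonce=n, gas_price=10, secret_key=TEST_KEY).with_signature_and_sender()
    for n in range(2)
]
print(transaction_list_root(txs))

# An empty transaction list yields the well-known empty-trie root.
print(transaction_list_root([]))
```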
- """ - value = None - required = self.required - if self.fork_requirement_check is not None: - required = getattr(fork, self.fork_requirement_check)(number, timestamp) - - if self.source_transition_tool is not None: - if self.source_transition_tool in transition_tool_result: - got_value = transition_tool_result.get(self.source_transition_tool) - if got_value is not None: - value = got_value - - if self.source_environment is not None: - got_value = getattr(environment, self.source_environment, None) - if callable(got_value): - got_value = got_value() - if got_value is not None: - value = got_value - - if required: - if value is None: - if self.default is not None: - value = self.default - else: - raise ValueError(f"missing required field '{field_name}'") - - if value is not None and self.parse_type is not None: - value = self.parse_type(value) - - target[field_name] = value - - -def header_field(*args, source: Optional[HeaderFieldSource] = None, **kwargs) -> Any: - """ - A wrapper around `dataclasses.field` that allows for json configuration info and header - metadata. - """ - if "metadata" in kwargs: - metadata = kwargs["metadata"] - else: - metadata = {} - assert isinstance(metadata, dict) - - if source is not None: - metadata["source"] = source - - kwargs["metadata"] = metadata - return field(*args, **kwargs) - - -@dataclass(kw_only=True) -class FixtureHeader: - """ - Representation of an Ethereum header within a test Fixture. - """ - - parent_hash: Hash = header_field( - source=HeaderFieldSource( - parse_type=Hash, - source_environment="parent_hash", - ), - json_encoder=JSONEncoder.Field(name="parentHash"), - ) - ommers_hash: Hash = header_field( - source=HeaderFieldSource( - parse_type=Hash, - source_transition_tool="sha3Uncles", - default=EmptyOmmersRoot, - ), - json_encoder=JSONEncoder.Field(name="uncleHash"), - ) - coinbase: Address = header_field( - source=HeaderFieldSource( - parse_type=Address, - source_environment="coinbase", - ), - json_encoder=JSONEncoder.Field(), - ) - state_root: Hash = header_field( - source=HeaderFieldSource( - parse_type=Hash, - source_transition_tool="stateRoot", - ), - json_encoder=JSONEncoder.Field(name="stateRoot"), - ) - transactions_root: Hash = header_field( - source=HeaderFieldSource( - parse_type=Hash, - source_transition_tool="txRoot", - ), - json_encoder=JSONEncoder.Field(name="transactionsTrie"), - ) - receipt_root: Hash = header_field( - source=HeaderFieldSource( - parse_type=Hash, - source_transition_tool="receiptsRoot", - ), - json_encoder=JSONEncoder.Field(name="receiptTrie"), - ) - bloom: Bloom = header_field( - source=HeaderFieldSource( - parse_type=Bloom, - source_transition_tool="logsBloom", - ), - json_encoder=JSONEncoder.Field(), - ) - difficulty: int = header_field( - source=HeaderFieldSource( - parse_type=Number, - source_transition_tool="currentDifficulty", - source_environment="difficulty", - default=0, - ), - json_encoder=JSONEncoder.Field(cast_type=ZeroPaddedHexNumber), - ) - number: int = header_field( - source=HeaderFieldSource( - parse_type=Number, - source_environment="number", - ), - json_encoder=JSONEncoder.Field(cast_type=ZeroPaddedHexNumber), - ) - gas_limit: int = header_field( - source=HeaderFieldSource( - parse_type=Number, - source_environment="gas_limit", - ), - json_encoder=JSONEncoder.Field(name="gasLimit", cast_type=ZeroPaddedHexNumber), - ) - gas_used: int = header_field( - source=HeaderFieldSource( - parse_type=Number, - source_transition_tool="gasUsed", - ), - json_encoder=JSONEncoder.Field(name="gasUsed", 
cast_type=ZeroPaddedHexNumber), - ) - timestamp: int = header_field( - source=HeaderFieldSource( - parse_type=Number, - source_environment="timestamp", - ), - json_encoder=JSONEncoder.Field(cast_type=ZeroPaddedHexNumber), - ) - extra_data: Bytes = header_field( - source=HeaderFieldSource( - parse_type=Bytes, - source_environment="extra_data", - default=b"", - ), - json_encoder=JSONEncoder.Field(name="extraData"), - ) - mix_digest: Hash = header_field( - source=HeaderFieldSource( - parse_type=Hash, - source_environment="prev_randao", - default=b"", - ), - json_encoder=JSONEncoder.Field(name="mixHash"), - ) - nonce: HeaderNonce = header_field( - source=HeaderFieldSource( - parse_type=HeaderNonce, - default=b"", - ), - json_encoder=JSONEncoder.Field(), - ) - base_fee: Optional[int] = header_field( - default=None, - source=HeaderFieldSource( - parse_type=Number, - fork_requirement_check="header_base_fee_required", - source_transition_tool="currentBaseFee", - source_environment="base_fee", - ), - json_encoder=JSONEncoder.Field(name="baseFeePerGas", cast_type=ZeroPaddedHexNumber), - ) - withdrawals_root: Optional[Hash] = header_field( - default=None, - source=HeaderFieldSource( - parse_type=Hash, - fork_requirement_check="header_withdrawals_required", - source_transition_tool="withdrawalsRoot", - ), - json_encoder=JSONEncoder.Field(name="withdrawalsRoot"), - ) - blob_gas_used: Optional[int] = header_field( - default=None, - source=HeaderFieldSource( - parse_type=Number, - fork_requirement_check="header_blob_gas_used_required", - source_transition_tool="blobGasUsed", - ), - json_encoder=JSONEncoder.Field(name="blobGasUsed", cast_type=ZeroPaddedHexNumber), - ) - excess_blob_gas: Optional[int] = header_field( - default=None, - source=HeaderFieldSource( - parse_type=Number, - fork_requirement_check="header_excess_blob_gas_required", - source_transition_tool="currentExcessBlobGas", - ), - json_encoder=JSONEncoder.Field(name="excessBlobGas", cast_type=ZeroPaddedHexNumber), - ) - beacon_root: Optional[Hash] = header_field( - default=None, - source=HeaderFieldSource( - parse_type=Hash, - fork_requirement_check="header_beacon_root_required", - source_environment="beacon_root", - ), - json_encoder=JSONEncoder.Field(name="parentBeaconBlockRoot"), - ) - hash: Optional[Hash] = header_field( - default=None, - source=HeaderFieldSource( - required=False, - ), - json_encoder=JSONEncoder.Field(), - ) - - @classmethod - def collect( - cls, - *, - fork: Fork, - transition_tool_result: Dict[str, Any], - environment: Environment, - ) -> "FixtureHeader": - """ - Collects a FixtureHeader object from multiple sources: - - The transition tool result - - The test's current environment - """ - # We depend on the environment to get the number and timestamp to check the fork - # requirements - number, timestamp = Number(environment.number), Number(environment.timestamp) - - # Collect the header fields - kwargs: Dict[str, Any] = {} - for header_field in fields(cls): - field_name = header_field.name - metadata = header_field.metadata - assert metadata is not None, f"Field {field_name} has no header field metadata" - field_metadata = metadata.get("source") - assert isinstance(field_metadata, HeaderFieldSource), ( - f"Field {field_name} has invalid header_field " f"metadata: {field_metadata}" - ) - field_metadata.collect( - target=kwargs, - field_name=field_name, - fork=fork, - number=number, - timestamp=timestamp, - transition_tool_result=transition_tool_result, - environment=environment, - ) - - # Pass the collected fields as 
keyword arguments to the constructor - return cls(**kwargs) - - def join(self, modifier: Header) -> "FixtureHeader": - """ - Produces a fixture header copy with the set values from the modifier. - """ - new_fixture_header = copy(self) - for header_field in self.__dataclass_fields__: - value = getattr(modifier, header_field) - if value is not None: - if value is Header.REMOVE_FIELD: - setattr(new_fixture_header, header_field, None) - else: - setattr(new_fixture_header, header_field, value) - return new_fixture_header - - def verify(self, baseline: Header): - """ - Verifies that the header fields from the baseline are as expected. - """ - for header_field in fields(self): - field_name = header_field.name - baseline_value = getattr(baseline, field_name) - if baseline_value is not None: - assert baseline_value is not Header.REMOVE_FIELD, "invalid baseline header" - value = getattr(self, field_name) - if baseline_value is Header.EMPTY_FIELD: - assert value is None, f"invalid header field {header_field}" - continue - metadata = header_field.metadata - field_metadata = metadata.get("source") - # type check is performed on collect() - if field_metadata.parse_type is not None: # type: ignore - baseline_value = field_metadata.parse_type(baseline_value) # type: ignore - assert value == baseline_value, f"invalid header field {header_field}" - - def build( - self, - *, - txs: List[Transaction], - ommers: List[Header], - withdrawals: List[Withdrawal] | None, - ) -> Tuple[Bytes, Hash]: - """ - Returns the serialized version of the block and its hash. - """ - header = [ - self.parent_hash, - self.ommers_hash, - self.coinbase, - self.state_root, - self.transactions_root, - self.receipt_root, - self.bloom, - Uint(int(self.difficulty)), - Uint(int(self.number)), - Uint(int(self.gas_limit)), - Uint(int(self.gas_used)), - Uint(int(self.timestamp)), - self.extra_data, - self.mix_digest, - self.nonce, - ] - if self.base_fee is not None: - header.append(Uint(int(self.base_fee))) - if self.withdrawals_root is not None: - header.append(self.withdrawals_root) - if self.blob_gas_used is not None: - header.append(Uint(int(self.blob_gas_used))) - if self.excess_blob_gas is not None: - header.append(Uint(self.excess_blob_gas)) - if self.beacon_root is not None: - header.append(self.beacon_root) - - block = [ - header, - transaction_list_to_serializable_list(txs), - ommers, # TODO: This is incorrect, and we probably need to serialize the ommers - ] - - if withdrawals is not None: - block.append([w.to_serializable_list() for w in withdrawals]) - - serialized_bytes = Bytes(eth_rlp.encode(block)) - - return serialized_bytes, Hash(keccak256(eth_rlp.encode(header))) - - -@dataclass(kw_only=True) -class Block(Header): - """ - Block type used to describe block properties in test specs - """ - - rlp: Optional[BytesConvertible] = None - """ - If set, blockchain test will skip generating the block and will pass this value directly to - the Fixture. - - Only meant to be used to simulate blocks with bad formats, and therefore - requires the block to produce an exception. - """ - header_verify: Optional[Header] = None - """ - If set, the block header will be verified against the specified values. - """ - rlp_modifier: Optional[Header] = None - """ - An RLP modifying header which values would be used to override the ones - returned by the `evm_transition_tool`. - """ - exception: Optional[str] = None - """ - If set, the block is expected to be rejected by the client. 
- """ - engine_api_error_code: Optional[EngineAPIError] = None - """ - If set, the block is expected to produce an error response from the Engine API. - """ - txs: Optional[List[Transaction]] = None - """ - List of transactions included in the block. - """ - ommers: Optional[List[Header]] = None - """ - List of ommer headers included in the block. - """ - withdrawals: Optional[List[Withdrawal]] = None - """ - List of withdrawals to perform for this block. - """ - - def set_environment(self, env: Environment) -> Environment: - """ - Creates a copy of the environment with the characteristics of this - specific block. - """ - new_env = copy(env) - - """ - Values that need to be set in the environment and are `None` for - this block need to be set to their defaults. - """ - environment_default = Environment() - new_env.difficulty = self.difficulty - new_env.coinbase = ( - self.coinbase if self.coinbase is not None else environment_default.coinbase - ) - new_env.gas_limit = ( - self.gas_limit if self.gas_limit is not None else environment_default.gas_limit - ) - if not isinstance(self.base_fee, Removable): - new_env.base_fee = self.base_fee - new_env.withdrawals = self.withdrawals - if not isinstance(self.excess_blob_gas, Removable): - new_env.excess_blob_gas = self.excess_blob_gas - if not isinstance(self.blob_gas_used, Removable): - new_env.blob_gas_used = self.blob_gas_used - if not isinstance(self.beacon_root, Removable): - new_env.beacon_root = self.beacon_root - """ - These values are required, but they depend on the previous environment, - so they can be calculated here. - """ - if self.number is not None: - new_env.number = self.number - else: - # calculate the next block number for the environment - if len(new_env.block_hashes) == 0: - new_env.number = 0 - else: - new_env.number = max([Number(n) for n in new_env.block_hashes.keys()]) + 1 - - if self.timestamp is not None: - new_env.timestamp = self.timestamp - else: - assert new_env.parent_timestamp is not None - new_env.timestamp = int(Number(new_env.parent_timestamp) + 12) - - return new_env - - def copy_with_rlp(self, rlp: Bytes | BytesConvertible | None) -> "Block": - """ - Creates a copy of the block and adds the specified RLP. - """ - new_block = deepcopy(self) - new_block.rlp = Bytes.or_none(rlp) - return new_block - - -@dataclass(kw_only=True) -class FixtureExecutionPayload(FixtureHeader): - """ - Representation of the execution payload of a block within a test fixture. 
- """ - - # Skipped fields in the Engine API - ommers_hash: Hash = field( - json_encoder=JSONEncoder.Field( - skip=True, - ), - ) - transactions_root: Hash = field( - json_encoder=JSONEncoder.Field( - skip=True, - ), - ) - difficulty: int = field( - json_encoder=JSONEncoder.Field( - skip=True, - ) - ) - nonce: HeaderNonce = field( - json_encoder=JSONEncoder.Field( - skip=True, - ) - ) - withdrawals_root: Optional[Hash] = field( - default=None, - json_encoder=JSONEncoder.Field( - skip=True, - ), - ) - - # Fields with different names - coinbase: Address = field( - json_encoder=JSONEncoder.Field( - name="feeRecipient", - ) - ) - receipt_root: Hash = field( - json_encoder=JSONEncoder.Field( - name="receiptsRoot", - ), - ) - bloom: Bloom = field( - json_encoder=JSONEncoder.Field( - name="logsBloom", - ) - ) - mix_digest: Hash = field( - json_encoder=JSONEncoder.Field( - name="prevRandao", - ), - ) - hash: Optional[Hash] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="blockHash", - ), - ) - - # Fields with different formatting - number: int = field( - json_encoder=JSONEncoder.Field( - name="blockNumber", - cast_type=HexNumber, - ) - ) - gas_limit: int = field(json_encoder=JSONEncoder.Field(name="gasLimit", cast_type=HexNumber)) - gas_used: int = field(json_encoder=JSONEncoder.Field(name="gasUsed", cast_type=HexNumber)) - timestamp: int = field(json_encoder=JSONEncoder.Field(cast_type=HexNumber)) - base_fee: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field(name="baseFeePerGas", cast_type=HexNumber), - ) - blob_gas_used: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field(name="blobGasUsed", cast_type=HexNumber), - ) - excess_blob_gas: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field(name="excessBlobGas", cast_type=HexNumber), - ) - - # Fields only used in the Engine API - transactions: Optional[List[Transaction]] = field( - default=None, - json_encoder=JSONEncoder.Field( - cast_type=lambda txs: [Bytes(tx.serialized_bytes()) for tx in txs], - to_json=True, - ), - ) - withdrawals: Optional[List[Withdrawal]] = field( - default=None, - json_encoder=JSONEncoder.Field( - to_json=True, - ), - ) - - @classmethod - def from_fixture_header( - cls, - header: FixtureHeader, - transactions: Optional[List[Transaction]] = None, - withdrawals: Optional[List[Withdrawal]] = None, - ) -> "FixtureExecutionPayload": - """ - Returns a FixtureExecutionPayload from a FixtureHeader, a list - of transactions and a list of withdrawals. - """ - kwargs = {field.name: getattr(header, field.name) for field in fields(header)} - return cls(**kwargs, transactions=transactions, withdrawals=withdrawals) - - -@dataclass(kw_only=True) -class FixtureEngineNewPayload: - """ - Representation of the `engine_newPayloadVX` information to be - sent using the block information. 
- """ - - payload: FixtureExecutionPayload = field( - json_encoder=JSONEncoder.Field( - name="executionPayload", - to_json=True, - ) - ) - blob_versioned_hashes: Optional[List[FixedSizeBytesConvertible]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="expectedBlobVersionedHashes", - cast_type=lambda hashes: [Hash(hash) for hash in hashes], - to_json=True, - ), - ) - beacon_root: Optional[FixedSizeBytesConvertible] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="parentBeaconBlockRoot", - cast_type=Hash, - ), - ) - valid: bool = field( - json_encoder=JSONEncoder.Field( - skip_string_convert=True, - ), - ) - version: int = field( - json_encoder=JSONEncoder.Field(), - ) - error_code: Optional[EngineAPIError] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="errorCode", - cast_type=int, - ), - ) - - @classmethod - def from_fixture_header( - cls, - fork: Fork, - header: FixtureHeader, - transactions: List[Transaction], - withdrawals: Optional[List[Withdrawal]], - valid: bool, - error_code: Optional[EngineAPIError], - ) -> Optional["FixtureEngineNewPayload"]: - """ - Creates a `FixtureEngineNewPayload` from a `FixtureHeader`. - """ - new_payload_version = fork.engine_new_payload_version(header.number, header.timestamp) - - if new_payload_version is None: - return None - - new_payload = cls( - payload=FixtureExecutionPayload.from_fixture_header( - header=replace(header, beacon_root=None), - transactions=transactions, - withdrawals=withdrawals, - ), - version=new_payload_version, - valid=valid, - error_code=error_code, - ) - - if fork.engine_new_payload_blob_hashes(header.number, header.timestamp): - new_payload.blob_versioned_hashes = blob_versioned_hashes_from_transactions( - transactions - ) - - if fork.engine_new_payload_beacon_root(header.number, header.timestamp): - new_payload.beacon_root = header.beacon_root - - return new_payload - - -@dataclass(kw_only=True) -class FixtureBlock: - """ - Representation of an Ethereum block within a test Fixture. - """ - - @staticmethod - def _txs_encoder(txs: List[Transaction]) -> List[FixtureTransaction]: - return [FixtureTransaction.from_transaction(tx) for tx in txs] - - @staticmethod - def _withdrawals_encoder(withdrawals: List[Withdrawal]) -> List[FixtureWithdrawal]: - return [FixtureWithdrawal.from_withdrawal(w) for w in withdrawals] - - rlp: Bytes = field( - default=None, - json_encoder=JSONEncoder.Field(), - ) - block_header: Optional[FixtureHeader] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="blockHeader", - to_json=True, - ), - ) - expected_exception: Optional[str] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="expectException", - ), - ) - block_number: Optional[NumberConvertible] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="blocknumber", - cast_type=Number, - ), - ) - txs: Optional[List[Transaction]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="transactions", - cast_type=_txs_encoder, - to_json=True, - ), - ) - ommers: Optional[List[FixtureHeader]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="uncleHeaders", - to_json=True, - ), - ) - withdrawals: Optional[List[Withdrawal]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="withdrawals", - cast_type=_withdrawals_encoder, - to_json=True, - ), - ) - - -@dataclass(kw_only=True) -class InvalidFixtureBlock: - """ - Representation of an invalid Ethereum block within a test Fixture. 
- """ - - rlp: Bytes = field( - json_encoder=JSONEncoder.Field(), - ) - expected_exception: Optional[str] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="expectException", - ), - ) - rlp_decoded: FixtureBlock = field( - default=None, - json_encoder=JSONEncoder.Field( - name="rlp_decoded", - to_json=True, - ), - ) - - -@dataclass(kw_only=True) -class BaseFixture: - """ - Base Ethereum test fixture fields class. - """ - - info: Dict[str, str] = field( - default_factory=dict, - json_encoder=JSONEncoder.Field( - name="_info", - to_json=True, - ), - ) - name: str = field( - default="", - json_encoder=JSONEncoder.Field( - skip=True, - ), - ) - fork: str = field( - json_encoder=JSONEncoder.Field( - name="network", - ), - ) - _json: Dict[str, Any] | None = field( - default=None, - json_encoder=JSONEncoder.Field( - skip=True, - ), - ) - - def __post_init__(self): - """ - Post init hook to convert to JSON after instantiation. - """ - self._json = to_json(self) - - def to_json(self) -> Dict[str, Any]: - """ - Convert to JSON. - """ - assert self._json is not None, "Fixture not initialized" - self._json["_info"] = self.info - return self._json - - def fill_info( - self, - t8n: TransitionTool, - ref_spec: ReferenceSpec | None, - ): - """ - Fill the info field for this fixture - """ - self.info["filling-transition-tool"] = t8n.version() - if ref_spec is not None: - ref_spec.write_info(self.info) - - -@dataclass(kw_only=True) -class Fixture(BaseFixture): - """ - Cross-client specific test fixture information. - """ - - genesis_rlp: Bytes = field( - json_encoder=JSONEncoder.Field( - name="genesisRLP", - ), - ) - genesis: FixtureHeader = field( - json_encoder=JSONEncoder.Field( - name="genesisBlockHeader", - to_json=True, - ), - ) - blocks: Optional[List[FixtureBlock | InvalidFixtureBlock]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="blocks", - to_json=True, - ), - ) - last_block_hash: Hash = field( - json_encoder=JSONEncoder.Field( - name="lastblockhash", - ), - ) - pre_state: Mapping[str, Account] = field( - json_encoder=JSONEncoder.Field( - name="pre", - cast_type=Alloc, - to_json=True, - ), - ) - post_state: Optional[Mapping[str, Account]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="postState", - cast_type=Alloc, - to_json=True, - ), - ) - seal_engine: str = field( - default="NoProof", - json_encoder=JSONEncoder.Field( - name="sealEngine", - ), - ) - - -@dataclass(kw_only=True) -class HiveFixture(BaseFixture): - """ - Hive specific test fixture information. 
- """ - - genesis: FixtureHeader = field( - json_encoder=JSONEncoder.Field( - name="genesisBlockHeader", - to_json=True, - ), - ) - payloads: Optional[List[Optional[FixtureEngineNewPayload]]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="engineNewPayloads", - to_json=True, - ), - ) - fcu_version: Optional[int] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="engineFcuVersion", - ), - ) - pre_state: Mapping[str, Account] = field( - json_encoder=JSONEncoder.Field( - name="pre", - cast_type=Alloc, - to_json=True, - ), - ) - post_state: Optional[Mapping[str, Account]] = field( - default=None, - json_encoder=JSONEncoder.Field( - name="postState", - cast_type=Alloc, - to_json=True, - ), - ) diff --git a/src/ethereum_test_tools/exceptions/__init__.py b/src/ethereum_test_tools/exceptions/__init__.py new file mode 100644 index 0000000000..99c3606906 --- /dev/null +++ b/src/ethereum_test_tools/exceptions/__init__.py @@ -0,0 +1,7 @@ +""" +Exceptions for invalid execution. +""" + +from .exceptions import BlockException, ExceptionList, ExceptionType, TransactionException + +__all__ = ["BlockException", "ExceptionType", "ExceptionList", "TransactionException"] diff --git a/src/ethereum_test_tools/exceptions/exceptions.py b/src/ethereum_test_tools/exceptions/exceptions.py new file mode 100644 index 0000000000..dfd91a625e --- /dev/null +++ b/src/ethereum_test_tools/exceptions/exceptions.py @@ -0,0 +1,162 @@ +""" +Exceptions for invalid execution. +""" + +from enum import Enum, auto, unique +from typing import List, Union + + +class ExceptionList(list): + """ + A list of exceptions. + """ + + def __init__(self, *exceptions: "ExceptionBase") -> None: + """ + Create a new ExceptionList. + """ + exceptions_set: List[ExceptionBase] = [] + for exception in exceptions: + if not isinstance(exception, ExceptionBase): + raise TypeError(f"Expected ExceptionBase, got {type(exception)}") + if exception not in exceptions_set: + exceptions_set.append(exception) + super().__init__(exceptions_set) + + def __or__(self, other: Union["ExceptionBase", "ExceptionList"]) -> "ExceptionList": + """ + Combine two ExceptionLists. + """ + if isinstance(other, list): + return ExceptionList(*(self + other)) + return ExceptionList(*(self + [other])) + + def __str__(self) -> str: + """ + String representation of the ExceptionList. + """ + return "|".join(str(exception) for exception in self) + + +class ExceptionBase(Enum): + """ + Base class for exceptions. + """ + + def __contains__(self, exception) -> bool: + """ + Checks if provided exception is equal to this + """ + return self == exception + + def __or__( + self, + other: Union["TransactionException", "BlockException", ExceptionList], + ) -> "ExceptionList": + """ + Combine two exceptions into an ExceptionList. + """ + if isinstance(other, ExceptionList): + return ExceptionList(self, *other) + return ExceptionList(self, other) + + +@unique +class TransactionException(ExceptionBase): + """ + Exception raised when a transaction is invalid, and thus cannot be executed. + + If a transaction with any of these exceptions is included in a block, the block is invalid. + """ + + INSUFFICIENT_ACCOUNT_FUNDS = auto() + """ + Transaction's sender does not have enough funds to pay for the transaction. + """ + INSUFFICIENT_MAX_FEE_PER_GAS = auto() + """ + Transaction's max-fee-per-gas is lower than the block base-fee. + """ + PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS = auto() + """ + Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas. 
+ """ + INSUFFICIENT_MAX_FEE_PER_BLOB_GAS = auto() + """ + Transaction's max-fee-per-blob-gas is lower than the block's blob-gas price. + """ + INTRINSIC_GAS_TOO_LOW = auto() + """ + Transaction's gas limit is too low. + """ + INITCODE_SIZE_EXCEEDED = auto() + """ + Transaction's initcode for a contract-creating transaction is too large. + """ + TYPE_3_TX_PRE_FORK = auto() + """ + Transaction type 3 included before activation fork. + """ + TYPE_3_TX_ZERO_BLOBS_PRE_FORK = auto() + """ + Transaction type 3, with zero blobs, included before activation fork. + """ + TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH = auto() + """ + Transaction contains a blob versioned hash with an invalid version. + """ + TYPE_3_TX_WITH_FULL_BLOBS = auto() + """ + Transaction contains full blobs (network-version of the transaction). + """ + TYPE_3_TX_BLOB_COUNT_EXCEEDED = auto() + """ + Transaction contains too many blob versioned hashes. + """ + TYPE_3_TX_CONTRACT_CREATION = auto() + """ + Transaction is a type 3 transaction and has an empty `to`. + """ + TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED = auto() + """ + Transaction causes block to go over blob gas limit. + """ + TYPE_3_TX_ZERO_BLOBS = auto() + """ + Transaction is type 3, but has no blobs. + """ + + +@unique +class BlockException(ExceptionBase): + """ + Exception raised when a block is invalid, but not due to a transaction. + + E.g. all transactions in the block are valid, and can be applied to the state, but the + block header contains an invalid field. + """ + + INCORRECT_BLOCK_FORMAT = auto() + """ + Block's format is incorrect, contains invalid fields, is missing fields, or contains fields of + a fork that is not active yet. + """ + BLOB_GAS_USED_ABOVE_LIMIT = auto() + """ + Block's blob gas used in header is above the limit. + """ + INCORRECT_BLOB_GAS_USED = auto() + """ + Block's blob gas used in header is incorrect. + """ + INCORRECT_EXCESS_BLOB_GAS = auto() + """ + Block's excess blob gas in header is incorrect. + """ + RLP_STRUCTURES_ENCODING = auto() + """ + Block's rlp encoding is valid but ethereum structures in it are invalid + """ + + +ExceptionType = Union[TransactionException, BlockException, ExceptionList] diff --git a/src/ethereum_test_tools/filling/__init__.py b/src/ethereum_test_tools/filling/__init__.py deleted file mode 100644 index 60ae0519d7..0000000000 --- a/src/ethereum_test_tools/filling/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Test filling methods. -""" -from .fill import fill_test - -__all__ = ("fill_test",) diff --git a/src/ethereum_test_tools/filling/fill.py b/src/ethereum_test_tools/filling/fill.py deleted file mode 100644 index 742c87de83..0000000000 --- a/src/ethereum_test_tools/filling/fill.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Test filler definitions. -""" -from typing import List, Optional, Union - -from ethereum_test_forks import Fork -from evm_transition_tool import TransitionTool - -from ..common import Fixture, HiveFixture -from ..reference_spec.reference_spec import ReferenceSpec -from ..spec import BaseTest - - -def fill_test( - t8n: TransitionTool, - test_spec: BaseTest, - fork: Fork, - spec: ReferenceSpec | None, - eips: Optional[List[int]] = None, -) -> Optional[Union[Fixture, HiveFixture]]: - """ - Fills default/hive fixture for the specified fork and test spec. 
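Since `ExceptionBase.__or__` and `ExceptionList.__or__` both return an `ExceptionList`, a test can accept any of several failure reasons with a readable pipe chain. A short, standalone illustration of the semantics defined above:

```python
from ethereum_test_tools.exceptions import (
    BlockException,
    ExceptionList,
    TransactionException,
)

# Chaining members with `|` builds a de-duplicated ExceptionList.
allowed = (
    TransactionException.TYPE_3_TX_PRE_FORK
    | TransactionException.TYPE_3_TX_ZERO_BLOBS_PRE_FORK
)
assert isinstance(allowed, ExceptionList)
assert TransactionException.TYPE_3_TX_PRE_FORK in allowed

# Transaction- and block-level exceptions can be mixed in the same list, and
# the string form joins the members with "|".
mixed = allowed | BlockException.INCORRECT_BLOCK_FORMAT
print(str(mixed))
```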
- """ - fixture: Union[Fixture, HiveFixture] - t8n.reset_traces() - if test_spec.base_test_config.enable_hive: - if fork.engine_new_payload_version() is None: - return None # pre Merge tests are not supported in Hive - fixture = test_spec.make_hive_fixture(t8n, fork, eips) - else: - fixture = test_spec.make_fixture(t8n, fork, eips) - fixture.fill_info(t8n, spec) - return fixture diff --git a/src/ethereum_test_tools/spec/__init__.py b/src/ethereum_test_tools/spec/__init__.py index 8150f9b8c4..9f32e30838 100644 --- a/src/ethereum_test_tools/spec/__init__.py +++ b/src/ethereum_test_tools/spec/__init__.py @@ -1,19 +1,28 @@ """ Test spec definitions and utilities. """ -from .base_test import BaseTest, BaseTestConfig, TestSpec, verify_post_alloc -from .blockchain_test import BlockchainTest, BlockchainTestFiller, BlockchainTestSpec -from .state_test import StateTest, StateTestFiller, StateTestSpec +from typing import List, Type + +from .base.base_test import BaseFixture, BaseTest, TestSpec, verify_post_alloc +from .blockchain.blockchain_test import BlockchainTest, BlockchainTestFiller, BlockchainTestSpec +from .fixture_collector import FixtureCollector, TestInfo +from .state.state_test import StateTest, StateTestFiller, StateTestOnly, StateTestSpec + +SPEC_TYPES: List[Type[BaseTest]] = [BlockchainTest, StateTest, StateTestOnly] __all__ = ( + "SPEC_TYPES", + "BaseFixture", "BaseTest", - "BaseTestConfig", "BlockchainTest", "BlockchainTestFiller", "BlockchainTestSpec", + "FixtureCollector", "StateTest", "StateTestFiller", + "StateTestOnly", "StateTestSpec", + "TestInfo", "TestSpec", "verify_post_alloc", ) diff --git a/src/ethereum_test_tools/spec/base/__init__.py b/src/ethereum_test_tools/spec/base/__init__.py new file mode 100644 index 0000000000..0519b8f7a2 --- /dev/null +++ b/src/ethereum_test_tools/spec/base/__init__.py @@ -0,0 +1,3 @@ +""" +BaseTest spec class and utilities. +""" diff --git a/src/ethereum_test_tools/spec/base_test.py b/src/ethereum_test_tools/spec/base/base_test.py similarity index 50% rename from src/ethereum_test_tools/spec/base_test.py rename to src/ethereum_test_tools/spec/base/base_test.py index e5965b8361..dc989d18c3 100644 --- a/src/ethereum_test_tools/spec/base_test.py +++ b/src/ethereum_test_tools/spec/base/base_test.py @@ -1,25 +1,25 @@ """ Base test class and helper functions for Ethereum state and blockchain tests. 
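`SPEC_TYPES` gives the plugins a single registry of spec classes, and each `BaseTest` subclass reports the pytest parameter it is selected by. A hedged sketch of turning that registry into a name-to-class map (the parameter names other than `blockchain_test` are an assumption based on the pattern in this diff):

```python
from ethereum_test_tools.spec import SPEC_TYPES

# Map each registered spec class to the pytest fixture name used to request it,
# e.g. {"blockchain_test": BlockchainTest, "state_test": StateTest, ...}.
fillers = {spec.pytest_parameter_name(): spec for spec in SPEC_TYPES}
print(sorted(fillers))
```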
""" + +import hashlib +import json from abc import abstractmethod from dataclasses import dataclass, field from itertools import count from os import path -from typing import Any, Callable, Dict, Generator, Iterator, List, Mapping, Optional +from pathlib import Path +from typing import Any, Callable, Dict, Generator, Iterator, List, Mapping, Optional, TextIO from ethereum_test_forks import Fork -from evm_transition_tool import TransitionTool +from evm_transition_tool import FixtureFormats, TransitionTool -from ..common import ( - Account, - Address, - Environment, - Fixture, - HiveFixture, - Transaction, - withdrawals_root, -) -from ..common.conversions import to_hex +from ...common import Account, Address, Environment, Transaction, withdrawals_root +from ...common.conversions import to_hex +from ...common.json import JSONEncoder +from ...common.json import field as json_field +from ...common.json import to_json +from ...reference_spec.reference_spec import ReferenceSpec def verify_transactions(txs: List[Transaction] | None, result) -> List[int]: @@ -52,11 +52,11 @@ def verify_post_alloc(expected_post: Mapping, got_alloc: Mapping): Verify that an allocation matches the expected post in the test. Raises exception on unexpected values. """ - got_alloc_normalized: Dict[str, Any] = { - Address(address).hex(): got_alloc[address] for address in got_alloc + got_alloc_normalized: Dict[Address, Any] = { + Address(address): got_alloc[address] for address in got_alloc } for address, account in expected_post.items(): - address = Address(address).hex() + address = Address(address) if account is not None: if account == Account.NONEXISTENT: if address in got_alloc_normalized: @@ -78,65 +78,141 @@ def verify_result(result: Mapping, env: Environment): @dataclass(kw_only=True) -class BaseTestConfig: +class BaseFixture: """ - General configuration that all tests must support. + Represents a base Ethereum test fixture of any type. """ - enable_hive: bool = False - """ - Enable any hive-related properties that the output could contain. - """ + info: Dict[str, str] = json_field( + default_factory=dict, + json_encoder=JSONEncoder.Field( + name="_info", + to_json=True, + ), + ) + + _json: Optional[Dict[str, Any]] = None + + def fill_info( + self, + t8n: TransitionTool, + ref_spec: ReferenceSpec | None, + ): + """ + Fill the info field for this fixture + """ + if "comment" not in self.info: + self.info["comment"] = "`execution-spec-tests` generated test" + self.info["filling-transition-tool"] = t8n.version() + if ref_spec is not None: + ref_spec.write_info(self.info) + + def __post_init__(self): + """ + Post init hook to convert to JSON after instantiation. + """ + self._json = to_json(self) + json_str = json.dumps(self._json, sort_keys=True, separators=(",", ":")) + h = hashlib.sha256(json_str.encode("utf-8")).hexdigest() + self.info["hash"] = f"0x{h}" + + def to_json(self) -> Dict[str, Any]: + """ + Convert to JSON. + """ + assert self._json is not None, "Fixture not initialized" + self._json["_info"] = self.info + return self._json + + @classmethod + @abstractmethod + def format(cls) -> FixtureFormats: + """ + Returns the fixture format which the evm tool can use to determine how to verify the + fixture. + """ + pass + + @classmethod + @abstractmethod + def collect_into_file(cls, fd: TextIO, fixtures: Dict[str, "BaseFixture"]): + """ + Returns the name of the subdirectory where this type of fixture should be dumped to. 
+ """ + pass + + @classmethod + @abstractmethod + def output_base_dir_name(cls) -> Path: + """ + Returns the name of the subdirectory where this type of fixture should be dumped to. + """ + pass + + @classmethod + def output_file_extension(cls) -> str: + """ + Returns the file extension for this type of fixture. + + By default, fixtures are dumped as JSON files. + """ + return ".json" @dataclass(kw_only=True) class BaseTest: """ - Represents a base Ethereum test which must return a genesis and a - blockchain. + Represents a base Ethereum test which must return a single test fixture. """ pre: Mapping tag: str = "" - base_test_config: BaseTestConfig = field(default_factory=BaseTestConfig) + # Setting a default here is just for type checking, the correct value is automatically set + # by pytest. + fixture_format: FixtureFormats = FixtureFormats.UNSET_TEST_FORMAT # Transition tool specific fields t8n_dump_dir: Optional[str] = "" t8n_call_counter: Iterator[int] = field(init=False, default_factory=count) @abstractmethod - def make_fixture( + def generate( self, t8n: TransitionTool, fork: Fork, eips: Optional[List[int]] = None, - ) -> Fixture: + ) -> BaseFixture: """ - Generate blockchain that must be executed sequentially during test. + Generate the list of test fixtures. """ pass + @classmethod @abstractmethod - def make_hive_fixture( - self, - t8n: TransitionTool, - fork: Fork, - eips: Optional[List[int]] = None, - ) -> HiveFixture: + def pytest_parameter_name(cls) -> str: """ - Generate the blockchain that must be executed sequentially during test. + Must return the name of the parameter used in pytest to select this + spec type as filler for the test. """ pass @classmethod @abstractmethod - def pytest_parameter_name(cls) -> str: + def fixture_formats(cls) -> List[FixtureFormats]: """ - Must return the name of the parameter used in pytest to select this - spec type as filler for the test. + Returns a list of fixture formats that can be output to the test spec. """ pass + def __post_init__(self) -> None: + """ + Validate the fixture format. + """ + if self.fixture_format not in self.fixture_formats(): + raise ValueError( + f"Invalid fixture format {self.fixture_format} for {self.__class__.__name__}." + ) + def get_next_transition_tool_output_path(self) -> str: """ Returns the path to the next transition tool output file. diff --git a/src/ethereum_test_tools/spec/blockchain/__init__.py b/src/ethereum_test_tools/spec/blockchain/__init__.py new file mode 100644 index 0000000000..4a58da648e --- /dev/null +++ b/src/ethereum_test_tools/spec/blockchain/__init__.py @@ -0,0 +1,3 @@ +""" +BlockchainTest type definitions and logic +""" diff --git a/src/ethereum_test_tools/spec/blockchain_test.py b/src/ethereum_test_tools/spec/blockchain/blockchain_test.py similarity index 54% rename from src/ethereum_test_tools/spec/blockchain_test.py rename to src/ethereum_test_tools/spec/blockchain/blockchain_test.py index 0a4c32a509..2dbeba6741 100644 --- a/src/ethereum_test_tools/spec/blockchain_test.py +++ b/src/ethereum_test_tools/spec/blockchain/blockchain_test.py @@ -2,39 +2,93 @@ Ethereum blockchain test spec definition and filler. 
""" -from dataclasses import dataclass, field +from copy import copy +from dataclasses import dataclass, field, replace from pprint import pprint from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, Tuple, Type from ethereum_test_forks import Fork -from evm_transition_tool import TransitionTool +from evm_transition_tool import FixtureFormats, TransitionTool -from ..common import ( +from ...common import ( Address, Alloc, - Block, Bloom, Bytes, EmptyTrieRoot, Environment, - Fixture, - FixtureBlock, - FixtureEngineNewPayload, - FixtureHeader, Hash, HeaderNonce, - HiveFixture, - InvalidFixtureBlock, Number, Transaction, ZeroPaddedHexNumber, alloc_to_accounts, - to_json, + transaction_list_root, withdrawals_root, ) -from ..common.constants import EmptyOmmersRoot -from .base_test import BaseTest, verify_post_alloc, verify_result, verify_transactions -from .debugging import print_traces +from ...common.constants import EmptyOmmersRoot +from ...common.json import to_json +from ..base.base_test import ( + BaseFixture, + BaseTest, + verify_post_alloc, + verify_result, + verify_transactions, +) +from ..debugging import print_traces +from .types import ( + Block, + BlockException, + Fixture, + FixtureBlock, + FixtureEngineNewPayload, + FixtureHeader, + HiveFixture, + InvalidFixtureBlock, +) + + +def environment_from_parent_header(parent: "FixtureHeader") -> "Environment": + """ + Instantiates a new environment with the provided header as parent. + """ + return Environment( + parent_difficulty=parent.difficulty, + parent_timestamp=parent.timestamp, + parent_base_fee=parent.base_fee, + parent_blob_gas_used=parent.blob_gas_used, + parent_excess_blob_gas=parent.excess_blob_gas, + parent_gas_used=parent.gas_used, + parent_gas_limit=parent.gas_limit, + parent_ommers_hash=parent.ommers_hash, + block_hashes={parent.number: parent.hash if parent.hash is not None else 0}, + ) + + +def apply_new_parent(env: Environment, new_parent: FixtureHeader) -> "Environment": + """ + Applies a header as parent to a copy of this environment. + """ + env = copy(env) + env.parent_difficulty = new_parent.difficulty + env.parent_timestamp = new_parent.timestamp + env.parent_base_fee = new_parent.base_fee + env.parent_blob_gas_used = new_parent.blob_gas_used + env.parent_excess_blob_gas = new_parent.excess_blob_gas + env.parent_gas_used = new_parent.gas_used + env.parent_gas_limit = new_parent.gas_limit + env.parent_ommers_hash = new_parent.ommers_hash + env.block_hashes[new_parent.number] = new_parent.hash if new_parent.hash is not None else 0 + return env + + +def count_blobs(txs: List[Transaction]) -> int: + """ + Returns the number of blobs in a list of transactions. + """ + return sum( + [len(tx.blob_versioned_hashes) for tx in txs if tx.blob_versioned_hashes is not None] + ) @dataclass(kw_only=True) @@ -47,6 +101,7 @@ class BlockchainTest(BaseTest): post: Mapping blocks: List[Block] genesis_environment: Environment = field(default_factory=Environment) + verify_sync: Optional[bool] = None tag: str = "" chain_id: int = 1 @@ -57,12 +112,15 @@ def pytest_parameter_name(cls) -> str: """ return "blockchain_test" - @property - def hive_enabled(self) -> bool: + @classmethod + def fixture_formats(cls) -> List[FixtureFormats]: """ - Returns true if hive fixture generation is enabled, false otherwise. + Returns a list of fixture formats that can be output to the test spec. 
""" - return self.base_test_config.enable_hive + return [ + FixtureFormats.BLOCKCHAIN_TEST, + FixtureFormats.BLOCKCHAIN_TEST_HIVE, + ] def make_genesis( self, @@ -78,15 +136,13 @@ def make_genesis( if env.beacon_root is not None: assert Hash(env.beacon_root) == Hash(0), "beacon_root must be empty at genesis" - pre_alloc = Alloc( - fork.pre_allocation(block_number=0, timestamp=Number(env.timestamp)), - ) - - new_alloc, state_root = t8n.calc_state_root( - alloc=to_json(Alloc.merge(pre_alloc, Alloc(self.pre))), - fork=fork, - debug_output_path=self.get_next_transition_tool_output_path(), + pre_alloc = Alloc.merge( + Alloc(fork.pre_allocation_blockchain()), + Alloc(self.pre), ) + if empty_accounts := pre_alloc.empty_accounts(): + raise Exception(f"Empty accounts in pre state: {empty_accounts}") + state_root = pre_alloc.state_root() genesis = FixtureHeader( parent_hash=Hash(0), ommers_hash=Hash(EmptyOmmersRoot), @@ -118,7 +174,7 @@ def make_genesis( withdrawals=env.withdrawals, ) - return Alloc(new_alloc), genesis_rlp, genesis + return pre_alloc, genesis_rlp, genesis def generate_block_data( self, @@ -144,11 +200,24 @@ def generate_block_data( txs = [tx.with_signature_and_sender() for tx in block.txs] if block.txs is not None else [] + if failing_tx_count := len([tx for tx in txs if tx.error]) > 0: + if failing_tx_count > 1: + raise Exception( + "test correctness: only one transaction can produce an exception in a block" + ) + if not txs[-1].error: + raise Exception( + "test correctness: the transaction that produces an exception " + + "must be the last transaction in the block" + ) + next_alloc, result = t8n.evaluate( alloc=previous_alloc, txs=to_json(txs), env=to_json(env), - fork_name=fork.fork(block_number=Number(env.number), timestamp=Number(env.timestamp)), + fork_name=fork.transition_tool_name( + block_number=Number(env.number), timestamp=Number(env.timestamp) + ), chain_id=self.chain_id, reward=fork.get_reward(Number(env.number), Number(env.timestamp)), eips=eips, @@ -182,6 +251,18 @@ def generate_block_data( environment=env, ) + # Update the transactions root to the one calculated locally. + header.transactions_root = transaction_list_root(txs) + + # One special case of the invalid transactions is the blob gas used, since this value + # is not included in the transition tool result, but it is included in the block header, + # and some clients check it before executing the block by simply counting the type-3 txs, + # we need to set the correct value by default. + if ( + blob_gas_per_blob := fork.blob_gas_per_blob(Number(env.number), Number(env.timestamp)) + ) > 0: + header.blob_gas_used = blob_gas_per_blob * count_blobs(txs) + if block.header_verify is not None: # Verify the header after transition tool processing. header.verify(block.header_verify) @@ -199,11 +280,15 @@ def generate_block_data( return header, rlp, txs, next_alloc, env - def network_info(self, fork, eips=None): + def network_info(self, fork: Fork, eips: Optional[List[int]] = None): """ Returns fixture network information for the fork & EIP/s. 
""" - return "+".join([fork.name()] + [str(eip) for eip in eips]) if eips else fork.name() + return ( + "+".join([fork.blockchain_test_network_name()] + [str(eip) for eip in eips]) + if eips + else fork.blockchain_test_network_name() + ) def verify_post_state(self, t8n, alloc): """ @@ -229,46 +314,53 @@ def make_fixture( pre, genesis_rlp, genesis = self.make_genesis(t8n, fork) alloc = to_json(pre) - env = Environment.from_parent_header(genesis) + env = environment_from_parent_header(genesis) head = genesis.hash if genesis.hash is not None else Hash(0) for block in self.blocks: - header, rlp, txs, new_alloc, new_env = self.generate_block_data( - t8n=t8n, fork=fork, block=block, previous_env=env, previous_alloc=alloc, eips=eips - ) if block.rlp is None: # This is the most common case, the RLP needs to be constructed # based on the transactions to be included in the block. # Set the environment according to the block to execute. + header, rlp, txs, new_alloc, new_env = self.generate_block_data( + t8n=t8n, + fork=fork, + block=block, + previous_env=env, + previous_alloc=alloc, + eips=eips, + ) + fixture_block = FixtureBlock( + rlp=rlp, + block_header=header, + block_number=Number(header.number), + txs=txs, + ommers=[], + withdrawals=new_env.withdrawals, + ) if block.exception is None: - fixture_blocks.append( - FixtureBlock( - rlp=rlp, - block_header=header, - block_number=Number(header.number), - txs=txs, - ommers=[], - withdrawals=new_env.withdrawals, - ), - ) + fixture_blocks.append(fixture_block) # Update env, alloc and last block hash for the next block. alloc = new_alloc - env = new_env.apply_new_parent(header) + env = apply_new_parent(new_env, header) head = header.hash if header.hash is not None else Hash(0) else: fixture_blocks.append( InvalidFixtureBlock( rlp=rlp, expected_exception=block.exception, - rlp_decoded=FixtureBlock( - block_header=header, - txs=txs, - ommers=[], - withdrawals=new_env.withdrawals, + rlp_decoded=( + None + if BlockException.RLP_STRUCTURES_ENCODING in block.exception + else replace(fixture_block, rlp=None) ), ), ) else: + assert block.exception is not None, ( + "test correctness: if the block's rlp is hard-coded, " + + "the block is expected to produce an exception" + ) fixture_blocks.append( InvalidFixtureBlock( rlp=Bytes(block.rlp), @@ -297,11 +389,12 @@ def make_hive_fixture( """ Create a hive fixture from the blocktest definition. """ - fixture_payloads: List[Optional[FixtureEngineNewPayload]] = [] + fixture_payloads: List[FixtureEngineNewPayload] = [] pre, _, genesis = self.make_genesis(t8n, fork) alloc = to_json(pre) - env = Environment.from_parent_header(genesis) + env = environment_from_parent_header(genesis) + head_hash = genesis.hash for block in self.blocks: header, _, txs, new_alloc, new_env = self.generate_block_data( @@ -314,16 +407,49 @@ def make_hive_fixture( header=header, transactions=txs, withdrawals=new_env.withdrawals, - valid=block.exception is None, + validation_error=block.exception, error_code=block.engine_api_error_code, ) ) if block.exception is None: alloc = new_alloc - env = env.apply_new_parent(header) + env = apply_new_parent(env, header) + head_hash = header.hash fcu_version = fork.engine_forkchoice_updated_version(header.number, header.timestamp) + assert ( + fcu_version is not None + ), "A hive fixture was requested but no forkchoice update is defined. The framework should" + " never try to execute this test case." 
self.verify_post_state(t8n, alloc) + + sync_payload: Optional[FixtureEngineNewPayload] = None + if self.verify_sync: + # Test is marked for syncing verification. + assert ( + genesis.hash != head_hash + ), "Invalid payload tests negative test via sync is not supported yet." + + # Most clients require the header to start the sync process, so we create an empty + # block on top of the last block of the test to send it as new payload and trigger the + # sync process. + sync_header, _, _, _, _ = self.generate_block_data( + t8n=t8n, + fork=fork, + block=Block(), + previous_env=env, + previous_alloc=alloc, + eips=eips, + ) + sync_payload = FixtureEngineNewPayload.from_fixture_header( + fork=fork, + header=sync_header, + transactions=[], + withdrawals=[], + validation_error=None, + error_code=None, + ) + return HiveFixture( fork=self.network_info(fork, eips), genesis=genesis, @@ -331,9 +457,32 @@ def make_hive_fixture( fcu_version=fcu_version, pre_state=pre, post_state=alloc_to_accounts(alloc), + sync_payload=sync_payload, name=self.tag, ) + def generate( + self, + t8n: TransitionTool, + fork: Fork, + eips: Optional[List[int]] = None, + ) -> BaseFixture: + """ + Generate the BlockchainTest fixture. + """ + t8n.reset_traces() + if self.fixture_format == FixtureFormats.BLOCKCHAIN_TEST_HIVE: + if fork.engine_forkchoice_updated_version() is None: + raise Exception( + "A hive fixture was requested but no forkchoice update is defined. " + "The framework should never try to execute this test case." + ) + return self.make_hive_fixture(t8n, fork, eips) + elif self.fixture_format == FixtureFormats.BLOCKCHAIN_TEST: + return self.make_fixture(t8n, fork, eips) + + raise Exception(f"Unknown fixture format: {self.fixture_format}") + BlockchainTestSpec = Callable[[str], Generator[BlockchainTest, None, None]] BlockchainTestFiller = Type[BlockchainTest] diff --git a/src/ethereum_test_tools/spec/blockchain/types.py b/src/ethereum_test_tools/spec/blockchain/types.py new file mode 100644 index 0000000000..42ca7a8d6b --- /dev/null +++ b/src/ethereum_test_tools/spec/blockchain/types.py @@ -0,0 +1,1159 @@ +""" +BlockchainTest types +""" + +import json +from copy import copy, deepcopy +from dataclasses import dataclass, fields, replace +from pathlib import Path +from typing import Any, Callable, ClassVar, Dict, List, Mapping, Optional, TextIO, Tuple + +from ethereum import rlp as eth_rlp +from ethereum.base_types import Uint +from ethereum.crypto.hash import keccak256 + +from ethereum_test_forks import Fork +from evm_transition_tool import FixtureFormats + +from ...common.base_types import ( + Address, + Bloom, + Bytes, + Hash, + HeaderNonce, + HexNumber, + Number, + ZeroPaddedHexNumber, +) +from ...common.constants import AddrAA, EmptyOmmersRoot, EngineAPIError +from ...common.conversions import BytesConvertible, FixedSizeBytesConvertible, NumberConvertible +from ...common.json import JSONEncoder, field +from ...common.types import ( + Account, + Alloc, + Environment, + Removable, + Transaction, + Withdrawal, + blob_versioned_hashes_from_transactions, + transaction_list_to_serializable_list, +) +from ...exceptions import BlockException, ExceptionList, TransactionException +from ..base.base_test import BaseFixture + + +@dataclass(kw_only=True) +class Header: + """ + Header type used to describe block header properties in test specs. 
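`generate` is now the single entry point the filling machinery calls: it dispatches on `fixture_format` and raises if a hive fixture is requested on a fork without an Engine API forkchoice-update version. A hedged sketch of driving it directly, where `t8n`, `fork`, and the pre/post allocations are assumed to come from the usual plugin wiring:

```python
from ethereum_test_tools.spec.blockchain.blockchain_test import BlockchainTest


def fill_blockchain_fixtures(t8n, fork, pre, post, blocks):
    """Produce every supported fixture flavour for one test definition."""
    fixtures = {}
    for fixture_format in BlockchainTest.fixture_formats():
        test = BlockchainTest(
            pre=pre,
            post=post,
            blocks=blocks,
            fixture_format=fixture_format,
        )
        fixtures[fixture_format] = test.generate(t8n, fork)
    return fixtures
```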
+ """ + + parent_hash: Optional[FixedSizeBytesConvertible] = None + ommers_hash: Optional[FixedSizeBytesConvertible] = None + coinbase: Optional[FixedSizeBytesConvertible] = None + state_root: Optional[FixedSizeBytesConvertible] = None + transactions_root: Optional[FixedSizeBytesConvertible] = None + receipt_root: Optional[FixedSizeBytesConvertible] = None + bloom: Optional[FixedSizeBytesConvertible] = None + difficulty: Optional[NumberConvertible] = None + number: Optional[NumberConvertible] = None + gas_limit: Optional[NumberConvertible] = None + gas_used: Optional[NumberConvertible] = None + timestamp: Optional[NumberConvertible] = None + extra_data: Optional[BytesConvertible] = None + mix_digest: Optional[FixedSizeBytesConvertible] = None + nonce: Optional[FixedSizeBytesConvertible] = None + base_fee: Optional[NumberConvertible | Removable] = None + withdrawals_root: Optional[FixedSizeBytesConvertible | Removable] = None + blob_gas_used: Optional[NumberConvertible | Removable] = None + excess_blob_gas: Optional[NumberConvertible | Removable] = None + beacon_root: Optional[FixedSizeBytesConvertible | Removable] = None + hash: Optional[FixedSizeBytesConvertible] = None + + REMOVE_FIELD: ClassVar[Removable] = Removable() + """ + Sentinel object used to specify that a header field should be removed. + """ + EMPTY_FIELD: ClassVar[Removable] = Removable() + """ + Sentinel object used to specify that a header field must be empty during verification. + """ + + +@dataclass(kw_only=True) +class HeaderFieldSource: + """ + Block header field metadata specifying the source used to populate the field when collecting + the block header from different sources, and to validate it. + """ + + required: bool = True + """ + Whether the field is required or not, regardless of the fork. + """ + fork_requirement_check: Optional[str] = None + """ + Name of the method to call to check if the field is required for the current fork. + """ + default: Optional[Any] = None + """ + Default value for the field if no value was provided by either the transition tool or the + environment + """ + parse_type: Optional[Callable] = None + """ + The type or function to use to parse the field to before initializing the object. + """ + source_environment: Optional[str] = None + """ + Name of the field in the environment object, which can be a callable. + """ + source_transition_tool: Optional[str] = None + """ + Name of the field in the transition tool result dictionary. + """ + + def collect( + self, + *, + target: Dict[str, Any], + field_name: str, + fork: Fork, + number: int, + timestamp: int, + transition_tool_result: Dict[str, Any], + environment: Environment, + ) -> None: + """ + Collects the field from the different sources according to the + metadata description. 
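The relocated `Header` type is the test-facing modifier and expectation object: fields left as `None` are ignored, `REMOVE_FIELD` strips a field from the generated header, and `EMPTY_FIELD` asserts during verification that the field was never populated. A sketch of the two typical uses via `Block.header_verify` and `Block.rlp_modifier`, assuming the imports resolve as in this diff:

```python
from ethereum_test_tools.common.types import Transaction
from ethereum_test_tools.exceptions import BlockException
from ethereum_test_tools.spec.blockchain.types import Block, Header

# Expect the filled header to report 21000 gas used and no blob-gas fields.
checked_block = Block(
    txs=[Transaction(gas_price=10)],
    header_verify=Header(
        gas_used=21_000,
        blob_gas_used=Header.EMPTY_FIELD,
        excess_blob_gas=Header.EMPTY_FIELD,
    ),
)

# Drop baseFeePerGas from the produced header before re-encoding the block,
# which a post-London client must reject.
invalid_block = Block(
    rlp_modifier=Header(base_fee=Header.REMOVE_FIELD),
    exception=BlockException.INCORRECT_BLOCK_FORMAT,
)
```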
+ """ + value = None + required = self.required + if self.fork_requirement_check is not None: + required = getattr(fork, self.fork_requirement_check)(number, timestamp) + + if self.source_transition_tool is not None: + if self.source_transition_tool in transition_tool_result: + got_value = transition_tool_result.get(self.source_transition_tool) + if got_value is not None: + value = got_value + + if self.source_environment is not None: + got_value = getattr(environment, self.source_environment, None) + if callable(got_value): + got_value = got_value() + if got_value is not None: + value = got_value + + if required: + if value is None: + if self.default is not None: + value = self.default + else: + raise ValueError(f"missing required field '{field_name}'") + + if value is not None and self.parse_type is not None: + value = self.parse_type(value) + + target[field_name] = value + + +def header_field(*args, source: Optional[HeaderFieldSource] = None, **kwargs) -> Any: + """ + A wrapper around `dataclasses.field` that allows for json configuration info and header + metadata. + """ + if "metadata" in kwargs: + metadata = kwargs["metadata"] + else: + metadata = {} + assert isinstance(metadata, dict) + + if source is not None: + metadata["source"] = source + + kwargs["metadata"] = metadata + return field(*args, **kwargs) + + +@dataclass(kw_only=True) +class FixtureHeader: + """ + Representation of an Ethereum header within a test Fixture. + """ + + parent_hash: Hash = header_field( + source=HeaderFieldSource( + parse_type=Hash, + source_environment="parent_hash", + ), + json_encoder=JSONEncoder.Field(name="parentHash"), + ) + ommers_hash: Hash = header_field( + source=HeaderFieldSource( + parse_type=Hash, + source_transition_tool="sha3Uncles", + default=EmptyOmmersRoot, + ), + json_encoder=JSONEncoder.Field(name="uncleHash"), + ) + coinbase: Address = header_field( + source=HeaderFieldSource( + parse_type=Address, + source_environment="coinbase", + ), + json_encoder=JSONEncoder.Field(), + ) + state_root: Hash = header_field( + source=HeaderFieldSource( + parse_type=Hash, + source_transition_tool="stateRoot", + ), + json_encoder=JSONEncoder.Field(name="stateRoot"), + ) + transactions_root: Hash = header_field( + source=HeaderFieldSource( + parse_type=Hash, + source_transition_tool="txRoot", + ), + json_encoder=JSONEncoder.Field(name="transactionsTrie"), + ) + receipt_root: Hash = header_field( + source=HeaderFieldSource( + parse_type=Hash, + source_transition_tool="receiptsRoot", + ), + json_encoder=JSONEncoder.Field(name="receiptTrie"), + ) + bloom: Bloom = header_field( + source=HeaderFieldSource( + parse_type=Bloom, + source_transition_tool="logsBloom", + ), + json_encoder=JSONEncoder.Field(), + ) + difficulty: int = header_field( + source=HeaderFieldSource( + parse_type=Number, + source_transition_tool="currentDifficulty", + source_environment="difficulty", + default=0, + ), + json_encoder=JSONEncoder.Field(cast_type=ZeroPaddedHexNumber), + ) + number: int = header_field( + source=HeaderFieldSource( + parse_type=Number, + source_environment="number", + ), + json_encoder=JSONEncoder.Field(cast_type=ZeroPaddedHexNumber), + ) + gas_limit: int = header_field( + source=HeaderFieldSource( + parse_type=Number, + source_environment="gas_limit", + ), + json_encoder=JSONEncoder.Field(name="gasLimit", cast_type=ZeroPaddedHexNumber), + ) + gas_used: int = header_field( + source=HeaderFieldSource( + parse_type=Number, + source_transition_tool="gasUsed", + ), + json_encoder=JSONEncoder.Field(name="gasUsed", 
cast_type=ZeroPaddedHexNumber), + ) + timestamp: int = header_field( + source=HeaderFieldSource( + parse_type=Number, + source_environment="timestamp", + ), + json_encoder=JSONEncoder.Field(cast_type=ZeroPaddedHexNumber), + ) + extra_data: Bytes = header_field( + source=HeaderFieldSource( + parse_type=Bytes, + source_environment="extra_data", + default=b"", + ), + json_encoder=JSONEncoder.Field(name="extraData"), + ) + mix_digest: Hash = header_field( + source=HeaderFieldSource( + parse_type=Hash, + source_environment="prev_randao", + default=b"", + ), + json_encoder=JSONEncoder.Field(name="mixHash"), + ) + nonce: HeaderNonce = header_field( + source=HeaderFieldSource( + parse_type=HeaderNonce, + default=b"", + ), + json_encoder=JSONEncoder.Field(), + ) + base_fee: Optional[int] = header_field( + default=None, + source=HeaderFieldSource( + parse_type=Number, + fork_requirement_check="header_base_fee_required", + source_transition_tool="currentBaseFee", + source_environment="base_fee", + ), + json_encoder=JSONEncoder.Field(name="baseFeePerGas", cast_type=ZeroPaddedHexNumber), + ) + withdrawals_root: Optional[Hash] = header_field( + default=None, + source=HeaderFieldSource( + parse_type=Hash, + fork_requirement_check="header_withdrawals_required", + source_transition_tool="withdrawalsRoot", + ), + json_encoder=JSONEncoder.Field(name="withdrawalsRoot"), + ) + blob_gas_used: Optional[int] = header_field( + default=None, + source=HeaderFieldSource( + parse_type=Number, + fork_requirement_check="header_blob_gas_used_required", + source_transition_tool="blobGasUsed", + ), + json_encoder=JSONEncoder.Field(name="blobGasUsed", cast_type=ZeroPaddedHexNumber), + ) + excess_blob_gas: Optional[int] = header_field( + default=None, + source=HeaderFieldSource( + parse_type=Number, + fork_requirement_check="header_excess_blob_gas_required", + source_transition_tool="currentExcessBlobGas", + ), + json_encoder=JSONEncoder.Field(name="excessBlobGas", cast_type=ZeroPaddedHexNumber), + ) + beacon_root: Optional[Hash] = header_field( + default=None, + source=HeaderFieldSource( + parse_type=Hash, + fork_requirement_check="header_beacon_root_required", + source_environment="beacon_root", + ), + json_encoder=JSONEncoder.Field(name="parentBeaconBlockRoot"), + ) + hash: Optional[Hash] = header_field( + default=None, + source=HeaderFieldSource( + required=False, + ), + json_encoder=JSONEncoder.Field(), + ) + + @classmethod + def collect( + cls, + *, + fork: Fork, + transition_tool_result: Dict[str, Any], + environment: Environment, + ) -> "FixtureHeader": + """ + Collects a FixtureHeader object from multiple sources: + - The transition tool result + - The test's current environment + """ + # We depend on the environment to get the number and timestamp to check the fork + # requirements + number, timestamp = Number(environment.number), Number(environment.timestamp) + + # Collect the header fields + kwargs: Dict[str, Any] = {} + for header_field in fields(cls): + field_name = header_field.name + metadata = header_field.metadata + assert metadata is not None, f"Field {field_name} has no header field metadata" + field_metadata = metadata.get("source") + assert isinstance(field_metadata, HeaderFieldSource), ( + f"Field {field_name} has invalid header_field " f"metadata: {field_metadata}" + ) + field_metadata.collect( + target=kwargs, + field_name=field_name, + fork=fork, + number=number, + timestamp=timestamp, + transition_tool_result=transition_tool_result, + environment=environment, + ) + + # Pass the collected fields as 
keyword arguments to the constructor + return cls(**kwargs) + + def join(self, modifier: Header) -> "FixtureHeader": + """ + Produces a fixture header copy with the set values from the modifier. + """ + new_fixture_header = copy(self) + for header_field in fields(self): + field_name = header_field.name + value = getattr(modifier, field_name) + if value is not None: + if value is Header.REMOVE_FIELD: + setattr(new_fixture_header, field_name, None) + else: + metadata = header_field.metadata + assert metadata is not None, f"Field {field_name} has no header field metadata" + field_metadata = metadata.get("source") + assert isinstance(field_metadata, HeaderFieldSource), ( + f"Field {field_name} has invalid header_field " + f"metadata: {field_metadata}" + ) + if field_metadata.parse_type is not None: + value = field_metadata.parse_type(value) + setattr(new_fixture_header, field_name, value) + return new_fixture_header + + def verify(self, baseline: Header): + """ + Verifies that the header fields from the baseline are as expected. + """ + for header_field in fields(self): + field_name = header_field.name + baseline_value = getattr(baseline, field_name) + if baseline_value is not None: + assert baseline_value is not Header.REMOVE_FIELD, "invalid baseline header" + value = getattr(self, field_name) + if baseline_value is Header.EMPTY_FIELD: + assert ( + value is None + ), f"invalid header field {field_name}, got {value}, want None" + continue + metadata = header_field.metadata + field_metadata = metadata.get("source") + # type check is performed on collect() + if field_metadata.parse_type is not None: # type: ignore + baseline_value = field_metadata.parse_type(baseline_value) # type: ignore + assert value == baseline_value, ( + f"invalid header field ({field_name}) value, " + + f"got {value}, want {baseline_value}" + ) + + def build( + self, + *, + txs: List[Transaction], + ommers: List[Header], + withdrawals: List[Withdrawal] | None, + ) -> Tuple[Bytes, Hash]: + """ + Returns the serialized version of the block and its hash. + """ + header = [ + self.parent_hash, + self.ommers_hash, + self.coinbase, + self.state_root, + self.transactions_root, + self.receipt_root, + self.bloom, + Uint(int(self.difficulty)), + Uint(int(self.number)), + Uint(int(self.gas_limit)), + Uint(int(self.gas_used)), + Uint(int(self.timestamp)), + self.extra_data, + self.mix_digest, + self.nonce, + ] + if self.base_fee is not None: + header.append(Uint(int(self.base_fee))) + if self.withdrawals_root is not None: + header.append(self.withdrawals_root) + if self.blob_gas_used is not None: + header.append(Uint(int(self.blob_gas_used))) + if self.excess_blob_gas is not None: + header.append(Uint(self.excess_blob_gas)) + if self.beacon_root is not None: + header.append(self.beacon_root) + + block = [ + header, + transaction_list_to_serializable_list(txs), + ommers, # TODO: This is incorrect, and we probably need to serialize the ommers + ] + + if withdrawals is not None: + block.append([w.to_serializable_list() for w in withdrawals]) + + serialized_bytes = Bytes(eth_rlp.encode(block)) + + return serialized_bytes, Hash(keccak256(eth_rlp.encode(header))) + + +@dataclass(kw_only=True) +class Block(Header): + """ + Block type used to describe block properties in test specs + """ + + rlp: Optional[BytesConvertible] = None + """ + If set, blockchain test will skip generating the block and will pass this value directly to + the Fixture. 
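The resolution order implemented by `HeaderFieldSource.collect` (transition tool output first, then the environment, then the declared default, raising if a required field is still missing) is worth one worked field. A self-contained sketch that mirrors it for `difficulty`, which declares both sources and a default of 0; the input values are invented:

```python
from typing import Any, Optional


def resolve(
    t8n_value: Optional[Any],
    env_value: Optional[Any],
    default: Optional[Any],
    required: bool = True,
) -> Any:
    """Standalone mirror of the precedence used by HeaderFieldSource.collect()."""
    value = None
    if t8n_value is not None:   # source_transition_tool, e.g. "currentDifficulty"
        value = t8n_value
    if env_value is not None:   # source_environment overrides when both are set
        value = env_value
    if required and value is None:
        if default is None:
            raise ValueError("missing required field")
        value = default
    return value


# t8n reports a difficulty and the test environment does not set one:
assert resolve(0x20000, None, default=0) == 0x20000
# neither source provides a value, so the declared default applies:
assert resolve(None, None, default=0) == 0
```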
+ + Only meant to be used to simulate blocks with bad formats, and therefore + requires the block to produce an exception. + """ + header_verify: Optional[Header] = None + """ + If set, the block header will be verified against the specified values. + """ + rlp_modifier: Optional[Header] = None + """ + An RLP modifying header which values would be used to override the ones + returned by the `evm_transition_tool`. + """ + exception: Optional[BlockException | TransactionException | ExceptionList] = None + """ + If set, the block is expected to be rejected by the client. + """ + engine_api_error_code: Optional[EngineAPIError] = None + """ + If set, the block is expected to produce an error response from the Engine API. + """ + txs: Optional[List[Transaction]] = None + """ + List of transactions included in the block. + """ + ommers: Optional[List[Header]] = None + """ + List of ommer headers included in the block. + """ + withdrawals: Optional[List[Withdrawal]] = None + """ + List of withdrawals to perform for this block. + """ + + def set_environment(self, env: Environment) -> Environment: + """ + Creates a copy of the environment with the characteristics of this + specific block. + """ + new_env = copy(env) + + """ + Values that need to be set in the environment and are `None` for + this block need to be set to their defaults. + """ + environment_default = Environment() + new_env.difficulty = self.difficulty + new_env.coinbase = ( + self.coinbase if self.coinbase is not None else environment_default.coinbase + ) + new_env.gas_limit = self.gas_limit or env.parent_gas_limit or environment_default.gas_limit + if not isinstance(self.base_fee, Removable): + new_env.base_fee = self.base_fee + new_env.withdrawals = self.withdrawals + if not isinstance(self.excess_blob_gas, Removable): + new_env.excess_blob_gas = self.excess_blob_gas + if not isinstance(self.blob_gas_used, Removable): + new_env.blob_gas_used = self.blob_gas_used + if not isinstance(self.beacon_root, Removable): + new_env.beacon_root = self.beacon_root + """ + These values are required, but they depend on the previous environment, + so they can be calculated here. + """ + if self.number is not None: + new_env.number = self.number + else: + # calculate the next block number for the environment + if len(new_env.block_hashes) == 0: + new_env.number = 0 + else: + new_env.number = max([Number(n) for n in new_env.block_hashes.keys()]) + 1 + + if self.timestamp is not None: + new_env.timestamp = self.timestamp + else: + assert new_env.parent_timestamp is not None + new_env.timestamp = int(Number(new_env.parent_timestamp) + 12) + + return new_env + + def copy_with_rlp(self, rlp: Bytes | BytesConvertible | None) -> "Block": + """ + Creates a copy of the block and adds the specified RLP. + """ + new_block = deepcopy(self) + new_block.rlp = Bytes.or_none(rlp) + return new_block + + +@dataclass(kw_only=True) +class FixtureExecutionPayload(FixtureHeader): + """ + Representation of the execution payload of a block within a test fixture. 
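A hedged sketch of the defaults that `Block.set_environment` computes when a test leaves `number` and `timestamp` unset. The import paths are assumptions, and the two parent attributes are set directly here although the framework normally fills them via `apply_new_parent`:

```python
from ethereum_test_tools.common.types import Environment      # assumed path
from ethereum_test_tools.spec.blockchain.types import Block    # assumed path

env = Environment()
env.parent_timestamp = 1_000
env.block_hashes = {0: "0x" + "00" * 32}   # genesis hash is already known

next_env = Block().set_environment(env)
# With neither number nor timestamp set on the Block:
#   next_env.number    == 1     (max of the known block numbers, plus one)
#   next_env.timestamp == 1012  (parent timestamp plus 12 seconds)
```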
+ """ + + # Skipped fields in the Engine API + ommers_hash: Hash = field( + json_encoder=JSONEncoder.Field( + skip=True, + ), + ) + transactions_root: Hash = field( + json_encoder=JSONEncoder.Field( + skip=True, + ), + ) + difficulty: int = field( + json_encoder=JSONEncoder.Field( + skip=True, + ) + ) + nonce: HeaderNonce = field( + json_encoder=JSONEncoder.Field( + skip=True, + ) + ) + withdrawals_root: Optional[Hash] = field( + default=None, + json_encoder=JSONEncoder.Field( + skip=True, + ), + ) + + # Fields with different names + coinbase: Address = field( + json_encoder=JSONEncoder.Field( + name="feeRecipient", + ) + ) + receipt_root: Hash = field( + json_encoder=JSONEncoder.Field( + name="receiptsRoot", + ), + ) + bloom: Bloom = field( + json_encoder=JSONEncoder.Field( + name="logsBloom", + ) + ) + mix_digest: Hash = field( + json_encoder=JSONEncoder.Field( + name="prevRandao", + ), + ) + hash: Optional[Hash] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="blockHash", + ), + ) + + # Fields with different formatting + number: int = field( + json_encoder=JSONEncoder.Field( + name="blockNumber", + cast_type=HexNumber, + ) + ) + gas_limit: int = field(json_encoder=JSONEncoder.Field(name="gasLimit", cast_type=HexNumber)) + gas_used: int = field(json_encoder=JSONEncoder.Field(name="gasUsed", cast_type=HexNumber)) + timestamp: int = field(json_encoder=JSONEncoder.Field(cast_type=HexNumber)) + base_fee: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field(name="baseFeePerGas", cast_type=HexNumber), + ) + blob_gas_used: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field(name="blobGasUsed", cast_type=HexNumber), + ) + excess_blob_gas: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field(name="excessBlobGas", cast_type=HexNumber), + ) + + # Fields only used in the Engine API + transactions: Optional[List[Transaction]] = field( + default=None, + json_encoder=JSONEncoder.Field( + cast_type=lambda txs: [Bytes(tx.serialized_bytes()) for tx in txs], + to_json=True, + ), + ) + withdrawals: Optional[List[Withdrawal]] = field( + default=None, + json_encoder=JSONEncoder.Field( + to_json=True, + ), + ) + + @classmethod + def from_fixture_header( + cls, + header: FixtureHeader, + transactions: Optional[List[Transaction]] = None, + withdrawals: Optional[List[Withdrawal]] = None, + ) -> "FixtureExecutionPayload": + """ + Returns a FixtureExecutionPayload from a FixtureHeader, a list + of transactions and a list of withdrawals. + """ + kwargs = {field.name: getattr(header, field.name) for field in fields(header)} + return cls(**kwargs, transactions=transactions, withdrawals=withdrawals) + + +@dataclass(kw_only=True) +class FixtureEngineNewPayload: + """ + Representation of the `engine_newPayloadVX` information to be + sent using the block information. 
+ """ + + payload: FixtureExecutionPayload = field( + json_encoder=JSONEncoder.Field( + name="executionPayload", + to_json=True, + ) + ) + blob_versioned_hashes: Optional[List[FixedSizeBytesConvertible]] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="expectedBlobVersionedHashes", + cast_type=lambda hashes: [Hash(hash) for hash in hashes], + to_json=True, + ), + ) + beacon_root: Optional[FixedSizeBytesConvertible] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="parentBeaconBlockRoot", + cast_type=Hash, + ), + ) + validation_error: Optional[TransactionException | BlockException | ExceptionList] = field( + json_encoder=JSONEncoder.Field( + name="validationError", + ), + ) + version: int = field( + json_encoder=JSONEncoder.Field(), + ) + error_code: Optional[EngineAPIError] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="errorCode", + cast_type=int, + ), + ) + + @classmethod + def from_fixture_header( + cls, + fork: Fork, + header: FixtureHeader, + transactions: List[Transaction], + withdrawals: Optional[List[Withdrawal]], + validation_error: Optional[TransactionException | BlockException | ExceptionList], + error_code: Optional[EngineAPIError], + ) -> "FixtureEngineNewPayload": + """ + Creates a `FixtureEngineNewPayload` from a `FixtureHeader`. + """ + new_payload_version = fork.engine_new_payload_version(header.number, header.timestamp) + + assert new_payload_version is not None, "Invalid header for engine_newPayload" + + new_payload = cls( + payload=FixtureExecutionPayload.from_fixture_header( + header=replace(header, beacon_root=None), + transactions=transactions, + withdrawals=withdrawals, + ), + version=new_payload_version, + validation_error=validation_error, + error_code=error_code, + ) + + if fork.engine_new_payload_blob_hashes(header.number, header.timestamp): + new_payload.blob_versioned_hashes = blob_versioned_hashes_from_transactions( + transactions + ) + + if fork.engine_new_payload_beacon_root(header.number, header.timestamp): + new_payload.beacon_root = header.beacon_root + + return new_payload + + +@dataclass +class FixtureTransaction(Transaction): + """ + Representation of an Ethereum transaction within a test Fixture. + """ + + ty: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="type", + cast_type=ZeroPaddedHexNumber, + ), + ) + """ + Transaction type value. 
+ """ + chain_id: int = field( + default=1, + json_encoder=JSONEncoder.Field( + name="chainId", + cast_type=ZeroPaddedHexNumber, + ), + ) + nonce: int = field( + default=0, + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + gas_price: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="gasPrice", + cast_type=ZeroPaddedHexNumber, + ), + ) + max_priority_fee_per_gas: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="maxPriorityFeePerGas", + cast_type=ZeroPaddedHexNumber, + ), + ) + max_fee_per_gas: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="maxFeePerGas", + cast_type=ZeroPaddedHexNumber, + ), + ) + gas_limit: int = field( + default=21000, + json_encoder=JSONEncoder.Field( + name="gasLimit", + cast_type=ZeroPaddedHexNumber, + ), + ) + to: Optional[FixedSizeBytesConvertible] = field( + default=AddrAA, + json_encoder=JSONEncoder.Field( + cast_type=Address, + default_value_skip_cast="", + ), + ) + value: int = field( + default=0, + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + data: BytesConvertible = field( + default_factory=bytes, + json_encoder=JSONEncoder.Field( + cast_type=Bytes, + ), + ) + max_fee_per_blob_gas: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="maxFeePerBlobGas", + cast_type=ZeroPaddedHexNumber, + ), + ) + v: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + r: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + s: Optional[int] = field( + default=None, + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + + @classmethod + def from_transaction(cls, tx: Transaction) -> "FixtureTransaction": + """ + Returns a FixtureTransaction from a Transaction. + """ + kwargs = {field.name: getattr(tx, field.name) for field in fields(tx)} + return cls(**kwargs) + + +@dataclass(kw_only=True) +class FixtureWithdrawal(Withdrawal): + """ + Structure to represent a single withdrawal of a validator's balance from + the beacon chain in the output fixture. + """ + + index: NumberConvertible = field( + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + validator: NumberConvertible = field( + json_encoder=JSONEncoder.Field( + name="validatorIndex", + cast_type=ZeroPaddedHexNumber, + ), + ) + amount: NumberConvertible = field( + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + + @classmethod + def from_withdrawal(cls, w: Withdrawal) -> "FixtureWithdrawal": + """ + Returns a FixtureWithdrawal from a Withdrawal. + """ + kwargs = {field.name: getattr(w, field.name) for field in fields(w)} + return cls(**kwargs) + + +@dataclass(kw_only=True) +class FixtureBlock: + """ + Representation of an Ethereum block within a test Fixture. 
+ """ + + rlp: Optional[Bytes] = field( + json_encoder=JSONEncoder.Field(), + ) + block_header: FixtureHeader = field( + json_encoder=JSONEncoder.Field( + name="blockHeader", + to_json=True, + ), + ) + block_number: NumberConvertible = field( + json_encoder=JSONEncoder.Field( + name="blocknumber", + cast_type=Number, + ), + ) + txs: List[Transaction] = field( + json_encoder=JSONEncoder.Field( + name="transactions", + cast_type=lambda txs: [FixtureTransaction.from_transaction(tx) for tx in txs], + to_json=True, + ), + ) + ommers: List[FixtureHeader] = field( + json_encoder=JSONEncoder.Field( + name="uncleHeaders", + to_json=True, + ), + ) + withdrawals: Optional[List[Withdrawal]] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="withdrawals", + cast_type=lambda withdrawals: [ + FixtureWithdrawal.from_withdrawal(w) for w in withdrawals + ], + to_json=True, + ), + ) + + +@dataclass(kw_only=True) +class InvalidFixtureBlock: + """ + Representation of an invalid Ethereum block within a test Fixture. + """ + + rlp: Bytes = field( + json_encoder=JSONEncoder.Field(), + ) + expected_exception: TransactionException | BlockException | ExceptionList = field( + json_encoder=JSONEncoder.Field( + name="expectException", + ), + ) + rlp_decoded: Optional[FixtureBlock] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="rlp_decoded", + to_json=True, + ), + ) + + +@dataclass(kw_only=True) +class FixtureCommon(BaseFixture): + """ + Base Ethereum test fixture fields class. + """ + + name: str = field( + default="", + json_encoder=JSONEncoder.Field( + skip=True, + ), + ) + fork: str = field( + json_encoder=JSONEncoder.Field( + name="network", + ), + ) + + @classmethod + def collect_into_file(cls, fd: TextIO, fixtures: Dict[str, "BaseFixture"]): + """ + For BlockchainTest format, we simply join the json fixtures into a single file. + """ + json_fixtures: Dict[str, Dict[str, Any]] = {} + for name, fixture in fixtures.items(): + assert isinstance(fixture, FixtureCommon), f"Invalid fixture type: {type(fixture)}" + json_fixtures[name] = fixture.to_json() + json.dump(json_fixtures, fd, indent=4) + + +@dataclass(kw_only=True) +class Fixture(FixtureCommon): + """ + Cross-client specific test fixture information. + """ + + genesis_rlp: Bytes = field( + json_encoder=JSONEncoder.Field( + name="genesisRLP", + ), + ) + genesis: FixtureHeader = field( + json_encoder=JSONEncoder.Field( + name="genesisBlockHeader", + to_json=True, + ), + ) + blocks: List[FixtureBlock | InvalidFixtureBlock] = field( + json_encoder=JSONEncoder.Field( + name="blocks", + to_json=True, + ), + ) + last_block_hash: Hash = field( + json_encoder=JSONEncoder.Field( + name="lastblockhash", + ), + ) + pre_state: Mapping[str, Account] = field( + json_encoder=JSONEncoder.Field( + name="pre", + cast_type=Alloc, + to_json=True, + ), + ) + post_state: Optional[Mapping[str, Account]] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="postState", + cast_type=Alloc, + to_json=True, + ), + ) + seal_engine: str = field( + default="NoProof", + json_encoder=JSONEncoder.Field( + name="sealEngine", + ), + ) + + @classmethod + def output_base_dir_name(cls) -> Path: + """ + Returns the name of the subdirectory where this type of fixture should be dumped to. + """ + return Path("blockchain_tests") + + @classmethod + def format(cls) -> FixtureFormats: + """ + Returns the fixture format which the evm tool can use to determine how to verify the + fixture. 
+ """ + return FixtureFormats.BLOCKCHAIN_TEST + + +@dataclass(kw_only=True) +class HiveFixture(FixtureCommon): + """ + Hive specific test fixture information. + """ + + genesis: FixtureHeader = field( + json_encoder=JSONEncoder.Field( + name="genesisBlockHeader", + to_json=True, + ), + ) + payloads: List[FixtureEngineNewPayload] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="engineNewPayloads", + to_json=True, + ), + ) + fcu_version: int = field( + default=1, + json_encoder=JSONEncoder.Field( + name="engineFcuVersion", + ), + ) + sync_payload: Optional[FixtureEngineNewPayload] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="syncPayload", + to_json=True, + ), + ) + pre_state: Mapping[str, Account] = field( + json_encoder=JSONEncoder.Field( + name="pre", + cast_type=Alloc, + to_json=True, + ), + ) + post_state: Optional[Mapping[str, Account]] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="postState", + cast_type=Alloc, + to_json=True, + ), + ) + + @classmethod + def output_base_dir_name(cls) -> Path: + """ + Returns the name of the subdirectory where this type of fixture should be dumped to. + """ + return Path("blockchain_tests_hive") + + @classmethod + def format(cls) -> FixtureFormats: + """ + Returns the fixture format which the evm tool can use to determine how to verify the + fixture. + """ + return FixtureFormats.BLOCKCHAIN_TEST_HIVE diff --git a/src/ethereum_test_tools/spec/fixture_collector.py b/src/ethereum_test_tools/spec/fixture_collector.py new file mode 100644 index 0000000000..a9ea55e615 --- /dev/null +++ b/src/ethereum_test_tools/spec/fixture_collector.py @@ -0,0 +1,199 @@ +""" +Fixture collector class used to collect, sort and combine the different types of generated +fixtures. +""" + +import os +import re +from dataclasses import dataclass, field +from pathlib import Path +from typing import Dict, Literal, Optional, Tuple + +from evm_transition_tool import FixtureFormats, TransitionTool + +from .base.base_test import BaseFixture + + +def strip_test_prefix(name: str) -> str: + """ + Removes the test prefix from a test case name. + """ + TEST_PREFIX = "test_" + if name.startswith(TEST_PREFIX): + return name[len(TEST_PREFIX) :] + return name + + +def get_module_relative_output_dir(test_module: Path, filler_path: Path) -> Path: + """ + Return a directory name for the provided test_module (relative to the + base ./tests directory) that can be used for output (within the + configured fixtures output path or the base_dump_dir directory). + + Example: + tests/shanghai/eip3855_push0/test_push0.py -> shanghai/eip3855_push0/test_push0 + """ + basename = test_module.with_suffix("").absolute() + basename_relative = basename.relative_to(filler_path.absolute()) + module_path = basename_relative.parent / basename_relative.stem + return module_path + + +@dataclass(kw_only=True) +class TestInfo: + """ + Contains test information from the current node. + """ + + name: str # pytest: Item.name + id: str # pytest: Item.nodeid + original_name: str # pytest: Item.originalname + path: Path # pytest: Item.path + + def get_name_and_parameters(self) -> Tuple[str, str]: + """ + Converts a test name to a tuple containing the test name and test parameters. 
+ + Example: + test_push0_key_sstore[fork_Shanghai] -> test_push0_key_sstore, fork_Shanghai + """ + test_name, parameters = self.name.split("[") + return test_name, re.sub(r"[\[\-]", "_", parameters).replace("]", "") + + def get_single_test_name(self) -> str: + """ + Converts a test name to a single test name. + """ + test_name, test_parameters = self.get_name_and_parameters() + return f"{test_name}__{test_parameters}" + + def get_dump_dir_path( + self, + base_dump_dir: Optional[Path], + filler_path: Path, + level: Literal["test_module", "test_function", "test_parameter"] = "test_parameter", + ) -> Optional[Path]: + """ + The path to dump the debug output as defined by the level to dump at. + """ + if not base_dump_dir: + return None + test_module_relative_dir = get_module_relative_output_dir(self.path, filler_path) + if level == "test_module": + return Path(base_dump_dir) / Path(str(test_module_relative_dir).replace(os.sep, "__")) + test_name, test_parameter_string = self.get_name_and_parameters() + flat_path = f"{str(test_module_relative_dir).replace(os.sep, '__')}__{test_name}" + if level == "test_function": + return Path(base_dump_dir) / flat_path + elif level == "test_parameter": + return Path(base_dump_dir) / flat_path / test_parameter_string + raise Exception("Unexpected level.") + + +@dataclass(kw_only=True) +class FixtureCollector: + """ + Collects all fixtures generated by the test cases. + """ + + output_dir: str + flat_output: bool + single_fixture_per_file: bool + filler_path: Path + base_dump_dir: Optional[Path] = None + + # Internal state + all_fixtures: Dict[Path, Dict[str, BaseFixture]] = field(default_factory=dict) + json_path_to_fixture_type: Dict[Path, FixtureFormats] = field(default_factory=dict) + json_path_to_test_item: Dict[Path, TestInfo] = field(default_factory=dict) + + def get_fixture_basename(self, info: TestInfo) -> Path: + """ + Returns the basename of the fixture file for a given test case. + """ + if self.flat_output: + if self.single_fixture_per_file: + return Path(strip_test_prefix(info.get_single_test_name())) + return Path(strip_test_prefix(info.original_name)) + else: + relative_fixture_output_dir = Path(info.path).parent / strip_test_prefix( + Path(info.path).stem + ) + module_relative_output_dir = get_module_relative_output_dir( + relative_fixture_output_dir, self.filler_path + ) + + if self.single_fixture_per_file: + return module_relative_output_dir / strip_test_prefix(info.get_single_test_name()) + return module_relative_output_dir / strip_test_prefix(info.original_name) + + def add_fixture(self, info: TestInfo, fixture: BaseFixture) -> None: + """ + Adds a fixture to the list of fixtures of a given test case. 
+ """ + fixture_basename = self.get_fixture_basename(info) + + fixture_path = ( + self.output_dir + / fixture.output_base_dir_name() + / fixture_basename.with_suffix(fixture.output_file_extension()) + ) + if fixture_path not in self.all_fixtures: # relevant when we group by test function + self.all_fixtures[fixture_path] = {} + if fixture_path in self.json_path_to_fixture_type: + if self.json_path_to_fixture_type[fixture_path] != fixture.format(): + raise Exception( + f"Fixture {fixture_path} has two different types: " + f"{self.json_path_to_fixture_type[fixture_path]} " + f"and {fixture.format()}" + ) + else: + self.json_path_to_fixture_type[fixture_path] = fixture.format() + self.json_path_to_test_item[fixture_path] = info + + self.all_fixtures[fixture_path][info.id] = fixture + + def dump_fixtures(self) -> None: + """ + Dumps all collected fixtures to their respective files. + """ + os.makedirs(self.output_dir, exist_ok=True) + for fixture_path, fixtures in self.all_fixtures.items(): + os.makedirs(fixture_path.parent, exist_ok=True) + + # Get the first fixture to dump to get its type + fixture = next(iter(fixtures.values())) + # Call class method to dump all the fixtures + with open(fixture_path, "w") as fd: + fixture.collect_into_file(fd, fixtures) + + def verify_fixture_files(self, evm_fixture_verification: TransitionTool) -> None: + """ + Runs `evm [state|block]test` on each fixture. + """ + for fixture_path, fixture_format in self.json_path_to_fixture_type.items(): + if FixtureFormats.is_verifiable(fixture_format): + info = self.json_path_to_test_item[fixture_path] + verify_fixtures_dump_dir = self._get_verify_fixtures_dump_dir(info) + evm_fixture_verification.verify_fixture( + fixture_format, fixture_path, verify_fixtures_dump_dir + ) + + def _get_verify_fixtures_dump_dir( + self, + info: TestInfo, + ): + """ + The directory to dump the current test function's fixture.json and fixture + verification debug output. + """ + if not self.base_dump_dir: + return None + if self.single_fixture_per_file: + return info.get_dump_dir_path( + self.base_dump_dir, self.filler_path, level="test_parameter" + ) + else: + return info.get_dump_dir_path( + self.base_dump_dir, self.filler_path, level="test_function" + ) diff --git a/src/ethereum_test_tools/spec/state/__init__.py b/src/ethereum_test_tools/spec/state/__init__.py new file mode 100644 index 0000000000..ed5114b881 --- /dev/null +++ b/src/ethereum_test_tools/spec/state/__init__.py @@ -0,0 +1,3 @@ +""" +StateTest type definitions and logic +""" diff --git a/src/ethereum_test_tools/spec/state/state_test.py b/src/ethereum_test_tools/spec/state/state_test.py new file mode 100644 index 0000000000..888cddfbfd --- /dev/null +++ b/src/ethereum_test_tools/spec/state/state_test.py @@ -0,0 +1,218 @@ +""" +Ethereum state test spec definition and filler. 
+""" + +from copy import copy +from dataclasses import dataclass +from typing import Callable, Generator, List, Mapping, Optional, Type + +from ethereum_test_forks import Fork +from evm_transition_tool import FixtureFormats, TransitionTool + +from ...common import Address, Alloc, Environment, Number, Transaction +from ...common.constants import EngineAPIError +from ...common.json import to_json +from ..base.base_test import BaseFixture, BaseTest, verify_post_alloc +from ..blockchain.blockchain_test import Block, BlockchainTest +from ..blockchain.types import Header +from ..debugging import print_traces +from .types import Fixture, FixtureForkPost + +BEACON_ROOTS_ADDRESS = Address(0x000F3DF6D732807EF1319FB7B8BB8522D0BEAC02) +TARGET_BLOB_GAS_PER_BLOCK = 393216 + + +@dataclass(kw_only=True) +class StateTest(BaseTest): + """ + Filler type that tests transactions over the period of a single block. + """ + + env: Environment + pre: Mapping + post: Mapping + tx: Transaction + engine_api_error_code: Optional[EngineAPIError] = None + blockchain_test_header_verify: Optional[Header] = None + blockchain_test_rlp_modifier: Optional[Header] = None + tag: str = "" + chain_id: int = 1 + + @classmethod + def pytest_parameter_name(cls) -> str: + """ + Returns the parameter name used to identify this filler in a test. + """ + return "state_test" + + @classmethod + def fixture_formats(cls) -> List[FixtureFormats]: + """ + Returns a list of fixture formats that can be output to the test spec. + """ + return [ + FixtureFormats.BLOCKCHAIN_TEST, + FixtureFormats.BLOCKCHAIN_TEST_HIVE, + FixtureFormats.STATE_TEST, + ] + + def _generate_blockchain_genesis_environment(self) -> Environment: + """ + Generate the genesis environment for the BlockchainTest formatted test. + """ + genesis_env = copy(self.env) + + # Modify values to the proper values for the genesis block + # TODO: All of this can be moved to a new method in `Fork` + genesis_env.withdrawals = None + genesis_env.beacon_root = None + genesis_env.number = Number(genesis_env.number) - 1 + assert ( + genesis_env.number >= 0 + ), "genesis block number cannot be negative, set state test env.number to 1" + if genesis_env.excess_blob_gas: + # The excess blob gas environment value means the value of the context (block header) + # where the transaction is executed. In a blockchain test, we need to indirectly + # set the excess blob gas by setting the excess blob gas of the genesis block + # to the expected value plus the TARGET_BLOB_GAS_PER_BLOCK, which is the value + # that will be subtracted from the excess blob gas when the first block is mined. + genesis_env.excess_blob_gas = ( + Number(genesis_env.excess_blob_gas) + TARGET_BLOB_GAS_PER_BLOCK + ) + + return genesis_env + + def _generate_blockchain_blocks(self) -> List[Block]: + """ + Generate the single block that represents this state test in a BlockchainTest format. + """ + return [ + Block( + number=self.env.number, + timestamp=self.env.timestamp, + coinbase=self.env.coinbase, + difficulty=self.env.difficulty, + gas_limit=self.env.gas_limit, + extra_data=self.env.extra_data, + withdrawals=self.env.withdrawals, + beacon_root=self.env.beacon_root, + txs=[self.tx], + ommers=[], + exception=self.tx.error, + header_verify=self.blockchain_test_header_verify, + rlp_modifier=self.blockchain_test_rlp_modifier, + ) + ] + + def generate_blockchain_test(self) -> BlockchainTest: + """ + Generate a BlockchainTest fixture from this StateTest fixture. 
+ """ + return BlockchainTest( + genesis_environment=self._generate_blockchain_genesis_environment(), + pre=self.pre, + post=self.post, + blocks=self._generate_blockchain_blocks(), + fixture_format=self.fixture_format, + t8n_dump_dir=self.t8n_dump_dir, + ) + + def make_state_test_fixture( + self, + t8n: TransitionTool, + fork: Fork, + eips: Optional[List[int]] = None, + ) -> Fixture: + """ + Create a fixture from the state test definition. + """ + env = self.env.set_fork_requirements(fork) + tx = self.tx.with_signature_and_sender(keep_secret_key=True) + pre_alloc = Alloc.merge( + Alloc(fork.pre_allocation()), + Alloc(self.pre), + ) + if empty_accounts := pre_alloc.empty_accounts(): + raise Exception(f"Empty accounts in pre state: {empty_accounts}") + transition_tool_name = fork.transition_tool_name( + block_number=Number(self.env.number), + timestamp=Number(self.env.timestamp), + ) + fork_name = ( + "+".join([transition_tool_name] + [str(eip) for eip in eips]) + if eips + else transition_tool_name + ) + next_alloc, result = t8n.evaluate( + alloc=to_json(pre_alloc), + txs=to_json([tx]), + env=to_json(env), + fork_name=fork_name, + chain_id=self.chain_id, + reward=0, # Reward on state tests is always zero + eips=eips, + debug_output_path=self.get_next_transition_tool_output_path(), + ) + + try: + verify_post_alloc(self.post, next_alloc) + except Exception as e: + print_traces(t8n.get_traces()) + raise e + + return Fixture( + env=env, + pre_state=pre_alloc, + post={ + fork.blockchain_test_network_name(): [ + FixtureForkPost.collect( + transition_tool_result=result, + transaction=tx.with_signature_and_sender(), + ) + ] + }, + transaction=tx, + ) + + def generate( + self, + t8n: TransitionTool, + fork: Fork, + eips: Optional[List[int]] = None, + ) -> BaseFixture: + """ + Generate the BlockchainTest fixture. + """ + if self.fixture_format in BlockchainTest.fixture_formats(): + return self.generate_blockchain_test().generate(t8n, fork, eips) + elif self.fixture_format == FixtureFormats.STATE_TEST: + # We can't generate a state test fixture that names a transition fork, + # so we get the fork at the block number and timestamp of the state test + fork = fork.fork_at(Number(self.env.number), Number(self.env.timestamp)) + return self.make_state_test_fixture(t8n, fork, eips) + + raise Exception(f"Unknown fixture format: {self.fixture_format}") + + +class StateTestOnly(StateTest): + """ + StateTest filler that only generates a state test fixture. + """ + + @classmethod + def pytest_parameter_name(cls) -> str: + """ + Returns the parameter name used to identify this filler in a test. + """ + return "state_test_only" + + @classmethod + def fixture_formats(cls) -> List[FixtureFormats]: + """ + Returns a list of fixture formats that can be output to the test spec. 
+ """ + return [FixtureFormats.STATE_TEST] + + +StateTestSpec = Callable[[str], Generator[StateTest, None, None]] +StateTestFiller = Type[StateTest] diff --git a/src/ethereum_test_tools/spec/state/types.py b/src/ethereum_test_tools/spec/state/types.py new file mode 100644 index 0000000000..94b26d65fa --- /dev/null +++ b/src/ethereum_test_tools/spec/state/types.py @@ -0,0 +1,318 @@ +""" +StateTest types +""" + +import json +from dataclasses import dataclass, fields +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence, TextIO + +from evm_transition_tool import FixtureFormats + +from ...common.base_types import Address, Bytes, Hash, HexNumber, ZeroPaddedHexNumber +from ...common.conversions import BytesConvertible, FixedSizeBytesConvertible, NumberConvertible +from ...common.json import JSONEncoder, field +from ...common.types import AccessList, Alloc, Environment, Transaction +from ...exceptions import ExceptionList, TransactionException +from ..base.base_test import BaseFixture + + +@dataclass(kw_only=True) +class FixtureEnvironment: + """ + Type used to describe the environment of a state test. + """ + + coinbase: FixedSizeBytesConvertible = field( + default="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + json_encoder=JSONEncoder.Field( + name="currentCoinbase", + cast_type=Address, + ), + ) + gas_limit: NumberConvertible = field( + default=100000000000000000, + json_encoder=JSONEncoder.Field( + name="currentGasLimit", + cast_type=ZeroPaddedHexNumber, + ), + ) + number: NumberConvertible = field( + default=1, + json_encoder=JSONEncoder.Field( + name="currentNumber", + cast_type=ZeroPaddedHexNumber, + ), + ) + timestamp: NumberConvertible = field( + default=1000, + json_encoder=JSONEncoder.Field( + name="currentTimestamp", + cast_type=ZeroPaddedHexNumber, + ), + ) + prev_randao: Optional[NumberConvertible] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="currentRandom", + cast_type=Hash, + ), + ) + difficulty: Optional[NumberConvertible] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="currentDifficulty", + cast_type=ZeroPaddedHexNumber, + ), + ) + base_fee: Optional[NumberConvertible] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="currentBaseFee", + cast_type=ZeroPaddedHexNumber, + ), + ) + excess_blob_gas: Optional[NumberConvertible] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="currentExcessBlobGas", + cast_type=ZeroPaddedHexNumber, + ), + ) + + @classmethod + def from_env(cls, env: Environment) -> "FixtureEnvironment": + """ + Returns a FixtureEnvironment from an Environment. + """ + kwargs = {field.name: getattr(env, field.name) for field in fields(cls)} + return cls(**kwargs) + + +@dataclass(kw_only=True) +class FixtureTransaction: + """ + Type used to describe a transaction in a state test. 
+ """ + + nonce: int = field( + json_encoder=JSONEncoder.Field( + cast_type=ZeroPaddedHexNumber, + ), + ) + gas_price: Optional[int] = field( + json_encoder=JSONEncoder.Field( + name="gasPrice", + cast_type=ZeroPaddedHexNumber, + ), + ) + max_priority_fee_per_gas: Optional[int] = field( + json_encoder=JSONEncoder.Field( + name="maxPriorityFeePerGas", + cast_type=HexNumber, + ), + ) + max_fee_per_gas: Optional[int] = field( + json_encoder=JSONEncoder.Field( + name="maxFeePerGas", + cast_type=HexNumber, + ), + ) + gas_limit: int = field( + json_encoder=JSONEncoder.Field( + name="gasLimit", + cast_type=lambda x: [ZeroPaddedHexNumber(x)], # Converted to list + to_json=True, + ), + ) + to: Optional[FixedSizeBytesConvertible] = field( + json_encoder=JSONEncoder.Field( + default_value_skip_cast="", # Empty string for None + cast_type=Address, + ), + ) + value: int = field( + json_encoder=JSONEncoder.Field( + cast_type=lambda x: [ZeroPaddedHexNumber(x)], # Converted to list + to_json=True, + ), + ) + data: BytesConvertible = field( + json_encoder=JSONEncoder.Field( + cast_type=lambda x: [Bytes(x)], + to_json=True, + ), + ) + access_list: Optional[List[AccessList]] = field( + json_encoder=JSONEncoder.Field( + name="accessLists", + cast_type=lambda x: [x], # Converted to list of lists + to_json=True, + ), + ) + max_fee_per_blob_gas: Optional[int] = field( + json_encoder=JSONEncoder.Field( + name="maxFeePerBlobGas", + cast_type=HexNumber, + ), + ) + blob_versioned_hashes: Optional[Sequence[FixedSizeBytesConvertible]] = field( + json_encoder=JSONEncoder.Field( + name="blobVersionedHashes", + cast_type=lambda x: [Hash(k) for k in x], + to_json=True, + ), + ) + + sender: FixedSizeBytesConvertible = field( + json_encoder=JSONEncoder.Field( + cast_type=Address, + ), + ) + secret_key: Optional[FixedSizeBytesConvertible] = field( + json_encoder=JSONEncoder.Field( + name="secretKey", + cast_type=Hash, + ), + ) + + @classmethod + def from_transaction(cls, tx: Transaction) -> "FixtureTransaction": + """ + Returns a FixtureTransaction from a Transaction. + """ + kwargs = {field.name: getattr(tx, field.name) for field in fields(cls)} + return cls(**kwargs) + + +@dataclass(kw_only=True) +class FixtureForkPostIndexes: + """ + Type used to describe the indexes of a single post state of a single Fork. + """ + + data: int = field(default=0, json_encoder=JSONEncoder.Field(skip_string_convert=True)) + gas: int = field(default=0, json_encoder=JSONEncoder.Field(skip_string_convert=True)) + value: int = field(default=0, json_encoder=JSONEncoder.Field(skip_string_convert=True)) + + +@dataclass(kw_only=True) +class FixtureForkPost: + """ + Type used to describe the post state of a single Fork. + """ + + state_root: Hash = field( + json_encoder=JSONEncoder.Field( + name="hash", + ), + ) + logs_hash: Hash = field( + json_encoder=JSONEncoder.Field( + name="logs", + ), + ) + tx_bytes: BytesConvertible = field( + json_encoder=JSONEncoder.Field( + name="txbytes", + cast_type=Bytes, + ), + ) + expected_exception: Optional[ExceptionList | TransactionException] = field( + default=None, + json_encoder=JSONEncoder.Field( + name="expectException", + ), + ) + indexes: FixtureForkPostIndexes = field( + json_encoder=JSONEncoder.Field( + to_json=True, + ), + ) + + @classmethod + def collect( + cls, + *, + transition_tool_result: Dict[str, Any], + transaction: Transaction, + ) -> "FixtureForkPost": + """ + Collects the post state of a single Fork from the transition tool result. 
+ """ + state_root = Hash(transition_tool_result["stateRoot"]) + logs_hash = Hash(transition_tool_result["logsHash"]) + indexes = FixtureForkPostIndexes() + return cls( + state_root=state_root, + logs_hash=logs_hash, + tx_bytes=transaction.serialized_bytes(), + expected_exception=transaction.error, + indexes=indexes, + ) + + +@dataclass(kw_only=True) +class Fixture(BaseFixture): + """ + Fixture for a single StateTest. + """ + + env: Environment = field( + json_encoder=JSONEncoder.Field( + cast_type=FixtureEnvironment.from_env, + to_json=True, + ), + ) + + pre_state: Alloc = field( + json_encoder=JSONEncoder.Field( + name="pre", + cast_type=Alloc, + to_json=True, + ), + ) + + transaction: Transaction = field( + json_encoder=JSONEncoder.Field( + to_json=True, + cast_type=FixtureTransaction.from_transaction, + ), + ) + + post: Mapping[str, List[FixtureForkPost]] = field( + default_factory=dict, + json_encoder=JSONEncoder.Field( + name="post", + to_json=True, + ), + ) + + @classmethod + def collect_into_file(cls, fd: TextIO, fixtures: Dict[str, "BaseFixture"]): + """ + For StateTest format, we simply join the json fixtures into a single file. + + We could do extra processing like combining tests that use the same pre-state, + and similar transaction, but this is not done for now. + """ + json_fixtures: Dict[str, Dict[str, Any]] = {} + for name, fixture in fixtures.items(): + assert isinstance(fixture, Fixture), f"Invalid fixture type: {type(fixture)}" + json_fixtures[name] = fixture.to_json() + json.dump(json_fixtures, fd, indent=4) + + @classmethod + def output_base_dir_name(cls) -> Path: + """ + Returns the name of the subdirectory where this type of fixture should be dumped to. + """ + return Path("state_tests") + + @classmethod + def format(cls) -> FixtureFormats: + """ + Returns the fixture format which the evm tool can use to determine how to verify the + fixture. + """ + return FixtureFormats.STATE_TEST diff --git a/src/ethereum_test_tools/spec/state_test.py b/src/ethereum_test_tools/spec/state_test.py deleted file mode 100644 index 74321496b4..0000000000 --- a/src/ethereum_test_tools/spec/state_test.py +++ /dev/null @@ -1,246 +0,0 @@ -""" -Ethereum state test spec definition and filler. -""" -from copy import copy -from dataclasses import dataclass -from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, Tuple, Type - -from ethereum_test_forks import Fork -from evm_transition_tool import TransitionTool - -from ..common import ( - Address, - Alloc, - Bloom, - Bytes, - EmptyTrieRoot, - Environment, - Fixture, - FixtureBlock, - FixtureEngineNewPayload, - FixtureHeader, - Hash, - HeaderNonce, - HiveFixture, - Number, - Transaction, - ZeroPaddedHexNumber, - alloc_to_accounts, - to_json, -) -from ..common.constants import EmptyOmmersRoot, EngineAPIError -from .base_test import BaseTest, verify_post_alloc, verify_result, verify_transactions -from .debugging import print_traces - - -@dataclass(kw_only=True) -class StateTest(BaseTest): - """ - Filler type that tests transactions over the period of a single block. - """ - - env: Environment - pre: Mapping - post: Mapping - txs: List[Transaction] - engine_api_error_code: Optional[EngineAPIError] = None - tag: str = "" - chain_id: int = 1 - - @classmethod - def pytest_parameter_name(cls) -> str: - """ - Returns the parameter name used to identify this filler in a test. 
- """ - return "state_test" - - def make_genesis( - self, - t8n: TransitionTool, - fork: Fork, - ) -> Tuple[Alloc, Bytes, FixtureHeader]: - """ - Create a genesis block from the state test definition. - """ - # Similar to the block 1 environment specified by the test - # with some slight differences, so make a copy here - genesis_env = copy(self.env) - - # Modify values to the proper values for the genesis block - genesis_env.withdrawals = None - genesis_env.beacon_root = None - genesis_env.number = Number(genesis_env.number) - 1 - assert ( - genesis_env.number >= 0 - ), "genesis block number cannot be negative, set state test env.number to 1" - - genesis_env.set_fork_requirements(fork, in_place=True) - pre_alloc = Alloc( - fork.pre_allocation( - block_number=genesis_env.number, timestamp=Number(genesis_env.timestamp) - ) - ) - new_alloc, state_root = t8n.calc_state_root( - alloc=to_json(Alloc.merge(pre_alloc, Alloc(self.pre))), - fork=fork, - debug_output_path=self.get_next_transition_tool_output_path(), - ) - genesis = FixtureHeader( - parent_hash=Hash(0), - ommers_hash=Hash(EmptyOmmersRoot), - coinbase=Address(0), - state_root=Hash(state_root), - transactions_root=Hash(EmptyTrieRoot), - receipt_root=Hash(EmptyTrieRoot), - bloom=Bloom(0), - difficulty=ZeroPaddedHexNumber( - 0x20000 if genesis_env.difficulty is None else genesis_env.difficulty - ), - number=ZeroPaddedHexNumber(genesis_env.number), - gas_limit=ZeroPaddedHexNumber(genesis_env.gas_limit), - gas_used=0, - timestamp=0, - extra_data=Bytes([0]), - mix_digest=Hash(0), - nonce=HeaderNonce(0), - base_fee=ZeroPaddedHexNumber.or_none(genesis_env.base_fee), - blob_gas_used=ZeroPaddedHexNumber.or_none(genesis_env.blob_gas_used), - excess_blob_gas=ZeroPaddedHexNumber.or_none(genesis_env.excess_blob_gas), - withdrawals_root=Hash.or_none( - EmptyTrieRoot if genesis_env.withdrawals is not None else None - ), - beacon_root=Hash.or_none(genesis_env.beacon_root), - ) - - genesis_rlp, genesis.hash = genesis.build( - txs=[], - ommers=[], - withdrawals=genesis_env.withdrawals, - ) - - return Alloc(new_alloc), genesis_rlp, genesis - - def generate_fixture_data( - self, t8n: TransitionTool, fork: Fork, eips: Optional[List[int]] = None - ) -> Tuple[FixtureHeader, Bytes, Alloc, List[Transaction], Dict, Dict[str, Any], str]: - """ - Generate common fixture data for both make_fixture and make_hive_fixture. - """ - pre, genesis_rlp, genesis = self.make_genesis(t8n, fork) - network_info = ( - "+".join([fork.name()] + [str(eip) for eip in eips]) if eips else fork.name() - ) - - self.env = self.env.apply_new_parent(genesis).set_fork_requirements(fork) - txs = [tx.with_signature_and_sender() for tx in self.txs] if self.txs else [] - - t8n_alloc, t8n_result = t8n.evaluate( - alloc=to_json(pre), - txs=to_json(txs), - env=to_json(self.env), - fork_name=network_info, - chain_id=self.chain_id, - reward=fork.get_reward(Number(self.env.number), Number(self.env.timestamp)), - eips=eips, - debug_output_path=self.get_next_transition_tool_output_path(), - ) - - rejected_txs = verify_transactions(txs, t8n_result) - if len(rejected_txs) > 0: - raise Exception( - "one or more transactions in `StateTest` are " - + "intrinsically invalid, which are not allowed. " - + "Use `BlockchainTest` to verify rejection of blocks " - + "that include invalid transactions." 
- ) - try: - verify_post_alloc(self.post, t8n_alloc) - verify_result(t8n_result, self.env) - except Exception as e: - print_traces(traces=t8n.get_traces()) - raise e - - return genesis, genesis_rlp, pre, txs, t8n_result, t8n_alloc, network_info - - def make_fixture( - self, t8n: TransitionTool, fork: Fork, eips: Optional[List[int]] = None - ) -> Fixture: - """ - Create a fixture from the state test definition. - """ - ( - genesis, - genesis_rlp, - pre, - txs, - t8n_result, - t8n_alloc, - network_info, - ) = self.generate_fixture_data(t8n, fork, eips) - header = FixtureHeader.collect( - fork=fork, transition_tool_result=t8n_result, environment=self.env - ) - block, header.hash = header.build(txs=txs, ommers=[], withdrawals=self.env.withdrawals) - - return Fixture( - fork=network_info, - genesis=genesis, - genesis_rlp=genesis_rlp, - blocks=[ - FixtureBlock( - rlp=block, - block_header=header, - txs=txs, - ommers=[], - withdrawals=self.env.withdrawals, - ) - ], - last_block_hash=header.hash, - pre_state=pre, - post_state=alloc_to_accounts(t8n_alloc), - name=self.tag, - ) - - def make_hive_fixture( - self, t8n: TransitionTool, fork: Fork, eips: Optional[List[int]] = None - ) -> HiveFixture: - """ - Create a hive fixture from the state test definition. - """ - ( - genesis, - _, - pre, - txs, - t8n_result, - t8n_alloc, - network_info, - ) = self.generate_fixture_data(t8n, fork, eips) - - header = FixtureHeader.collect( - fork=fork, transition_tool_result=t8n_result, environment=self.env - ) - _, header.hash = header.build(txs=txs, ommers=[], withdrawals=self.env.withdrawals) - fixture_payload = FixtureEngineNewPayload.from_fixture_header( - fork=fork, - header=header, - transactions=txs, - withdrawals=self.env.withdrawals, - valid=True, - error_code=None, - ) - fcu_version = fork.engine_forkchoice_updated_version(header.number, header.timestamp) - - return HiveFixture( - fork=network_info, - genesis=genesis, - payloads=[fixture_payload], - fcu_version=fcu_version, - pre_state=pre, - post_state=alloc_to_accounts(t8n_alloc), - name=self.tag, - ) - - -StateTestSpec = Callable[[str], Generator[StateTest, None, None]] -StateTestFiller = Type[StateTest] diff --git a/src/ethereum_test_tools/tests/conftest.py b/src/ethereum_test_tools/tests/conftest.py index 7735932953..16a6068eb9 100644 --- a/src/ethereum_test_tools/tests/conftest.py +++ b/src/ethereum_test_tools/tests/conftest.py @@ -5,9 +5,9 @@ import pytest from semver import Version -from ..code import Yul +from ethereum_test_forks import Frontier -SUPPORTED_SOLC_VERSIONS = [Version.parse(v) for v in ["0.8.20", "0.8.21", "0.8.22"]] +from ..code import Yul SOLC_PADDING_VERSION = Version.parse("0.8.21") @@ -16,6 +16,6 @@ def solc_version() -> Version: """Return the version of solc being used for tests.""" solc_version = Yul("").version().finalize_version() - if solc_version not in SUPPORTED_SOLC_VERSIONS: + if solc_version < Frontier.solc_min_version(): raise Exception("Unsupported solc version: {}".format(solc_version)) return solc_version diff --git a/src/ethereum_test_tools/tests/test_base_types.py b/src/ethereum_test_tools/tests/test_base_types.py new file mode 100644 index 0000000000..d9b8971e78 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_base_types.py @@ -0,0 +1,54 @@ +""" +Test suite for `ethereum_test` module base types. 
+""" + +from typing import Any + +import pytest + +from ..common.base_types import Address, Hash + + +@pytest.mark.parametrize( + "a, b, equal", + [ + (Address("0x0"), Address("0x0"), True), + (Address("0x0"), Address("0x1"), False), + (Address("0x1"), Address("0x0"), False), + (Address("0x1"), "0x1", True), + (Address("0x1"), "0x2", False), + (Address("0x1"), 1, True), + (Address("0x1"), 2, False), + (Address("0x1"), b"\x01", True), + (Address("0x1"), b"\x02", False), + ("0x1", Address("0x1"), True), + ("0x2", Address("0x1"), False), + (1, Address("0x1"), True), + (2, Address("0x1"), False), + (b"\x01", Address("0x1"), True), + (b"\x02", Address("0x1"), False), + (Hash("0x0"), Hash("0x0"), True), + (Hash("0x0"), Hash("0x1"), False), + (Hash("0x1"), Hash("0x0"), False), + (Hash("0x1"), "0x1", True), + (Hash("0x1"), "0x2", False), + (Hash("0x1"), 1, True), + (Hash("0x1"), 2, False), + (Hash("0x1"), b"\x01", True), + (Hash("0x1"), b"\x02", False), + ("0x1", Hash("0x1"), True), + ("0x2", Hash("0x1"), False), + (1, Hash("0x1"), True), + (2, Hash("0x1"), False), + ], +) +def test_comparisons(a: Any, b: Any, equal: bool): + """ + Test the comparison methods of the base types. + """ + if equal: + assert a == b + assert not a != b + else: + assert a != b + assert not a == b diff --git a/src/ethereum_test_tools/tests/test_code.py b/src/ethereum_test_tools/tests/test_code.py index 74019f8332..03d815bc9b 100644 --- a/src/ethereum_test_tools/tests/test_code.py +++ b/src/ethereum_test_tools/tests/test_code.py @@ -8,12 +8,11 @@ import pytest from semver import Version -from ethereum_test_forks import Fork, Homestead, Shanghai, forks_from_until, get_deployed_forks -from evm_transition_tool import GethTransitionTool +from ethereum_test_forks import Fork, Homestead, Shanghai, get_deployed_forks +from evm_transition_tool import FixtureFormats, GethTransitionTool from ..code import CalldataCase, Case, Code, Conditional, Initcode, Switch, Yul -from ..common import Account, Environment, TestAddress, Transaction, to_hash_bytes -from ..filling import fill_test +from ..common import Account, Environment, Hash, TestAddress, Transaction from ..spec import StateTest from ..vm.opcode import Opcodes as Op from .conftest import SOLC_PADDING_VERSION @@ -50,14 +49,10 @@ def test_code_operations(code: Code, expected_bytes: bytes): assert bytes(code) == expected_bytes -@pytest.fixture(params=forks_from_until(get_deployed_forks()[1], get_deployed_forks()[-1])) +@pytest.fixture(params=get_deployed_forks()) def fork(request: pytest.FixtureRequest): """ Return the target evm-version (fork) for solc compilation. - - Note: - - get_deployed_forks()[1] (Homestead) is the first fork that solc supports. 
- - forks_from_util: Used to remove the Glacier forks """ return request.param @@ -82,7 +77,7 @@ def expected_bytes(request: pytest.FixtureRequest, solc_version: Version, fork: """Return the expected bytes for the test.""" expected_bytes = request.param if isinstance(expected_bytes, Template): - if solc_version < SOLC_PADDING_VERSION or fork == Homestead: + if solc_version < SOLC_PADDING_VERSION or fork <= Homestead: solc_padding = "" else: solc_padding = "00" @@ -90,7 +85,7 @@ def expected_bytes(request: pytest.FixtureRequest, solc_version: Version, fork: if isinstance(expected_bytes, bytes): if fork == Shanghai: expected_bytes = b"\x5f" + expected_bytes[2:] - if solc_version < SOLC_PADDING_VERSION or fork == Homestead: + if solc_version < SOLC_PADDING_VERSION or fork <= Homestead: return expected_bytes else: return expected_bytes + b"\x00" @@ -318,7 +313,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): "tx_data,switch_bytecode,expected_storage", [ pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -330,7 +325,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="no-default-action-condition-met", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[ CalldataCase(value=1, action=Op.SSTORE(0, 1)), @@ -342,7 +337,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="no-default-action-condition-met-calldata", ), pytest.param( - to_hash_bytes(0), + Hash(0), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -354,7 +349,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="no-default-action-no-condition-met", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[], default_action=Op.SSTORE(0, 3), @@ -363,7 +358,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="no-cases", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))], default_action=Op.SSTORE(0, 3), @@ -372,7 +367,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="one-case-condition-met", ), pytest.param( - to_hash_bytes(0), + Hash(0), Switch( cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))], default_action=Op.SSTORE(0, 3), @@ -381,7 +376,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="one-case-condition-not-met", ), pytest.param( - to_hash_bytes(0), + Hash(0), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -393,7 +388,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="two-cases-no-condition-met", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -405,7 +400,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="two-cases-first-condition-met", ), pytest.param( - to_hash_bytes(2), + Hash(2), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -417,7 +412,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="two-cases-second-condition-met", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -432,7 +427,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-first-condition-met", ), 
pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[ CalldataCase(value=1, action=Op.SSTORE(0, 1)), @@ -447,7 +442,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-first-condition-met-calldata", ), pytest.param( - to_hash_bytes(3), + Hash(3), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -462,7 +457,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-third-condition-met", ), pytest.param( - to_hash_bytes(3), + Hash(3), Switch( cases=[ CalldataCase(value=1, action=Op.SSTORE(0, 1)), @@ -477,7 +472,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-third-condition-met-calldata", ), pytest.param( - to_hash_bytes(5), + Hash(5), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -492,7 +487,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-last-met", ), pytest.param( - to_hash_bytes(3), + Hash(3), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -507,7 +502,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-multiple-conditions-met", # first in list should be evaluated ), pytest.param( - to_hash_bytes(9), + Hash(9), Switch( cases=[ Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)), @@ -522,7 +517,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="five-cases-no-condition-met", ), pytest.param( - to_hash_bytes(0), + Hash(0), Switch( cases=[ Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)), @@ -537,7 +532,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="no-calldataload-condition-met", ), pytest.param( - to_hash_bytes(0), + Hash(0), Switch( cases=[ Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)), @@ -555,7 +550,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="no-calldataload-condition-met-different-length-actions", ), pytest.param( - to_hash_bytes(0), + Hash(0), Switch( cases=[ Case( @@ -585,7 +580,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="different-length-conditions-condition-met-different-length-actions", ), pytest.param( - to_hash_bytes(0), + Hash(0), Op.SSTORE(0x10, 1) + Switch( cases=[ @@ -617,7 +612,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="nested-within-bytecode", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))], default_action=Op.PUSH32(2**256 - 1) * 8, @@ -626,7 +621,7 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes): id="jumpi-larger-than-1-byte", ), pytest.param( - to_hash_bytes(1), + Hash(1), Switch( cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))], default_action=Op.PUSH32(2**256 - 1) * 2048, @@ -645,12 +640,17 @@ def test_switch(tx_data: bytes, switch_bytecode: bytes, expected_storage: Mappin TestAddress: Account(balance=10_000_000, nonce=0), code_address: Account(code=switch_bytecode), } - txs = [Transaction(to=code_address, data=tx_data, gas_limit=1_000_000)] + tx = Transaction(to=code_address, data=tx_data, gas_limit=1_000_000) post = {TestAddress: Account(nonce=1), code_address: Account(storage=expected_storage)} - state_test = StateTest(env=Environment(), pre=pre, txs=txs, post=post) - fill_test( + state_test = StateTest( + env=Environment(), + pre=pre, + tx=tx, + post=post, + 
fixture_format=FixtureFormats.BLOCKCHAIN_TEST, + ) + state_test.generate( t8n=GethTransitionTool(), - test_spec=state_test, fork=Shanghai, - spec=None, + eips=None, ) diff --git a/src/ethereum_test_tools/tests/test_exceptions.py b/src/ethereum_test_tools/tests/test_exceptions.py new file mode 100644 index 0000000000..558d8f7f07 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_exceptions.py @@ -0,0 +1,57 @@ +""" +Test suite for ethereum_test_tools.exceptions +""" + +from ..exceptions import BlockException, TransactionException + + +def test_exceptions_string_conversion(): + """ + Test that the exceptions are unique and have the correct string representation. + """ + assert ( + str(TransactionException.INSUFFICIENT_ACCOUNT_FUNDS) + == "TransactionException.INSUFFICIENT_ACCOUNT_FUNDS" + ) + assert str(BlockException.INCORRECT_BLOB_GAS_USED) == "BlockException.INCORRECT_BLOB_GAS_USED" + + +def test_exceptions_or(): + """ + Test that the exceptions can be combined using the | operator. + """ + assert ( + str( + BlockException.INCORRECT_BLOB_GAS_USED + | TransactionException.INSUFFICIENT_ACCOUNT_FUNDS + ) + == "BlockException.INCORRECT_BLOB_GAS_USED|TransactionException.INSUFFICIENT_ACCOUNT_FUNDS" + ) + + assert ( + str( + BlockException.INCORRECT_BLOB_GAS_USED + | TransactionException.INSUFFICIENT_ACCOUNT_FUNDS + | TransactionException.INITCODE_SIZE_EXCEEDED + ) + == "BlockException.INCORRECT_BLOB_GAS_USED" + "|TransactionException.INSUFFICIENT_ACCOUNT_FUNDS" + "|TransactionException.INITCODE_SIZE_EXCEEDED" + ) + + assert ( + str( + TransactionException.INSUFFICIENT_ACCOUNT_FUNDS + | BlockException.INCORRECT_BLOB_GAS_USED + ) + == "TransactionException.INSUFFICIENT_ACCOUNT_FUNDS|BlockException.INCORRECT_BLOB_GAS_USED" + ) + + assert ( + str( + TransactionException.INSUFFICIENT_ACCOUNT_FUNDS + | BlockException.INCORRECT_BLOB_GAS_USED + | BlockException.INCORRECT_BLOB_GAS_USED + ) + == "TransactionException.INSUFFICIENT_ACCOUNT_FUNDS|BlockException.INCORRECT_BLOB_GAS_USED" + ) diff --git a/src/ethereum_test_tools/tests/test_filler.py b/src/ethereum_test_tools/tests/test_filler.py deleted file mode 100644 index f98a18ef5e..0000000000 --- a/src/ethereum_test_tools/tests/test_filler.py +++ /dev/null @@ -1,814 +0,0 @@ -""" -Test suite for `ethereum_test` module. -""" - -import json -import os -from typing import Any, Dict, List - -import pytest -from semver import Version - -from ethereum_test_forks import Berlin, Fork, Istanbul, London, Merge, Shanghai -from evm_transition_tool import GethTransitionTool - -from ..code import Yul -from ..common import Account, Block, Environment, TestAddress, Transaction, to_json -from ..filling import fill_test -from ..spec import BaseTestConfig, BlockchainTest, StateTest -from .conftest import SOLC_PADDING_VERSION - - -def remove_info(fixture_json: Dict[str, Any]): # noqa: D103 - for t in fixture_json: - if "_info" in fixture_json[t]: - del fixture_json[t]["_info"] - - -@pytest.fixture() -def hash(request: pytest.FixtureRequest, solc_version: Version): - """ - Set the hash based on the fork and solc version. 
- """ - if solc_version == Version.parse("0.8.20"): - if request.node.funcargs["fork"] == Berlin: - return bytes.fromhex("193e550de3") - elif request.node.funcargs["fork"] == London: - return bytes.fromhex("b053deac0e") - else: - if request.node.funcargs["fork"] == Berlin: - return bytes.fromhex("f3a35d34f6") - elif request.node.funcargs["fork"] == London: - return bytes.fromhex("c5fa75d7f6") - - -@pytest.mark.parametrize( - "fork,hash", - [ - (Berlin, "set using indirect & hash fixture"), - (London, "set using indirect & hash fixture"), - ], - indirect=["hash"], -) -def test_make_genesis(fork: Fork, hash: bytes): # noqa: D103 - env = Environment() - - pre = { - "0x1000000000000000000000000000000000000000": Account( - balance=0x0BA1A9CE0BA1A9CE, - code=Yul( - """ - { - function f(a, b) -> c { - c := add(a, b) - } - - sstore(0, f(1, 2)) - return(0, 32) - } - """, - fork=fork, - ), - ), - TestAddress: Account(balance=0x0BA1A9CE0BA1A9CE), - } - - t8n = GethTransitionTool() - - _, _, genesis = StateTest( - env=env, pre=pre, post={}, txs=[], tag="some_state_test" - ).make_genesis( - t8n, - fork, - ) - assert genesis.hash is not None - assert genesis.hash.startswith(hash) - - -@pytest.mark.parametrize( - "fork,enable_hive,expected_json_file", - [ - (Istanbul, False, "chainid_istanbul_filled.json"), - (London, False, "chainid_london_filled.json"), - (Merge, True, "chainid_merge_filled_hive.json"), - (Shanghai, True, "chainid_shanghai_filled_hive.json"), - ], -) -def test_fill_state_test(fork: Fork, expected_json_file: str, enable_hive: bool): - """ - Test `ethereum_test.filler.fill_fixtures` with `StateTest`. - """ - env = Environment( - coinbase="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - difficulty=0x20000, - gas_limit=10000000000, - number=1, - timestamp=1000, - ) - - pre = { - 0x1000000000000000000000000000000000000000: Account(code="0x4660015500"), - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=1000000000000000000000), - } - - tx = Transaction( - ty=0x0, - chain_id=0x0, - nonce=0, - to="0x1000000000000000000000000000000000000000", - gas_limit=100000000, - gas_price=10, - protected=False, - ) - - post = { - "0x1000000000000000000000000000000000000000": Account( - code="0x4660015500", storage={"0x01": "0x01"} - ), - } - - state_test = StateTest( - env=env, - pre=pre, - post=post, - txs=[tx], - tag="my_chain_id_test", - base_test_config=BaseTestConfig(enable_hive=enable_hive), - ) - - t8n = GethTransitionTool() - - fixture = { - f"000/my_chain_id_test/{fork}": fill_test( - t8n=t8n, - test_spec=state_test, - fork=fork, - spec=None, - ), - } - - with open( - os.path.join( - "src", - "ethereum_test_tools", - "tests", - "test_fixtures", - expected_json_file, - ) - ) as f: - expected = json.load(f) - - fixture_json = to_json(fixture) - remove_info(fixture_json) - assert fixture_json == expected - - -@pytest.mark.parametrize( - "fork,enable_hive,expected_json_file", - [ - (London, False, "blockchain_london_valid_filled.json"), - (Shanghai, True, "blockchain_shanghai_valid_filled_hive.json"), - ], -) -def test_fill_blockchain_valid_txs( - fork: Fork, solc_version: str, enable_hive: bool, expected_json_file: str -): - """ - Test `ethereum_test.filler.fill_fixtures` with `BlockchainTest`. 
- """ - pre = { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=0x1000000000000000000), - "0xd02d72E067e77158444ef2020Ff2d325f929B363": Account( - balance=0x1000000000000000000, nonce=1 - ), - "0xcccccccccccccccccccccccccccccccccccccccc": Account( - balance=0x10000000000, - nonce=1, - code=Yul( - """ - { - sstore(number(), basefee()) - sstore(add(number(), 0x1000), sub(gasprice(), basefee())) - sstore(add(number(), 0x2000), selfbalance()) - stop() - } - """, - fork=fork, - ), - ), - "0xcccccccccccccccccccccccccccccccccccccccd": Account( - balance=0x20000000000, - nonce=1, - code=Yul( - """ - { - let throwMe := delegatecall(gas(), - 0xcccccccccccccccccccccccccccccccccccccccc, - 0, 0, 0, 0) - } - """, - fork=fork, - ), - ), - 0xC0DE: Account( - balance=0, - nonce=1, - code=Yul( - """ - { - let throwMe := delegatecall(gas(), - 0xcccccccccccccccccccccccccccccccccccccccc, - 0, 0, 0, 0) - } - """, - fork=fork, - ), - ), - "0xccccccccccccccccccccccccccccccccccccccce": Account( - balance=0x20000000000, - nonce=1, - code=Yul( - """ - { - let throwMe := call(gas(), 0xC0DE, 0x1000, - 0, 0, 0, 0) - throwMe := delegatecall(gas(), - 0xcccccccccccccccccccccccccccccccccccccccc, - 0, 0, 0, 0) - } - """, - fork=fork, - ), - ), - } - - blocks: List[Block] = [ - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x01", - nonce=0, - gas_limit=1000000, - max_priority_fee_per_gas=1, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - ], - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0201", - nonce=1, - gas_limit=1000000, - max_priority_fee_per_gas=10, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0202", - nonce=2, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - ), - Transaction( - data="0x0203", - nonce=3, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", - ), - ], - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0301", - nonce=4, - gas_limit=1000000, - max_priority_fee_per_gas=1000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0303", - nonce=5, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", - ), - Transaction( - data="0x0304", - nonce=6, - gas_limit=1000000, - max_priority_fee_per_gas=100000, - max_fee_per_gas=100000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - ), - ], - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0401", - nonce=7, - gas_limit=1000000, - max_priority_fee_per_gas=1000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0403", - nonce=8, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", - ), - Transaction( - data="0x0404", - nonce=9, - gas_limit=1000000, - max_priority_fee_per_gas=100000, - max_fee_per_gas=100000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - ), - ], - ), - ] - - post = { - "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC": Account( - storage={ - # BASEFEE and the tip in block 1 - 0x0001: 875, # BASEFEE - 0x1001: 1, # tip - # Block 
2 - 0x0002: 766, # BASEFEE - 0x1002: 10, # tip - # Block 3 - 0x0003: 671, - 0x1003: 329, - # Block 4 - 0x0004: 588, - 0x1004: 412, - # SELFBALANCE, always the same - 0x2001: 0x010000000000, - 0x2002: 0x010000000000, - 0x2003: 0x010000000000, - 0x2004: 0x010000000000, - } - ), - "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD": Account( - storage={ - # Block 2 - 0x0002: 766, # BASEFEE - 0x1002: 100, # tip - # Block 3 - 0x0003: 671, - 0x1003: 99329, - # Block 4 - 0x0004: 588, - 0x1004: 99412, - # SELFBALANCE, always the same - 0x2002: 0x020000000000, - 0x2003: 0x020000000000, - 0x2004: 0x020000000000, - } - ), - "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE": Account( - storage={ - # Block 2 - 0x0002: 766, # BASEFEE - 0x1002: 100, # tip - 0x0003: 671, - 0x1003: 100, - 0x0004: 588, - 0x1004: 100, - # SELFBALANCE - 0x2002: 0x01FFFFFFF000, - 0x2003: 0x01FFFFFFE000, - 0x2004: 0x01FFFFFFD000, - } - ), - 0xC0DE: Account( - storage={ - # Block 2 - 0x0002: 766, - 0x1002: 100, - # Block 3 - 0x0003: 671, - 0x1003: 100, - # Block 4 - 0x0004: 588, - 0x1004: 100, - # SELFBALANCE - 0x2002: 0x1000, - 0x2003: 0x2000, - 0x2004: 0x3000, - } - ), - } - - # We start genesis with a baseFee of 1000 - genesis_environment = Environment( - base_fee=1000, - coinbase="0xba5e000000000000000000000000000000000000", - ) - - blockchain_test = BlockchainTest( - pre=pre, - post=post, - blocks=blocks, - genesis_environment=genesis_environment, - tag="my_blockchain_test_valid_txs", - base_test_config=BaseTestConfig(enable_hive=enable_hive), - ) - - t8n = GethTransitionTool() - - fixture = { - f"000/my_blockchain_test/{fork.name()}": fill_test( - t8n=t8n, - test_spec=blockchain_test, - fork=fork, - spec=None, - ) - } - - with open( - os.path.join( - "src", - "ethereum_test_tools", - "tests", - "test_fixtures", - expected_json_file, - ) - ) as f: - expected = json.load(f) - - fixture_json = to_json(fixture) - remove_info(fixture_json) - - if solc_version >= SOLC_PADDING_VERSION: - expected = expected["solc=padding_version"] - else: - expected = expected[f"solc={solc_version}"] - - assert fixture_json == expected - - -@pytest.mark.parametrize( - "fork,enable_hive,expected_json_file", - [ - (London, False, "blockchain_london_invalid_filled.json"), - (Shanghai, True, "blockchain_shanghai_invalid_filled_hive.json"), - ], -) -def test_fill_blockchain_invalid_txs( - fork: Fork, solc_version: str, enable_hive: bool, expected_json_file: str -): - """ - Test `ethereum_test.filler.fill_fixtures` with `BlockchainTest`. 
- """ - pre = { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=0x1000000000000000000), - "0xd02d72E067e77158444ef2020Ff2d325f929B363": Account( - balance=0x1000000000000000000, nonce=1 - ), - "0xcccccccccccccccccccccccccccccccccccccccc": Account( - balance=0x10000000000, - nonce=1, - code=Yul( - """ - { - sstore(number(), basefee()) - sstore(add(number(), 0x1000), sub(gasprice(), basefee())) - sstore(add(number(), 0x2000), selfbalance()) - stop() - } - """, - fork=fork, - ), - ), - "0xcccccccccccccccccccccccccccccccccccccccd": Account( - balance=0x20000000000, - nonce=1, - code=Yul( - """ - { - let throwMe := delegatecall(gas(), - 0xcccccccccccccccccccccccccccccccccccccccc, - 0, 0, 0, 0) - } - """, - fork=fork, - ), - ), - 0xC0DE: Account( - balance=0, - nonce=1, - code=Yul( - """ - { - let throwMe := delegatecall(gas(), - 0xcccccccccccccccccccccccccccccccccccccccc, - 0, 0, 0, 0) - } - """, - fork=fork, - ), - ), - "0xccccccccccccccccccccccccccccccccccccccce": Account( - balance=0x20000000000, - nonce=1, - code=Yul( - """ - { - let throwMe := call(gas(), 0xC0DE, 0x1000, - 0, 0, 0, 0) - throwMe := delegatecall(gas(), - 0xcccccccccccccccccccccccccccccccccccccccc, - 0, 0, 0, 0) - } - """, - fork=fork, - ), - ), - } - - blocks: List[Block] = [ - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x01", - nonce=0, - gas_limit=1000000, - max_priority_fee_per_gas=1, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - ], - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0201", - nonce=1, - gas_limit=1000000, - max_priority_fee_per_gas=10, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0202", - nonce=2, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - ), - Transaction( - data="0x0203", - nonce=3, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", - ), - ], - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0301", - nonce=4, - gas_limit=1000000, - max_priority_fee_per_gas=1000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0302", - nonce=5, - gas_limit=1000000, - max_priority_fee_per_gas=100000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - error="TR_TipGtFeeCap", - ), - ], - exception="invalid transaction", - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0301", - nonce=4, - gas_limit=1000000, - max_priority_fee_per_gas=1000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0303", - nonce=5, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", - ), - Transaction( - data="0x0304", - nonce=6, - gas_limit=1000000, - max_priority_fee_per_gas=100000, - max_fee_per_gas=100000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - ), - ], - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0401", - nonce=7, - gas_limit=1000000, - max_priority_fee_per_gas=1000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0402", - 
nonce=8, - gas_limit=1000000, - max_priority_fee_per_gas=100000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - error="TR_TipGtFeeCap", - ), - ], - exception="invalid transaction", - ), - Block( - coinbase="0xba5e000000000000000000000000000000000000", - txs=[ - Transaction( - data="0x0401", - nonce=7, - gas_limit=1000000, - max_priority_fee_per_gas=1000, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", - ), - Transaction( - data="0x0403", - nonce=8, - gas_limit=1000000, - max_priority_fee_per_gas=100, - max_fee_per_gas=1000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", - ), - Transaction( - data="0x0404", - nonce=9, - gas_limit=1000000, - max_priority_fee_per_gas=100000, - max_fee_per_gas=100000, - to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", - ), - ], - ), - ] - - post = { - "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC": Account( - storage={ - # BASEFEE and the tip in block 1 - 0x0001: 875, # BASEFEE - 0x1001: 1, # tip - # Block 2 - 0x0002: 766, # BASEFEE - 0x1002: 10, # tip - # Block 3 - 0x0003: 671, - 0x1003: 329, - # Block 4 - 0x0004: 588, - 0x1004: 412, - # SELFBALANCE, always the same - 0x2001: 0x010000000000, - 0x2002: 0x010000000000, - 0x2003: 0x010000000000, - 0x2004: 0x010000000000, - } - ), - "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD": Account( - storage={ - # Block 2 - 0x0002: 766, # BASEFEE - 0x1002: 100, # tip - # Block 3 - 0x0003: 671, - 0x1003: 99329, - # Block 4 - 0x0004: 588, - 0x1004: 99412, - # SELFBALANCE, always the same - 0x2002: 0x020000000000, - 0x2003: 0x020000000000, - 0x2004: 0x020000000000, - } - ), - "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE": Account( - storage={ - # Block 2 - 0x0002: 766, # BASEFEE - 0x1002: 100, # tip - 0x0003: 671, - 0x1003: 100, - 0x0004: 588, - 0x1004: 100, - # SELFBALANCE - 0x2002: 0x01FFFFFFF000, - 0x2003: 0x01FFFFFFE000, - 0x2004: 0x01FFFFFFD000, - } - ), - 0xC0DE: Account( - storage={ - # Block 2 - 0x0002: 766, - 0x1002: 100, - # Block 3 - 0x0003: 671, - 0x1003: 100, - # Block 4 - 0x0004: 588, - 0x1004: 100, - # SELFBALANCE - 0x2002: 0x1000, - 0x2003: 0x2000, - 0x2004: 0x3000, - } - ), - } - - # We start genesis with a baseFee of 1000 - genesis_environment = Environment( - base_fee=1000, - coinbase="0xba5e000000000000000000000000000000000000", - ) - - blockchain_test = BlockchainTest( - pre=pre, - post=post, - blocks=blocks, - genesis_environment=genesis_environment, - base_test_config=BaseTestConfig(enable_hive=enable_hive), - ) - - t8n = GethTransitionTool() - - fixture = { - f"000/my_blockchain_test/{fork.name()}": fill_test( - t8n=t8n, - test_spec=blockchain_test, - fork=fork, - spec=None, - ) - } - - with open( - os.path.join( - "src", - "ethereum_test_tools", - "tests", - "test_fixtures", - expected_json_file, - ) - ) as f: - expected = json.load(f) - - fixture_json = to_json(fixture) - remove_info(fixture_json) - - if solc_version >= SOLC_PADDING_VERSION: - expected = expected["solc=padding_version"] - else: - expected = expected[f"solc={solc_version}"] - - assert fixture_json == expected diff --git a/src/ethereum_test_tools/tests/test_filling/__init__.py b/src/ethereum_test_tools/tests/test_filling/__init__.py new file mode 100644 index 0000000000..7ae47ad3a8 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_filling/__init__.py @@ -0,0 +1,3 @@ +""" +`ethereum_test_tools.filling` verification tests. 
+""" diff --git a/src/ethereum_test_tools/tests/test_fixtures/blockchain_london_invalid_filled.json b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_london_invalid_filled.json similarity index 97% rename from src/ethereum_test_tools/tests/test_fixtures/blockchain_london_invalid_filled.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_london_invalid_filled.json index a5b152ef9f..228f06c6b4 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/blockchain_london_invalid_filled.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_london_invalid_filled.json @@ -1,6 +1,9 @@ { "solc=0.8.20": { "000/my_blockchain_test/London": { + "_info": { + "hash": "0x2c3bb32517b56a755b7433abd36d4a6b761c1d79d9fbed7dac4ea532eb739c11" + }, "network": "London", "genesisRLP": "0xf90200f901fba00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a089a5be1d3306f6f05b42678ef13ac3dbc37bef9a2a80862c21eb22eee29194c2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008088016345785d8a0000808000a000000000000000000000000000000000000000000000000000000000000000008800000000000000008203e8c0c0", "genesisBlockHeader": { @@ -140,15 +143,15 @@ "uncleHeaders": [] }, { - "rlp": "0xf902e1f901fea00e043cb2eb0339900f6199c0ab517e5be3a81d898fa58078ed8b866ddc60b010a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a069f3a735c7a7e1ea24a03a7107eba6a880d2d0251aaf24eaa7f109ece7969bf9a0ab28cd18f912c2177d3f787591ccc9ba7742c877cdeabe0098e7263ead8893c1a0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000388016345785d8a0000830155442480a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082029ff8ddb86c02f86901048203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820301c001a0720e2870881f8b0e285b7ec02c169f1165847bcb5f36ea5f33f3db6079854f63a04448266b715d7d99acd1e31dcab50d7119faa620d44c69b3f64f97d636634169b86d02f86a0105830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820302c080a06c7fb2be7e001a210d72480522b9ebecade52d721360ce5242e34a6c05a02715a01220e3cb7418cd6294443b38d05f5ed9f2967b182d25c784e11e7863454b8f9bc0", - "expectException": "invalid transaction", + "rlp": 
"0xf902e1f901fea00e043cb2eb0339900f6199c0ab517e5be3a81d898fa58078ed8b866ddc60b010a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a069f3a735c7a7e1ea24a03a7107eba6a880d2d0251aaf24eaa7f109ece7969bf9a07c6d7fe1d1734fca072880e563f763405dc362222d37487cb098a006f7db3b2ca0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000388016345785d8a0000830155442480a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082029ff8ddb86c02f86901048203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820301c001a0720e2870881f8b0e285b7ec02c169f1165847bcb5f36ea5f33f3db6079854f63a04448266b715d7d99acd1e31dcab50d7119faa620d44c69b3f64f97d636634169b86d02f86a0105830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820302c080a06c7fb2be7e001a210d72480522b9ebecade52d721360ce5242e34a6c05a02715a01220e3cb7418cd6294443b38d05f5ed9f2967b182d25c784e11e7863454b8f9bc0", + "expectException": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "rlp_decoded": { "blockHeader": { "parentHash": "0x0e043cb2eb0339900f6199c0ab517e5be3a81d898fa58078ed8b866ddc60b010", "uncleHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "coinbase": "0xba5e000000000000000000000000000000000000", "stateRoot": "0x69f3a735c7a7e1ea24a03a7107eba6a880d2d0251aaf24eaa7f109ece7969bf9", - "transactionsTrie": "0xab28cd18f912c2177d3f787591ccc9ba7742c877cdeabe0098e7263ead8893c1", + "transactionsTrie": "0x7c6d7fe1d1734fca072880e563f763405dc362222d37487cb098a006f7db3b2c", "receiptTrie": "0x976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4ef", "bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "difficulty": "0x020000", @@ -160,8 +163,9 @@ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "nonce": "0x0000000000000000", "baseFeePerGas": "0x029f", - "hash": "0x821a3c612a905a071d07151519a2ad225f0438b4b956c46edd12b6bf50e2239c" + "hash": "0x0cb9b60de1bb3893d7b7b806562a78aca5e9fbff47bf62893a5f6c0afcc73b48" }, + "blocknumber": "3", "transactions": [ { "type": "0x02", @@ -274,15 +278,15 @@ "uncleHeaders": [] }, { - "rlp": 
"0xf902e1f901fea05c66e5b6d6513ec98e9d8ee88137f1a2418542550977ea02015439acd2bf8f8ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e834ba6cd27f2702b0adf2ef6a85e2fbc340fb948c96e75b674e9a73a5dbc3d1a04ed2c2147e0a0d1c248330338f51778f350af8c209c528799278ac980786632ea0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000488016345785d8a0000830155443080a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082024cf8ddb86c02f86901078203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820401c001a0113c54f83e1b1e5c689ba86d288ec0ce2877f350b71821c4c7a3f7073b46602ca0548848e711b86ceeb657fd0a0bf44b792f6665ed18ec8a04f498471e811f8f97b86d02f86a0108830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820402c001a0ebc8ad530ec3d510998aa2485763fcd1c6958c900c8d8ae6eaf86e1eddde8b23a0341e4a021f7b77da28d853c07d11253b92331ab640ad3f28f5d7b2cdbc7ceca7c0", - "expectException": "invalid transaction", + "rlp": "0xf902e1f901fea05c66e5b6d6513ec98e9d8ee88137f1a2418542550977ea02015439acd2bf8f8ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e834ba6cd27f2702b0adf2ef6a85e2fbc340fb948c96e75b674e9a73a5dbc3d1a04722f7b17f27aee5dfa0d92ba40e16de960374a98ec63e728acaa1564d8a54f3a0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000488016345785d8a0000830155443080a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082024cf8ddb86c02f86901078203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820401c001a0113c54f83e1b1e5c689ba86d288ec0ce2877f350b71821c4c7a3f7073b46602ca0548848e711b86ceeb657fd0a0bf44b792f6665ed18ec8a04f498471e811f8f97b86d02f86a0108830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820402c001a0ebc8ad530ec3d510998aa2485763fcd1c6958c900c8d8ae6eaf86e1eddde8b23a0341e4a021f7b77da28d853c07d11253b92331ab640ad3f28f5d7b2cdbc7ceca7c0", + "expectException": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "rlp_decoded": { "blockHeader": { "parentHash": "0x5c66e5b6d6513ec98e9d8ee88137f1a2418542550977ea02015439acd2bf8f8e", "uncleHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "coinbase": "0xba5e000000000000000000000000000000000000", "stateRoot": "0xe834ba6cd27f2702b0adf2ef6a85e2fbc340fb948c96e75b674e9a73a5dbc3d1", - "transactionsTrie": "0x4ed2c2147e0a0d1c248330338f51778f350af8c209c528799278ac980786632e", + 
"transactionsTrie": "0x4722f7b17f27aee5dfa0d92ba40e16de960374a98ec63e728acaa1564d8a54f3", "receiptTrie": "0x976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4ef", "bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "difficulty": "0x020000", @@ -294,8 +298,9 @@ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "nonce": "0x0000000000000000", "baseFeePerGas": "0x024c", - "hash": "0xe0216dfed41475b9f321bcee40fca139957a9310454b868d2e5d3c9b1111e7bf" + "hash": "0x1f01f6d8ff3a461486c4c4334c94a05f114d161b1ac082c7374ad7ac51eea7f2" }, + "blocknumber": "4", "transactions": [ { "type": "0x02", @@ -539,6 +544,9 @@ }, "solc=padding_version": { "000/my_blockchain_test/London": { + "_info": { + "hash": "0x0aede6c01648a4a89633be93ad0dcc5ddb48dc27c38cddeac0629edf5ea2b7b5" + }, "network": "London", "genesisRLP": "0xf90200f901fba00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0de1557ffdf9765e61095937bf835742ca427008f33714bee743010ab2d1e0ba6a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008088016345785d8a0000808000a000000000000000000000000000000000000000000000000000000000000000008800000000000000008203e8c0c0", "genesisBlockHeader": { @@ -678,15 +686,15 @@ "uncleHeaders": [] }, { - "rlp": 
"0xf902e1f901fea015676cbd68ac93fede6f8192b19868145f17d2f89e231de456925dea93664e2da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0c12121517d65ac698ab8a67e75e208a9c11c3f02c1d380fc370375306e16971ea0ab28cd18f912c2177d3f787591ccc9ba7742c877cdeabe0098e7263ead8893c1a0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000388016345785d8a0000830155442480a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082029ff8ddb86c02f86901048203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820301c001a0720e2870881f8b0e285b7ec02c169f1165847bcb5f36ea5f33f3db6079854f63a04448266b715d7d99acd1e31dcab50d7119faa620d44c69b3f64f97d636634169b86d02f86a0105830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820302c080a06c7fb2be7e001a210d72480522b9ebecade52d721360ce5242e34a6c05a02715a01220e3cb7418cd6294443b38d05f5ed9f2967b182d25c784e11e7863454b8f9bc0", - "expectException": "invalid transaction", + "rlp": "0xf902e1f901fea015676cbd68ac93fede6f8192b19868145f17d2f89e231de456925dea93664e2da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0c12121517d65ac698ab8a67e75e208a9c11c3f02c1d380fc370375306e16971ea07c6d7fe1d1734fca072880e563f763405dc362222d37487cb098a006f7db3b2ca0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000388016345785d8a0000830155442480a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082029ff8ddb86c02f86901048203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820301c001a0720e2870881f8b0e285b7ec02c169f1165847bcb5f36ea5f33f3db6079854f63a04448266b715d7d99acd1e31dcab50d7119faa620d44c69b3f64f97d636634169b86d02f86a0105830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820302c080a06c7fb2be7e001a210d72480522b9ebecade52d721360ce5242e34a6c05a02715a01220e3cb7418cd6294443b38d05f5ed9f2967b182d25c784e11e7863454b8f9bc0", + "expectException": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "rlp_decoded": { "blockHeader": { "parentHash": "0x15676cbd68ac93fede6f8192b19868145f17d2f89e231de456925dea93664e2d", "uncleHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "coinbase": "0xba5e000000000000000000000000000000000000", "stateRoot": "0xc12121517d65ac698ab8a67e75e208a9c11c3f02c1d380fc370375306e16971e", - "transactionsTrie": "0xab28cd18f912c2177d3f787591ccc9ba7742c877cdeabe0098e7263ead8893c1", + 
"transactionsTrie": "0x7c6d7fe1d1734fca072880e563f763405dc362222d37487cb098a006f7db3b2c", "receiptTrie": "0x976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4ef", "bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "difficulty": "0x020000", @@ -698,8 +706,9 @@ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "nonce": "0x0000000000000000", "baseFeePerGas": "0x029f", - "hash": "0x915ca54d6df004476300024f553c021e3fbbb69f6c81b9a1f74b1ec211209681" + "hash": "0xf3ad606edcdfb24e7b24e32328334b3ddf5149ecd6c45ccbd4d39628a4ef2a85" }, + "blocknumber": "3", "transactions": [ { "type": "0x02", @@ -812,15 +821,15 @@ "uncleHeaders": [] }, { - "rlp": "0xf902e1f901fea00817157aaf7981caa63e995d4d45ee7e30c0b26e52fe668e1f8bcd2b457a79cea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a04a631519f4a7675eb6edb98719287ab1d1896111acd02dde544386ef63445fdaa04ed2c2147e0a0d1c248330338f51778f350af8c209c528799278ac980786632ea0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000488016345785d8a0000830155443080a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082024cf8ddb86c02f86901078203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820401c001a0113c54f83e1b1e5c689ba86d288ec0ce2877f350b71821c4c7a3f7073b46602ca0548848e711b86ceeb657fd0a0bf44b792f6665ed18ec8a04f498471e811f8f97b86d02f86a0108830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820402c001a0ebc8ad530ec3d510998aa2485763fcd1c6958c900c8d8ae6eaf86e1eddde8b23a0341e4a021f7b77da28d853c07d11253b92331ab640ad3f28f5d7b2cdbc7ceca7c0", - "expectException": "invalid transaction", + "rlp": 
"0xf902e1f901fea00817157aaf7981caa63e995d4d45ee7e30c0b26e52fe668e1f8bcd2b457a79cea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a04a631519f4a7675eb6edb98719287ab1d1896111acd02dde544386ef63445fdaa04722f7b17f27aee5dfa0d92ba40e16de960374a98ec63e728acaa1564d8a54f3a0976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4efb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000488016345785d8a0000830155443080a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082024cf8ddb86c02f86901078203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820401c001a0113c54f83e1b1e5c689ba86d288ec0ce2877f350b71821c4c7a3f7073b46602ca0548848e711b86ceeb657fd0a0bf44b792f6665ed18ec8a04f498471e811f8f97b86d02f86a0108830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820402c001a0ebc8ad530ec3d510998aa2485763fcd1c6958c900c8d8ae6eaf86e1eddde8b23a0341e4a021f7b77da28d853c07d11253b92331ab640ad3f28f5d7b2cdbc7ceca7c0", + "expectException": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "rlp_decoded": { "blockHeader": { "parentHash": "0x0817157aaf7981caa63e995d4d45ee7e30c0b26e52fe668e1f8bcd2b457a79ce", "uncleHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "coinbase": "0xba5e000000000000000000000000000000000000", "stateRoot": "0x4a631519f4a7675eb6edb98719287ab1d1896111acd02dde544386ef63445fda", - "transactionsTrie": "0x4ed2c2147e0a0d1c248330338f51778f350af8c209c528799278ac980786632e", + "transactionsTrie": "0x4722f7b17f27aee5dfa0d92ba40e16de960374a98ec63e728acaa1564d8a54f3", "receiptTrie": "0x976beb67b634171d419ef326220dfdda98074e3495940240a105e17643f0a4ef", "bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "difficulty": "0x020000", @@ -832,8 +841,9 @@ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "nonce": "0x0000000000000000", "baseFeePerGas": "0x024c", - "hash": "0x6b86f7ac310b740894a89e718891fe3169d35e5e770493fe0f788c1fa2ee7d04" + "hash": "0xc80ae3f610a2adb971179fc1e1bc120f3b38c88ff388cf059809a579be6e5f2c" }, + "blocknumber": "4", "transactions": [ { "type": "0x02", @@ -1075,4 +1085,4 @@ "sealEngine": "NoProof" } } -} +} \ No newline at end of file diff --git a/src/ethereum_test_tools/tests/test_fixtures/blockchain_london_valid_filled.json b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_london_valid_filled.json similarity index 99% rename from src/ethereum_test_tools/tests/test_fixtures/blockchain_london_valid_filled.json rename to 
src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_london_valid_filled.json index 8f2c0126ca..35abf6f9d1 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/blockchain_london_valid_filled.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_london_valid_filled.json @@ -1,6 +1,9 @@ { "solc=0.8.20": { "000/my_blockchain_test/London": { + "_info": { + "hash": "0xf3750dd67158fb66466c4deb195073bb5a4621f3b3ee315c9d3fa443d7d8c445" + }, "network": "London", "genesisRLP": "0xf90200f901fba00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a089a5be1d3306f6f05b42678ef13ac3dbc37bef9a2a80862c21eb22eee29194c2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008088016345785d8a0000808000a000000000000000000000000000000000000000000000000000000000000000008800000000000000008203e8c0c0", "genesisBlockHeader": { @@ -419,6 +422,9 @@ }, "solc=padding_version": { "000/my_blockchain_test/London": { + "_info": { + "hash": "0xb31303cc3ecdd1cac8b0669e43f70e1f8784aa8659f457ed4e1654935c6e986b" + }, "blocks": [ { "rlp": "0xf9026ef901fea0c552af8a2644e24df2f54d14aa70f207146dda49b746cc2e0af88e185f043d2ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a06bbd44292c9016cf53472d8ef579a1805a9008b898c5f159248ed106532b667ba0586f963eea0fb4726f0f91f895f2aa5d67bffb5207a529b40d781244a0c7017ba029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a0000830155340c80a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036bf86ab86802f8650180018203e8830f424094cccccccccccccccccccccccccccccccccccccccc8001c080a03351b6993208fc7b03fd770c8c06440cfb0d75b29aafee0a4c64c8ba20a80e58a067817fdb3058e75c5d26e51a33d1e338346bc7d406e115447a4bb5f7ab01625bc0", diff --git a/src/ethereum_test_tools/tests/test_fixtures/blockchain_shanghai_invalid_filled_hive.json b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_shanghai_invalid_filled_hive.json similarity index 97% rename from src/ethereum_test_tools/tests/test_fixtures/blockchain_shanghai_invalid_filled_hive.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_shanghai_invalid_filled_hive.json index fe81c071a0..467266edf7 100644 --- 
a/src/ethereum_test_tools/tests/test_fixtures/blockchain_shanghai_invalid_filled_hive.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_shanghai_invalid_filled_hive.json @@ -1,6 +1,9 @@ { "solc=0.8.20": { "000/my_blockchain_test/Shanghai": { + "_info": { + "hash": "0x7c6f019488aef8bebd4582f188e579bc5eb994790f2ac77dc4cef67229cf96d9" + }, "network": "Shanghai", "genesisBlockHeader": { "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -43,7 +46,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -68,7 +70,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -85,14 +86,14 @@ "extraData": "0x", "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", "baseFeePerGas": "0x29f", - "blockHash": "0x6504f75aa7b88dd9e059088d2db4d911934a5c0e3d076a48f6aeef9129df1472", + "blockHash": "0xeaa67ef33964d925aabc53e217e3f5f143615723970bfa07b80c46ef946ca293", "transactions": [ "0x02f86901048203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820301c001a0720e2870881f8b0e285b7ec02c169f1165847bcb5f36ea5f33f3db6079854f63a04448266b715d7d99acd1e31dcab50d7119faa620d44c69b3f64f97d636634169", "0x02f86a0105830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820302c080a06c7fb2be7e001a210d72480522b9ebecade52d721360ce5242e34a6c05a02715a01220e3cb7418cd6294443b38d05f5ed9f2967b182d25c784e11e7863454b8f9b" ], "withdrawals": [] }, - "valid": false, + "validationError": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "version": "2" }, { @@ -117,7 +118,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -134,14 +134,14 @@ "extraData": "0x", "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", "baseFeePerGas": "0x24c", - "blockHash": "0xa8eec4a7460bdc0d813ab931562ca3a3b4e25c4482b9039003fdb293c3b05c96", + "blockHash": "0xa8b4cee5dcb437faf9d815cbe99986f9000e32cf5ea86613b944ac285cac0187", "transactions": [ "0x02f86901078203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820401c001a0113c54f83e1b1e5c689ba86d288ec0ce2877f350b71821c4c7a3f7073b46602ca0548848e711b86ceeb657fd0a0bf44b792f6665ed18ec8a04f498471e811f8f97", "0x02f86a0108830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820402c001a0ebc8ad530ec3d510998aa2485763fcd1c6958c900c8d8ae6eaf86e1eddde8b23a0341e4a021f7b77da28d853c07d11253b92331ab640ad3f28f5d7b2cdbc7ceca7" ], "withdrawals": [] }, - "valid": false, + "validationError": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "version": "2" }, { @@ -166,7 +166,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" } ], @@ -300,6 +299,9 @@ }, "solc=padding_version": { "000/my_blockchain_test/Shanghai": { + "_info": { + "hash": "0x55f9bcc040c2183e1f82e80ec5aeeff33c5e7454c05ab694e6e4153a59ea3a11" + }, "network": "Shanghai", "genesisBlockHeader": { "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -342,7 +344,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -367,7 +368,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -384,14 +384,14 @@ "extraData": "0x", "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", "baseFeePerGas": "0x29f", - "blockHash": "0xec8258ae1312e560e07e7d0fd208237e515c3e7d709f92fdc7a5b7316da25bdc", + "blockHash": "0x5c16738a8a828e396bc356b54716694ac63cce50e27c4cb270727af80b6a6a8a", "transactions": [ 
"0x02f86901048203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820301c001a0720e2870881f8b0e285b7ec02c169f1165847bcb5f36ea5f33f3db6079854f63a04448266b715d7d99acd1e31dcab50d7119faa620d44c69b3f64f97d636634169", "0x02f86a0105830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820302c080a06c7fb2be7e001a210d72480522b9ebecade52d721360ce5242e34a6c05a02715a01220e3cb7418cd6294443b38d05f5ed9f2967b182d25c784e11e7863454b8f9b" ], "withdrawals": [] }, - "valid": false, + "validationError": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "version": "2" }, { @@ -416,7 +416,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -433,14 +432,14 @@ "extraData": "0x", "prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000", "baseFeePerGas": "0x24c", - "blockHash": "0x30c73027954c5b8e8d633775e6cf4f1362fb15bf7d41be2424d757d2cc9d5219", + "blockHash": "0x1498909af4d98e8ad23020f9a8055ce5ec7cc6264ca84c710a8bc2a93cffeffc", "transactions": [ "0x02f86901078203e88203e8830f424094cccccccccccccccccccccccccccccccccccccccc80820401c001a0113c54f83e1b1e5c689ba86d288ec0ce2877f350b71821c4c7a3f7073b46602ca0548848e711b86ceeb657fd0a0bf44b792f6665ed18ec8a04f498471e811f8f97", "0x02f86a0108830186a08203e8830f424094cccccccccccccccccccccccccccccccccccccccd80820402c001a0ebc8ad530ec3d510998aa2485763fcd1c6958c900c8d8ae6eaf86e1eddde8b23a0341e4a021f7b77da28d853c07d11253b92331ab640ad3f28f5d7b2cdbc7ceca7" ], "withdrawals": [] }, - "valid": false, + "validationError": "TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS", "version": "2" }, { @@ -465,7 +464,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" } ], @@ -597,4 +595,4 @@ } } } -} +} \ No newline at end of file diff --git a/src/ethereum_test_tools/tests/test_fixtures/blockchain_shanghai_valid_filled_hive.json b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_shanghai_valid_filled_hive.json similarity index 99% rename from src/ethereum_test_tools/tests/test_fixtures/blockchain_shanghai_valid_filled_hive.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_shanghai_valid_filled_hive.json index b66dbbc879..37d2265f6a 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/blockchain_shanghai_valid_filled_hive.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/blockchain_shanghai_valid_filled_hive.json @@ -1,6 +1,9 @@ { "solc=0.8.20": { "000/my_blockchain_test/Shanghai": { + "_info": { + "hash": "0x65ff879b8bfe661b2ab4cfbc45bb28d9a07110455eff98972dfba3988632cff5" + }, "network": "Shanghai", "genesisBlockHeader": { "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -43,7 +46,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -68,7 +70,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -93,7 +94,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -118,7 +118,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" } ], @@ -252,6 +251,9 @@ }, "solc=padding_version": { "000/my_blockchain_test/Shanghai": { + "_info": { + "hash": "0xeb5c65b76f939bbd0b89d2f8b4cc075bb2c3ac6f00b07c67f8f77af160444470" + }, "network": "Shanghai", "genesisBlockHeader": { "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -294,7 +296,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -319,7 +320,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" }, { @@ -344,7 +344,6 @@ ], "withdrawals": [] }, - "valid": true, 
"version": "2" }, { @@ -369,7 +368,6 @@ ], "withdrawals": [] }, - "valid": true, "version": "2" } ], diff --git a/src/ethereum_test_tools/tests/test_fixtures/chainid_istanbul_filled.json b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_istanbul_blockchain_test.json similarity index 98% rename from src/ethereum_test_tools/tests/test_fixtures/chainid_istanbul_filled.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/chainid_istanbul_blockchain_test.json index 6967e81333..ba751ade50 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/chainid_istanbul_filled.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_istanbul_blockchain_test.json @@ -1,5 +1,8 @@ { "000/my_chain_id_test/Istanbul": { + "_info": { + "hash": "0x10c6e62b41fea4dbe1d9652536b662efdcdc54248297cf7f8aa1fa64fca9def5" + }, "network": "Istanbul", "genesisRLP": "0xf901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0aff9f63320a482f8c4e4f15f659e6a7ac382138fbbb6919243b0cba4c5988a5aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0", "genesisBlockHeader": { @@ -41,6 +44,7 @@ "nonce": "0x0000000000000000", "hash": "0xc413245fffae8b7c6392bcd3dfbbdee24118e94d9a58722a7abd91a4e1d048b7" }, + "blocknumber": "1", "transactions": [ { "type": "0x00", diff --git a/src/ethereum_test_tools/tests/test_fixtures/chainid_london_filled.json b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_london_blockchain_test.json similarity index 98% rename from src/ethereum_test_tools/tests/test_fixtures/chainid_london_filled.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/chainid_london_blockchain_test.json index c2556c1048..fbea97a17c 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/chainid_london_filled.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_london_blockchain_test.json @@ -1,5 +1,8 @@ { "000/my_chain_id_test/London": { + "_info": { + "hash": "0xe2dd2b58a176c4f55a91d96ed10e9ae7857a77f10ba5da427a27bcc24bd92d0f" + }, "network": "London", "genesisRLP": 
"0xf901fbf901f6a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0aff9f63320a482f8c4e4f15f659e6a7ac382138fbbb6919243b0cba4c5988a5aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a0000000000000000000000000000000000000000000000000000000000000000088000000000000000007c0c0", "genesisBlockHeader": { @@ -43,6 +46,7 @@ "baseFeePerGas": "0x07", "hash": "0xe05293fe6050385e463d93c310bc52f87715f509aeb036455bbe4597cf36706a" }, + "blocknumber": "1", "transactions": [ { "type": "0x00", diff --git a/src/ethereum_test_tools/tests/test_fixtures/chainid_merge_filled_hive.json b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_paris_blockchain_test_hive.json similarity index 96% rename from src/ethereum_test_tools/tests/test_fixtures/chainid_merge_filled_hive.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/chainid_paris_blockchain_test_hive.json index ab96ff9a91..1b01baf98c 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/chainid_merge_filled_hive.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_paris_blockchain_test_hive.json @@ -1,5 +1,8 @@ { - "000/my_chain_id_test/Merge": { + "000/my_chain_id_test/Paris": { + "_info": { + "hash": "0x61b10b97625c41f23bee558102248e8690f127041d1dc7262566c90e207e41d4" + }, "network": "Merge", "genesisBlockHeader": { "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -40,8 +43,7 @@ "0xf861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509b" ] }, - "version": "1", - "valid": true + "version": "1" } ], "engineFcuVersion": "1", @@ -82,4 +84,4 @@ } } } -} +} \ No newline at end of file diff --git a/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_paris_state_test.json b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_paris_state_test.json new file mode 100644 index 0000000000..7ccba7ae4b --- /dev/null +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_paris_state_test.json @@ -0,0 +1,60 @@ +{ + "000/my_chain_id_test/Paris": { + "_info": { + "hash": "0x6cbfbd0c9bd8b4739ce34df0d25047cfbdcf5b12964b824c921b159ddca4ccfe" + }, + "env": { + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentGasLimit": "0x02540be400", + "currentNumber": "0x01", + "currentTimestamp": "0x03e8", + "currentRandom": "0x0000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x00", + "currentBaseFee": "0x07" + }, + "pre": { + "0x1000000000000000000000000000000000000000": { + "nonce": "0x00", + "balance": "0x00", + "code": "0x4660015500", + "storage": {} + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "nonce": "0x00", + "balance": 
"0x3635c9adc5dea00000", + "code": "0x", + "storage": {} + } + }, + "transaction": { + "nonce": "0x00", + "gasPrice": "0x0a", + "gasLimit": [ + "0x05f5e100" + ], + "to": "0x1000000000000000000000000000000000000000", + "value": [ + "0x00" + ], + "data": [ + "0x" + ], + "sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + "post": { + "Merge": [ + { + "hash": "0x19919608275963e6e20a1191996f5b19db8208dd8df54097cfd2b9cb14f682b6", + "logs": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "txbytes": "0xf861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509b", + "indexes": { + "data": 0, + "gas": 0, + "value": 0 + } + } + ] + } + } +} \ No newline at end of file diff --git a/src/ethereum_test_tools/tests/test_fixtures/chainid_shanghai_filled_hive.json b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_shanghai_blockchain_test_hive.json similarity index 97% rename from src/ethereum_test_tools/tests/test_fixtures/chainid_shanghai_filled_hive.json rename to src/ethereum_test_tools/tests/test_filling/fixtures/chainid_shanghai_blockchain_test_hive.json index b0cc9bf987..67b0008738 100644 --- a/src/ethereum_test_tools/tests/test_fixtures/chainid_shanghai_filled_hive.json +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_shanghai_blockchain_test_hive.json @@ -1,5 +1,8 @@ { "000/my_chain_id_test/Shanghai": { + "_info": { + "hash": "0x0ae35254c197f727c0a4254d37851b95967713f80d7c57a7717f99bd036e54e5" + }, "network": "Shanghai", "genesisBlockHeader": { "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -42,8 +45,7 @@ ], "withdrawals": [] }, - "version": "2", - "valid": true + "version": "2" } ], "engineFcuVersion": "2", diff --git a/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_shanghai_state_test.json b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_shanghai_state_test.json new file mode 100644 index 0000000000..3c56c9f6b4 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_filling/fixtures/chainid_shanghai_state_test.json @@ -0,0 +1,60 @@ +{ + "000/my_chain_id_test/Shanghai": { + "_info": { + "hash": "0x20cae66b10db3f94fafdbb343c95ade6ea4f650379d3383234709e3dbc802318" + }, + "env": { + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentGasLimit": "0x02540be400", + "currentNumber": "0x01", + "currentTimestamp": "0x03e8", + "currentRandom": "0x0000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x00", + "currentBaseFee": "0x07" + }, + "pre": { + "0x1000000000000000000000000000000000000000": { + "nonce": "0x00", + "balance": "0x00", + "code": "0x4660015500", + "storage": {} + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "nonce": "0x00", + "balance": "0x3635c9adc5dea00000", + "code": "0x", + "storage": {} + } + }, + "transaction": { + "nonce": "0x00", + "gasPrice": "0x0a", + "gasLimit": [ + "0x05f5e100" + ], + "to": "0x1000000000000000000000000000000000000000", + "value": [ + "0x00" + ], + "data": [ + "0x" + ], + "sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + "post": { + "Shanghai": [ + { + "hash": "0x19919608275963e6e20a1191996f5b19db8208dd8df54097cfd2b9cb14f682b6", + "logs": 
"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "txbytes": "0xf861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509b", + "indexes": { + "data": 0, + "gas": 0, + "value": 0 + } + } + ] + } + } +} \ No newline at end of file diff --git a/src/ethereum_test_tools/tests/test_filling/test_expect.py b/src/ethereum_test_tools/tests/test_filling/test_expect.py new file mode 100644 index 0000000000..9456a09cf1 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_filling/test_expect.py @@ -0,0 +1,235 @@ +""" +Test fixture post state (expect section) during state fixture generation. +""" +from typing import Any, Mapping + +import pytest + +from ethereum_test_forks import Fork, get_deployed_forks +from evm_transition_tool import FixtureFormats, GethTransitionTool + +from ...common import Account, Address, Environment, Transaction +from ...common.types import Storage +from ...spec import StateTest + +ADDRESS_UNDER_TEST = Address(0x01) + + +@pytest.fixture +def pre(request) -> Mapping[Any, Any]: + """ + The pre state: Set from the test's indirectly parametrized `pre` parameter. + """ + return request.param + + +@pytest.fixture +def post(request) -> Mapping[Any, Any]: # noqa: D103 + """ + The post state: Set from the test's indirectly parametrized `post` parameter. + """ + return request.param + + +@pytest.fixture +def fork() -> Fork: # noqa: D103 + return get_deployed_forks()[-1] + + +@pytest.fixture +def state_test( # noqa: D103 + fork: Fork, pre: Mapping[Any, Any], post: Mapping[Any, Any] +) -> StateTest: + return StateTest( + env=Environment(), + pre=pre, + post=post, + tx=Transaction(), + tag="post_value_mismatch", + fixture_format=FixtureFormats.STATE_TEST, + ) + + +@pytest.fixture +def t8n() -> GethTransitionTool: # noqa: D103 + return GethTransitionTool() + + +# Storage value mismatch tests +@pytest.mark.parametrize( + "pre,post,expected_exception", + [ + ( # mismatch_1: 1:1 vs 1:2 + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=1, want=2, got=1), + ), + ( # mismatch_2: 1:1 vs 2:1 + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x02": "0x01"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=1, want=0, got=1), + ), + ( # mismatch_2_a: 1:1 vs 0:0 + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x00": "0x00"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=1, want=0, got=1), + ), + ( # mismatch_2_b: 1:1 vs empty + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=1, want=0, got=1), + ), + ( # mismatch_3: 0:0 vs 1:2 + {ADDRESS_UNDER_TEST: Account(storage={"0x00": "0x00"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=1, want=2, got=0), + ), + ( # mismatch_3_a: empty vs 1:2 + {ADDRESS_UNDER_TEST: Account(storage={}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=1, want=2, got=0), + ), + ( # mismatch_4: 0:3, 1:2 vs 1:2 + {ADDRESS_UNDER_TEST: Account(storage={"0x00": "0x03", 
"0x01": "0x02"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=0, want=0, got=3), + ), + ( # mismatch_5: 1:2, 2:3 vs 1:2 + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02", "0x02": "0x03"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=2, want=0, got=3), + ), + ( # mismatch_6: 1:2 vs 1:2, 2:3 + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"}, nonce=1)}, + {ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02", "0x02": "0x03"})}, + Storage.KeyValueMismatch(address=ADDRESS_UNDER_TEST, key=2, want=3, got=0), + ), + ], +) +def test_post_storage_value_mismatch(pre, post, expected_exception, state_test, t8n, fork): + """ + Test post state `Account.storage` exceptions during state test fixture generation. + """ + with pytest.raises(Storage.KeyValueMismatch) as e_info: + state_test.generate(t8n=t8n, fork=fork) + assert e_info.value == expected_exception + + +# Nonce value mismatch tests +@pytest.mark.parametrize( + "pre,post", + [ + ({ADDRESS_UNDER_TEST: Account(nonce=1)}, {ADDRESS_UNDER_TEST: Account(nonce=2)}), + ({ADDRESS_UNDER_TEST: Account(nonce=1)}, {ADDRESS_UNDER_TEST: Account(nonce=0)}), + ({ADDRESS_UNDER_TEST: Account(nonce=1)}, {ADDRESS_UNDER_TEST: Account(nonce=None)}), + ], +) +def test_post_nonce_value_mismatch(pre, post, state_test, t8n, fork): + """ + Test post state `Account.nonce` verification and exceptions during state test + fixture generation. + """ + pre_nonce = pre[ADDRESS_UNDER_TEST].nonce + post_nonce = post[ADDRESS_UNDER_TEST].nonce + if post_nonce is None: # no exception + state_test.generate(t8n=t8n, fork=fork) + return + with pytest.raises(Account.NonceMismatch) as e_info: + state_test.generate(t8n=t8n, fork=fork) + assert e_info.value == Account.NonceMismatch( + address=ADDRESS_UNDER_TEST, want=post_nonce, got=pre_nonce + ) + + +# Code value mismatch tests +@pytest.mark.parametrize( + "pre,post", + [ + ({ADDRESS_UNDER_TEST: Account(code="0x02")}, {ADDRESS_UNDER_TEST: Account(code="0x01")}), + ({ADDRESS_UNDER_TEST: Account(code="0x02")}, {ADDRESS_UNDER_TEST: Account(code="0x")}), + ({ADDRESS_UNDER_TEST: Account(code="0x02")}, {ADDRESS_UNDER_TEST: Account(code=None)}), + ], + indirect=["pre", "post"], +) +def test_post_code_value_mismatch(pre, post, state_test, t8n, fork): + """ + Test post state `Account.code` verification and exceptions during state test + fixture generation. + """ + pre_code = pre[ADDRESS_UNDER_TEST].code + post_code = post[ADDRESS_UNDER_TEST].code + if post_code is None: # no exception + state_test.generate(t8n=t8n, fork=fork) + return + with pytest.raises(Account.CodeMismatch) as e_info: + state_test.generate(t8n=t8n, fork=fork) + assert e_info.value == Account.CodeMismatch( + address=ADDRESS_UNDER_TEST, want=post_code, got=pre_code + ) + + +# Balance value mismatch tests +@pytest.mark.parametrize( + "pre,post", + [ + ({ADDRESS_UNDER_TEST: Account(balance=1)}, {ADDRESS_UNDER_TEST: Account(balance=2)}), + ({ADDRESS_UNDER_TEST: Account(balance=1)}, {ADDRESS_UNDER_TEST: Account(balance=0)}), + ({ADDRESS_UNDER_TEST: Account(balance=1)}, {ADDRESS_UNDER_TEST: Account(balance=None)}), + ], + indirect=["pre", "post"], +) +def test_post_balance_value_mismatch(pre, post, state_test, t8n, fork): + """ + Test post state `Account.balance` verification and exceptions during state test + fixture generation. 
+ """ + pre_balance = pre[ADDRESS_UNDER_TEST].balance + post_balance = post[ADDRESS_UNDER_TEST].balance + if post_balance is None: # no exception + state_test.generate(t8n=t8n, fork=fork) + return + with pytest.raises(Account.BalanceMismatch) as e_info: + state_test.generate(t8n=t8n, fork=fork) + assert e_info.value == Account.BalanceMismatch( + address=ADDRESS_UNDER_TEST, want=post_balance, got=pre_balance + ) + + +# Account mismatch tests +@pytest.mark.parametrize( + "pre,post,error_str", + [ + ( + {ADDRESS_UNDER_TEST: Account(balance=1)}, + {ADDRESS_UNDER_TEST: Account()}, + None, + ), + ( + {ADDRESS_UNDER_TEST: Account(balance=1)}, + {ADDRESS_UNDER_TEST: Account(balance=1), Address(0x02): Account(balance=1)}, + "expected account not found", + ), + ( + {ADDRESS_UNDER_TEST: Account(balance=1)}, + {}, + None, + ), + ( + {ADDRESS_UNDER_TEST: Account(balance=1)}, + {ADDRESS_UNDER_TEST: Account.NONEXISTENT}, + "found unexpected account", + ), + ], + indirect=["pre", "post"], +) +def test_post_account_mismatch(state_test, t8n, fork, error_str): + """ + Test post state `Account` verification and exceptions during state test + fixture generation. + """ + if error_str is None: + state_test.generate(t8n=t8n, fork=fork) + return + with pytest.raises(Exception) as e_info: + state_test.generate(t8n=t8n, fork=fork) + assert error_str in str(e_info.value) diff --git a/src/ethereum_test_tools/tests/test_filling/test_fixtures.py b/src/ethereum_test_tools/tests/test_filling/test_fixtures.py new file mode 100644 index 0000000000..61fc0a0a98 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_filling/test_fixtures.py @@ -0,0 +1,901 @@ +""" +Test suite for `ethereum_test_tools.filling` fixture generation. +""" + +import json +import os +from typing import Any, List, Mapping + +import pytest +from semver import Version + +from ethereum_test_forks import Berlin, Fork, Istanbul, London, Paris, Shanghai +from evm_transition_tool import FixtureFormats, GethTransitionTool + +from ... import Header +from ...code import Yul +from ...common import Account, Environment, Hash, TestAddress, Transaction, to_json +from ...exceptions import TransactionException +from ...spec import BlockchainTest, StateTest +from ...spec.blockchain.types import Block +from ...spec.blockchain.types import Fixture as BlockchainFixture +from ...spec.blockchain.types import FixtureCommon as BlockchainFixtureCommon +from ..conftest import SOLC_PADDING_VERSION + + +def remove_info_metadata(fixture_json): # noqa: D103 + for t in fixture_json: + if "_info" in fixture_json[t]: + info_keys = list(fixture_json[t]["_info"].keys()) + for key in info_keys: + if key != "hash": # remove keys that are not 'hash' + del fixture_json[t]["_info"][key] + + +@pytest.fixture() +def hash(request: pytest.FixtureRequest, solc_version: Version): + """ + Set the hash based on the fork and solc version. 
+ """ + if solc_version == Version.parse("0.8.20"): + if request.node.funcargs["fork"] == Berlin: + return bytes.fromhex("193e550de3") + elif request.node.funcargs["fork"] == London: + return bytes.fromhex("b053deac0e") + else: + if request.node.funcargs["fork"] == Berlin: + return bytes.fromhex("f3a35d34f6") + elif request.node.funcargs["fork"] == London: + return bytes.fromhex("c5fa75d7f6") + + +@pytest.mark.parametrize( + "fork,hash", + [ + (Berlin, "set using indirect & hash fixture"), + (London, "set using indirect & hash fixture"), + ], + indirect=["hash"], +) +def test_make_genesis(fork: Fork, hash: bytes): # noqa: D103 + env = Environment() + + pre = { + "0x1000000000000000000000000000000000000000": Account( + balance=0x0BA1A9CE0BA1A9CE, + code=Yul( + """ + { + function f(a, b) -> c { + c := add(a, b) + } + + sstore(0, f(1, 2)) + return(0, 32) + } + """, + fork=fork, + ), + ), + TestAddress: Account(balance=0x0BA1A9CE0BA1A9CE), + } + + t8n = GethTransitionTool() + fixture = BlockchainTest( + genesis_environment=env, + pre=pre, + post={}, + blocks=[], + tag="some_state_test", + fixture_format=FixtureFormats.BLOCKCHAIN_TEST, + ).generate(t8n, fork) + assert isinstance(fixture, BlockchainFixture) + assert fixture.genesis is not None + + assert fixture.genesis.hash is not None + assert fixture.genesis.hash.startswith(hash) + + +@pytest.mark.parametrize( + "fork,fixture_format", + [ + (Istanbul, FixtureFormats.BLOCKCHAIN_TEST), + (London, FixtureFormats.BLOCKCHAIN_TEST), + (Paris, FixtureFormats.BLOCKCHAIN_TEST_HIVE), + (Shanghai, FixtureFormats.BLOCKCHAIN_TEST_HIVE), + (Paris, FixtureFormats.STATE_TEST), + (Shanghai, FixtureFormats.STATE_TEST), + ], +) +def test_fill_state_test( + fork: Fork, + fixture_format: FixtureFormats, +): + """ + Test `ethereum_test.filler.fill_fixtures` with `StateTest`. + """ + env = Environment( + coinbase="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + difficulty=0x20000, + gas_limit=10000000000, + number=1, + timestamp=1000, + ) + + pre = { + 0x1000000000000000000000000000000000000000: Account(code="0x4660015500"), + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=1000000000000000000000), + } + + tx = Transaction( + ty=0x0, + chain_id=0x0, + nonce=0, + to="0x1000000000000000000000000000000000000000", + gas_limit=100000000, + gas_price=10, + protected=False, + ) + + post = { + "0x1000000000000000000000000000000000000000": Account( + code="0x4660015500", storage={"0x01": "0x01"} + ), + } + + t8n = GethTransitionTool() + generated_fixture = StateTest( + env=env, + pre=pre, + post=post, + tx=tx, + tag="my_chain_id_test", + fixture_format=fixture_format, + ).generate( + t8n=t8n, + fork=fork, + ) + assert generated_fixture.format() == fixture_format + fixture = { + f"000/my_chain_id_test/{fork}": generated_fixture.to_json(), + } + + expected_json_file = f"chainid_{fork.name().lower()}_{fixture_format.value}.json" + with open( + os.path.join( + "src", + "ethereum_test_tools", + "tests", + "test_filling", + "fixtures", + expected_json_file, + ) + ) as f: + expected = json.load(f) + + fixture_json = to_json(fixture) + remove_info_metadata(fixture_json) + assert fixture_json == expected + + +class TestFillBlockchainValidTxs: + """ + Test `BlockchainTest.generate()` and blockchain fixtures. 
+ """ + + @pytest.fixture + def fork(self, request): # noqa: D102 + return request.param + + @pytest.fixture + def check_hive(self, fork): # noqa: D102 + return fork == Shanghai + + @pytest.fixture + def expected_json_file(self, fork: Fork, check_hive: bool): # noqa: D102 + if fork == London and not check_hive: + return "blockchain_london_valid_filled.json" + elif fork == Shanghai and check_hive: + return "blockchain_shanghai_valid_filled_hive.json" + raise ValueError(f"Unexpected fork/check_hive combination: {fork}/{check_hive}") + + @pytest.fixture + def pre(self, fork: Fork): # noqa: D102 + pre = { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=0x1000000000000000000), + "0xd02d72E067e77158444ef2020Ff2d325f929B363": Account( + balance=0x1000000000000000000, nonce=1 + ), + "0xcccccccccccccccccccccccccccccccccccccccc": Account( + balance=0x10000000000, + nonce=1, + code=Yul( + """ + { + sstore(number(), basefee()) + sstore(add(number(), 0x1000), sub(gasprice(), basefee())) + sstore(add(number(), 0x2000), selfbalance()) + stop() + } + """, + fork=fork, + ), + ), + "0xcccccccccccccccccccccccccccccccccccccccd": Account( + balance=0x20000000000, + nonce=1, + code=Yul( + """ + { + let throwMe := delegatecall(gas(), + 0xcccccccccccccccccccccccccccccccccccccccc, + 0, 0, 0, 0) + } + """, + fork=fork, + ), + ), + 0xC0DE: Account( + balance=0, + nonce=1, + code=Yul( + """ + { + let throwMe := delegatecall(gas(), + 0xcccccccccccccccccccccccccccccccccccccccc, + 0, 0, 0, 0) + } + """, + fork=fork, + ), + ), + "0xccccccccccccccccccccccccccccccccccccccce": Account( + balance=0x20000000000, + nonce=1, + code=Yul( + """ + { + let throwMe := call(gas(), 0xC0DE, 0x1000, + 0, 0, 0, 0) + throwMe := delegatecall(gas(), + 0xcccccccccccccccccccccccccccccccccccccccc, + 0, 0, 0, 0) + } + """, + fork=fork, + ), + ), + } + return pre + + @pytest.fixture + def blocks(self): # noqa: D102 + blocks: List[Block] = [ + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x01", + nonce=0, + gas_limit=1000000, + max_priority_fee_per_gas=1, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + ], + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0201", + nonce=1, + gas_limit=1000000, + max_priority_fee_per_gas=10, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0202", + nonce=2, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + ), + Transaction( + data="0x0203", + nonce=3, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", + ), + ], + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0301", + nonce=4, + gas_limit=1000000, + max_priority_fee_per_gas=1000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0303", + nonce=5, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", + ), + Transaction( + data="0x0304", + nonce=6, + gas_limit=1000000, + max_priority_fee_per_gas=100000, + max_fee_per_gas=100000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + ), + ], + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0401", + nonce=7, + 
gas_limit=1000000, + max_priority_fee_per_gas=1000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0403", + nonce=8, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", + ), + Transaction( + data="0x0404", + nonce=9, + gas_limit=1000000, + max_priority_fee_per_gas=100000, + max_fee_per_gas=100000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + ), + ], + ), + ] + return blocks + + @pytest.fixture + def post(self): # noqa: D102 + post = { + "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC": Account( + storage={ + # BASEFEE and the tip in block 1 + 0x0001: 875, # BASEFEE + 0x1001: 1, # tip + # Block 2 + 0x0002: 766, # BASEFEE + 0x1002: 10, # tip + # Block 3 + 0x0003: 671, + 0x1003: 329, + # Block 4 + 0x0004: 588, + 0x1004: 412, + # SELFBALANCE, always the same + 0x2001: 0x010000000000, + 0x2002: 0x010000000000, + 0x2003: 0x010000000000, + 0x2004: 0x010000000000, + } + ), + "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD": Account( + storage={ + # Block 2 + 0x0002: 766, # BASEFEE + 0x1002: 100, # tip + # Block 3 + 0x0003: 671, + 0x1003: 99329, + # Block 4 + 0x0004: 588, + 0x1004: 99412, + # SELFBALANCE, always the same + 0x2002: 0x020000000000, + 0x2003: 0x020000000000, + 0x2004: 0x020000000000, + } + ), + "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE": Account( + storage={ + # Block 2 + 0x0002: 766, # BASEFEE + 0x1002: 100, # tip + 0x0003: 671, + 0x1003: 100, + 0x0004: 588, + 0x1004: 100, + # SELFBALANCE + 0x2002: 0x01FFFFFFF000, + 0x2003: 0x01FFFFFFE000, + 0x2004: 0x01FFFFFFD000, + } + ), + 0xC0DE: Account( + storage={ + # Block 2 + 0x0002: 766, + 0x1002: 100, + # Block 3 + 0x0003: 671, + 0x1003: 100, + # Block 4 + 0x0004: 588, + 0x1004: 100, + # SELFBALANCE + 0x2002: 0x1000, + 0x2003: 0x2000, + 0x2004: 0x3000, + } + ), + } + return post + + @pytest.fixture + def genesis_environment(self): # noqa: D102 + return Environment( + base_fee=1000, + coinbase="0xba5e000000000000000000000000000000000000", + ) + + @pytest.fixture + def fixture_format(self, check_hive: bool): # noqa: D102 + return ( + FixtureFormats.BLOCKCHAIN_TEST_HIVE if check_hive else FixtureFormats.BLOCKCHAIN_TEST + ) + + @pytest.fixture + def blockchain_test_fixture( # noqa: D102 + self, + check_hive: bool, + fork: Fork, + pre: Mapping[Any, Any], + post: Mapping[Any, Any], + blocks: List[Block], + genesis_environment: Environment, + fixture_format: FixtureFormats, + ): + t8n = GethTransitionTool() + return BlockchainTest( + pre=pre, + post=post, + blocks=blocks, + genesis_environment=genesis_environment, + tag="my_blockchain_test_valid_txs", + fixture_format=fixture_format, + ).generate( + t8n=t8n, + fork=fork, + ) + + @pytest.mark.parametrize("fork", [London, Shanghai], indirect=True) + def test_fill_blockchain_valid_txs( # noqa: D102 + self, + fork: Fork, + solc_version: str, + check_hive: bool, + fixture_format: FixtureFormats, + expected_json_file: str, + blockchain_test_fixture: BlockchainFixture, + ): + assert blockchain_test_fixture.format() == fixture_format + assert isinstance(blockchain_test_fixture, BlockchainFixtureCommon) + + fixture = { + f"000/my_blockchain_test/{fork.name()}": blockchain_test_fixture.to_json(), + } + + with open( + os.path.join( + "src", + "ethereum_test_tools", + "tests", + "test_filling", + "fixtures", + expected_json_file, + ) + ) as f: + expected = json.load(f) + + fixture_json = to_json(fixture) + remove_info_metadata(fixture_json) + + if solc_version >= 
SOLC_PADDING_VERSION: + expected = expected["solc=padding_version"] + else: + expected = expected[f"solc={solc_version}"] + + assert fixture_json == expected + + @pytest.mark.parametrize("fork", [London], indirect=True) + def test_fixture_header_join(self, blockchain_test_fixture: BlockchainFixture): + """ + Test `FixtureHeader.join()`. + """ + block = blockchain_test_fixture.blocks[0] + new_difficulty = block.block_header.difficulty - 1 # type: ignore + + new_state_root = Hash(12345) + # See description of https://github.com/ethereum/execution-spec-tests/pull/398 + new_transactions_root = "0x100" + header_new_fields = Header( + difficulty=new_difficulty, + state_root=new_state_root, + transactions_root=new_transactions_root, + ) + + updated_block_header = block.block_header.join(header_new_fields) # type: ignore + assert updated_block_header.difficulty == new_difficulty + assert updated_block_header.state_root == new_state_root + assert updated_block_header.transactions_root == Hash(new_transactions_root) + assert updated_block_header.hash == block.block_header.hash # type: ignore + assert isinstance(updated_block_header.transactions_root, Hash) + + +@pytest.mark.parametrize( + "fork,check_hive,expected_json_file", + [ + (London, False, "blockchain_london_invalid_filled.json"), + (Shanghai, True, "blockchain_shanghai_invalid_filled_hive.json"), + ], +) +def test_fill_blockchain_invalid_txs( + fork: Fork, solc_version: str, check_hive: bool, expected_json_file: str +): + """ + Test `ethereum_test.filler.fill_fixtures` with `BlockchainTest`. + """ + pre = { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=0x1000000000000000000), + "0xd02d72E067e77158444ef2020Ff2d325f929B363": Account( + balance=0x1000000000000000000, nonce=1 + ), + "0xcccccccccccccccccccccccccccccccccccccccc": Account( + balance=0x10000000000, + nonce=1, + code=Yul( + """ + { + sstore(number(), basefee()) + sstore(add(number(), 0x1000), sub(gasprice(), basefee())) + sstore(add(number(), 0x2000), selfbalance()) + stop() + } + """, + fork=fork, + ), + ), + "0xcccccccccccccccccccccccccccccccccccccccd": Account( + balance=0x20000000000, + nonce=1, + code=Yul( + """ + { + let throwMe := delegatecall(gas(), + 0xcccccccccccccccccccccccccccccccccccccccc, + 0, 0, 0, 0) + } + """, + fork=fork, + ), + ), + 0xC0DE: Account( + balance=0, + nonce=1, + code=Yul( + """ + { + let throwMe := delegatecall(gas(), + 0xcccccccccccccccccccccccccccccccccccccccc, + 0, 0, 0, 0) + } + """, + fork=fork, + ), + ), + "0xccccccccccccccccccccccccccccccccccccccce": Account( + balance=0x20000000000, + nonce=1, + code=Yul( + """ + { + let throwMe := call(gas(), 0xC0DE, 0x1000, + 0, 0, 0, 0) + throwMe := delegatecall(gas(), + 0xcccccccccccccccccccccccccccccccccccccccc, + 0, 0, 0, 0) + } + """, + fork=fork, + ), + ), + } + + blocks: List[Block] = [ + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x01", + nonce=0, + gas_limit=1000000, + max_priority_fee_per_gas=1, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + ], + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0201", + nonce=1, + gas_limit=1000000, + max_priority_fee_per_gas=10, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0202", + nonce=2, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + ), + Transaction( + 
data="0x0203", + nonce=3, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", + ), + ], + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0301", + nonce=4, + gas_limit=1000000, + max_priority_fee_per_gas=1000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0302", + nonce=5, + gas_limit=1000000, + max_priority_fee_per_gas=100000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + error=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS, + ), + ], + exception=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS, + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0301", + nonce=4, + gas_limit=1000000, + max_priority_fee_per_gas=1000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0303", + nonce=5, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", + ), + Transaction( + data="0x0304", + nonce=6, + gas_limit=1000000, + max_priority_fee_per_gas=100000, + max_fee_per_gas=100000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + ), + ], + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0401", + nonce=7, + gas_limit=1000000, + max_priority_fee_per_gas=1000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0402", + nonce=8, + gas_limit=1000000, + max_priority_fee_per_gas=100000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + error=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS, + ), + ], + exception=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS, + ), + Block( + coinbase="0xba5e000000000000000000000000000000000000", + txs=[ + Transaction( + data="0x0401", + nonce=7, + gas_limit=1000000, + max_priority_fee_per_gas=1000, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", + ), + Transaction( + data="0x0403", + nonce=8, + gas_limit=1000000, + max_priority_fee_per_gas=100, + max_fee_per_gas=1000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE", + ), + Transaction( + data="0x0404", + nonce=9, + gas_limit=1000000, + max_priority_fee_per_gas=100000, + max_fee_per_gas=100000, + to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD", + ), + ], + ), + ] + + post = { + "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC": Account( + storage={ + # BASEFEE and the tip in block 1 + 0x0001: 875, # BASEFEE + 0x1001: 1, # tip + # Block 2 + 0x0002: 766, # BASEFEE + 0x1002: 10, # tip + # Block 3 + 0x0003: 671, + 0x1003: 329, + # Block 4 + 0x0004: 588, + 0x1004: 412, + # SELFBALANCE, always the same + 0x2001: 0x010000000000, + 0x2002: 0x010000000000, + 0x2003: 0x010000000000, + 0x2004: 0x010000000000, + } + ), + "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD": Account( + storage={ + # Block 2 + 0x0002: 766, # BASEFEE + 0x1002: 100, # tip + # Block 3 + 0x0003: 671, + 0x1003: 99329, + # Block 4 + 0x0004: 588, + 0x1004: 99412, + # SELFBALANCE, always the same + 0x2002: 0x020000000000, + 0x2003: 0x020000000000, + 0x2004: 0x020000000000, + } + ), + "0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE": Account( + storage={ + # Block 2 + 0x0002: 766, # BASEFEE + 0x1002: 100, # tip + 0x0003: 671, + 0x1003: 100, + 0x0004: 588, + 0x1004: 100, + # SELFBALANCE 
+ 0x2002: 0x01FFFFFFF000, + 0x2003: 0x01FFFFFFE000, + 0x2004: 0x01FFFFFFD000, + } + ), + 0xC0DE: Account( + storage={ + # Block 2 + 0x0002: 766, + 0x1002: 100, + # Block 3 + 0x0003: 671, + 0x1003: 100, + # Block 4 + 0x0004: 588, + 0x1004: 100, + # SELFBALANCE + 0x2002: 0x1000, + 0x2003: 0x2000, + 0x2004: 0x3000, + } + ), + } + + # We start genesis with a baseFee of 1000 + genesis_environment = Environment( + base_fee=1000, + coinbase="0xba5e000000000000000000000000000000000000", + ) + + t8n = GethTransitionTool() + fixture_format = ( + FixtureFormats.BLOCKCHAIN_TEST_HIVE if check_hive else FixtureFormats.BLOCKCHAIN_TEST + ) + generated_fixture = BlockchainTest( + pre=pre, + post=post, + blocks=blocks, + genesis_environment=genesis_environment, + fixture_format=fixture_format, + ).generate( + t8n=t8n, + fork=fork, + ) + assert generated_fixture.format() == fixture_format + assert isinstance(generated_fixture, BlockchainFixtureCommon) + fixture = { + f"000/my_blockchain_test/{fork.name()}": generated_fixture.to_json(), + } + + with open( + os.path.join( + "src", + "ethereum_test_tools", + "tests", + "test_filling", + "fixtures", + expected_json_file, + ) + ) as f: + expected = json.load(f) + + fixture_json = to_json(fixture) + remove_info_metadata(fixture_json) + + if solc_version >= SOLC_PADDING_VERSION: + expected = expected["solc=padding_version"] + else: + expected = expected[f"solc={solc_version}"] + + assert fixture_json == expected diff --git a/src/ethereum_test_tools/tests/test_helpers.py b/src/ethereum_test_tools/tests/test_helpers.py index b8227174b4..bf65d4e81d 100644 --- a/src/ethereum_test_tools/tests/test_helpers.py +++ b/src/ethereum_test_tools/tests/test_helpers.py @@ -4,19 +4,21 @@ import pytest -from ..common import compute_create2_address, compute_create_address, to_address +from ..common import Address, compute_create2_address, compute_create_address -def test_to_address(): +def test_address(): """ - Test `ethereum_test.helpers.to_address`. + Test `ethereum_test.base_types.Address`. 
""" - assert to_address("0x0") == "0x0000000000000000000000000000000000000000" - assert to_address(0) == "0x0000000000000000000000000000000000000000" - assert to_address(1) == "0x0000000000000000000000000000000000000001" - assert to_address(10) == "0x000000000000000000000000000000000000000a" - assert to_address("0x10") == "0x0000000000000000000000000000000000000010" - assert to_address(2 ** (20 * 8) - 1) == "0xffffffffffffffffffffffffffffffffffffffff" + assert Address("0x0") == "0x0000000000000000000000000000000000000000" + assert Address(0) == "0x0000000000000000000000000000000000000000" + assert Address(1) == "0x0000000000000000000000000000000000000001" + assert Address(10) == "0x000000000000000000000000000000000000000a" + assert Address("0x10") == "0x0000000000000000000000000000000000000010" + assert Address(2 ** (20 * 8) - 1) == "0xffffffffffffffffffffffffffffffffffffffff" + assert Address(0) == Address(0) + assert Address(0) != Address(1) @pytest.mark.parametrize( diff --git a/src/ethereum_test_tools/tests/test_types.py b/src/ethereum_test_tools/tests/test_types.py index d725354d8d..73c13ecf05 100644 --- a/src/ethereum_test_tools/tests/test_types.py +++ b/src/ethereum_test_tools/tests/test_types.py @@ -17,19 +17,15 @@ to_json, withdrawals_root, ) +from ..common.base_types import Address, Bloom, Bytes, Hash, HeaderNonce, ZeroPaddedHexNumber from ..common.constants import TestPrivateKey -from ..common.types import ( - Address, - Alloc, - Bloom, - Bytes, +from ..common.types import Alloc +from ..exceptions import BlockException, TransactionException +from ..spec.blockchain.types import ( FixtureEngineNewPayload, FixtureExecutionPayload, FixtureHeader, FixtureTransaction, - Hash, - HeaderNonce, - ZeroPaddedHexNumber, ) @@ -39,23 +35,27 @@ def test_storage(): """ s = Storage({"10": "0x10"}) - assert 10 in s.data - assert s.data[10] == 16 + assert 10 in s + assert s[10] == 16 s = Storage({"10": "10"}) - assert 10 in s.data - assert s.data[10] == 10 + assert 10 in s + assert s[10] == 10 s = Storage({10: 10}) - assert 10 in s.data - assert s.data[10] == 10 + assert 10 in s + assert s[10] == 10 + + iter_s = iter(Storage({10: 20, "11": "21"})) + assert next(iter_s) == 10 + assert next(iter_s) == 11 s["10"] = "0x10" s["0x10"] = "10" - assert s.data[10] == 16 - assert s.data[16] == 10 + assert s[10] == 16 + assert s[16] == 10 assert "10" in s assert "0xa" in s @@ -67,8 +67,8 @@ def test_storage(): assert 10 not in s s = Storage({-1: -1, -2: -2}) - assert s.data[-1] == -1 - assert s.data[-2] == -2 + assert s[-1] == -1 + assert s[-2] == -2 d = to_json(s) assert ( d["0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"] @@ -278,10 +278,10 @@ def test_storage(): ) def test_account_check_alloc(account: Account, alloc: Dict[Any, Any], should_pass: bool): if should_pass: - account.check_alloc("test", alloc) + account.check_alloc(Address(1), alloc) else: with pytest.raises(Exception) as _: - account.check_alloc("test", alloc) + account.check_alloc(Address(1), alloc) @pytest.mark.parametrize( @@ -941,7 +941,7 @@ def test_account_merge( ], withdrawals=[Withdrawal(index=0, validator=1, address=0x1234, amount=2)], ), - valid=False, + validation_error=TransactionException.INTRINSIC_GAS_TOO_LOW, version=1, ), { @@ -985,7 +985,7 @@ def test_account_merge( to_json(Withdrawal(index=0, validator=1, address=0x1234, amount=2)) ], }, - "valid": False, + "validationError": "TransactionException.INTRINSIC_GAS_TOO_LOW", "version": "1", }, id="fixture_engine_new_payload_1", @@ -1034,7 +1034,7 @@ def 
test_account_merge( withdrawals=[Withdrawal(index=0, validator=1, address=0x1234, amount=2)], ), version=1, - valid=True, + validation_error=BlockException.INCORRECT_BLOCK_FORMAT, blob_versioned_hashes=[bytes([0]), bytes([1])], error_code=EngineAPIError.InvalidRequest, ), @@ -1080,7 +1080,7 @@ def test_account_merge( ], }, "version": "1", - "valid": True, + "validationError": "BlockException.INCORRECT_BLOCK_FORMAT", "expectedBlobVersionedHashes": [ "0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000001", diff --git a/src/ethereum_test_tools/tests/test_types_blockchain_test.py b/src/ethereum_test_tools/tests/test_types_blockchain_test.py new file mode 100644 index 0000000000..1f809ca0e9 --- /dev/null +++ b/src/ethereum_test_tools/tests/test_types_blockchain_test.py @@ -0,0 +1,103 @@ +""" +Test the blockchain test types. +""" +from dataclasses import replace + +import pytest + +from ..common.base_types import Address, Bloom, Bytes, Hash, HeaderNonce +from ..spec.blockchain.types import FixtureHeader, Header + +fixture_header_ones = FixtureHeader( + parent_hash=Hash(1), + ommers_hash=Hash(1), + coinbase=Address(1), + state_root=Hash(1), + transactions_root=Hash(1), + receipt_root=Hash(1), + bloom=Bloom(1), + difficulty=1, + number=1, + gas_limit=1, + gas_used=1, + timestamp=1, + extra_data=Bytes([1]), + mix_digest=Hash(1), + nonce=HeaderNonce(1), + base_fee=1, + withdrawals_root=Hash(1), + blob_gas_used=1, + excess_blob_gas=1, + hash=Hash(1), +) + + +@pytest.mark.parametrize( + "fixture_header,modifier,fixture_header_expected", + [ + pytest.param( + fixture_header_ones, + Header(), + fixture_header_ones, + id="default_header", + ), + pytest.param( + fixture_header_ones, + Header(state_root="0x100"), + replace(fixture_header_ones, state_root=Hash("0x100")), + id="state_root_as_str", + ), + pytest.param( + fixture_header_ones, + Header(state_root=100), + replace(fixture_header_ones, state_root=Hash(100)), + id="state_root_as_int", + ), + pytest.param( + fixture_header_ones, + Header(state_root=Hash(100)), + replace(fixture_header_ones, state_root=Hash(100)), + id="state_root_as_hash", + ), + pytest.param( + fixture_header_ones, + Header(withdrawals_root=Header.REMOVE_FIELD), # state_root is not removable + replace(fixture_header_ones, withdrawals_root=None), + id="state_root_as_header_remove_field", + ), + pytest.param( + fixture_header_ones, + Header(state_root=None), + fixture_header_ones, + id="state_root_as_none", + ), + pytest.param( + fixture_header_ones, + Header(bloom="0x100"), + replace(fixture_header_ones, bloom=Bloom("0x100")), + id="bloom_as_str", + ), + pytest.param( + fixture_header_ones, + Header(bloom=100), + replace(fixture_header_ones, bloom=Bloom(100)), + id="bloom_as_int", + ), + pytest.param( + fixture_header_ones, + Header(bloom=Hash(100)), + replace(fixture_header_ones, bloom=Bloom(100)), + id="bloom_as_hash", + ), + pytest.param( + fixture_header_ones, + Header(state_root="0x100", bloom=Hash(200), difficulty=300), + replace(fixture_header_ones, state_root=Hash(0x100), bloom=Bloom(200), difficulty=300), + id="multiple_fields", + ), + ], +) +def test_fixture_header_join( + fixture_header: FixtureHeader, modifier: Header, fixture_header_expected: FixtureHeader +): + assert fixture_header.join(modifier) == fixture_header_expected diff --git a/src/ethereum_test_tools/tests/test_vm.py b/src/ethereum_test_tools/tests/test_vm.py index 18d18b19e4..dc71ae91ed 100644 --- 
a/src/ethereum_test_tools/tests/test_vm.py +++ b/src/ethereum_test_tools/tests/test_vm.py @@ -4,6 +4,8 @@ import pytest +from ..common.base_types import Address +from ..vm.opcode import Macros as Om from ..vm.opcode import Opcodes as Op @@ -106,6 +108,44 @@ + [0x55] ), ), + ( + Op.CALL(Op.GAS, Op.PUSH20(0x1234), 0, 0, 0, 0, 32), + b"\x60\x20\x60\x00\x60\x00\x60\x00\x60\x00\x73\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x12\x34\x5A\xF1", + ), + ( + Op.CALL(Op.GAS, Address(0x1234), 0, 0, 0, 0, 32), + b"\x60\x20\x60\x00\x60\x00\x60\x00\x60\x00\x73\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x12\x34\x5A\xF1", + ), + (Op.ADD(1, 2), bytes([0x60, 0x02, 0x60, 0x01, 0x01])), + (Op.ADD(Op.ADD(1, 2), 3), bytes([0x60, 0x03, 0x60, 0x02, 0x60, 0x01, 0x01, 0x01])), + ( + Op.CALL(1, 123, 4, 5, 6, 7, 8), + b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x60\x7b\x60\x01\xf1", + ), + ( + Op.CALL(1, Address(0x0123), 4, 5, 6, 7, 8), + b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x73\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x23\x60\x01\xf1", + ), + ( + Op.CALL(1, 0x0123, 4, 5, 6, 7, 8), + b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x61\x01\x23\x60\x01\xf1", + ), + ( + Op.CALL(1, 123, 4, 5, 6, 7, 8), + b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x60\x7b\x60\x01\xf1", + ), + ( + Op.CREATE(1, Address(12), 4, 5, 6, 7, 8), + b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x73\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x60\x01\xf0", + ), + ( + Om.OOG(), + bytes([0x64, 0x17, 0x48, 0x76, 0xE8, 0x00, 0x60, 0x00, 0x20]), + ), ], ) def test_opcodes(opcodes: bytes, expected: bytes): @@ -121,4 +161,14 @@ def test_opcodes_repr(): """ assert f"{Op.CALL}" == "CALL" assert f"{Op.DELEGATECALL}" == "DELEGATECALL" + assert f"{Om.OOG}" == "OOG" assert str(Op.ADD) == "ADD" + + +def test_macros(): + """ + Test opcode and macros interaction + """ + assert (Op.PUSH1(1) + Om.OOG) == (Op.PUSH1(1) + Op.SHA3(0, 100000000000)) + for opcode in Op: + assert opcode != Om.OOG diff --git a/src/ethereum_test_tools/vm/__init__.py b/src/ethereum_test_tools/vm/__init__.py index 9b7afda48e..c1042ef82b 100644 --- a/src/ethereum_test_tools/vm/__init__.py +++ b/src/ethereum_test_tools/vm/__init__.py @@ -1,10 +1,12 @@ """ Ethereum Virtual Machine related definitions and utilities. """ -from .opcode import Opcode, OpcodeCallArg, Opcodes + +from .opcode import Macro, Opcode, OpcodeCallArg, Opcodes __all__ = ( "Opcode", + "Macro", "OpcodeCallArg", "Opcodes", ) diff --git a/src/ethereum_test_tools/vm/opcode.py b/src/ethereum_test_tools/vm/opcode.py index 2cf78c0623..e2bbb168ea 100644 --- a/src/ethereum_test_tools/vm/opcode.py +++ b/src/ethereum_test_tools/vm/opcode.py @@ -1,17 +1,24 @@ """ Ethereum Virtual Machine opcode definitions. + +Acknowledgments: The individual opcode documentation below is due to the work by +[smlXL](https://github.com/smlxl) on [evm.codes](https://www.evm.codes/), available as open +source [github.com/smlxl/evm.codes](https://github.com/smlxl/evm.codes) - thank you! And thanks +to @ThreeHrSleep for integrating it in the docstrings. """ + from enum import Enum from typing import List, Union +from ..common.base_types import FixedSizeBytes + def _get_int_size(n: int) -> int: """ Returns the size of an integer in bytes. 
""" if n < 0: - # Negative numbers in the EVM are represented as two's complement - # of 32 bytes + # Negative numbers in the EVM are represented as two's complement of 32 bytes return 32 byte_count = 0 while n: @@ -23,10 +30,58 @@ def _get_int_size(n: int) -> int: _push_opcodes_byte_list = [bytes([0x5F + x]) for x in range(33)] -class Opcode(bytes): +class OpcodeMacroBase(bytes): + """ + Base class for Macro and Opcode, inherits from bytes. + + This class is designed to represent a base structure for individual evm opcodes + and opcode macros. + """ + + _name_: str + + def __new__(cls, *args): + """ + Since OpcodeMacroBase is never instantiated directly but through + subclassing, this method simply forwards the arguments to the + bytes constructor. + """ + return super().__new__(cls, *args) + + def __call__(self, *_: Union[int, bytes, str, "Opcode", FixedSizeBytes]) -> bytes: + """ + Make OpcodeMacroBase callable, so that arguments can directly be + provided to an Opcode in order to more conveniently generate + bytecode (implemented in the subclass). + """ + # ignore opcode arguments + return bytes(self) + + def __str__(self) -> str: + """ + Return the name of the opcode, assigned at Enum creation. + """ + return self._name_ + + def __eq__(self, other): + """ + Allows comparison between OpcodeMacroBase instances and bytes objects. + + Raises: + - NotImplementedError: if the comparison is not between an OpcodeMacroBase + or a bytes object. + """ + if isinstance(other, OpcodeMacroBase): + return self._name_ == other._name_ + if isinstance(other, bytes): + return bytes(self) == other + raise NotImplementedError(f"Unsupported type for comparison f{type(other)}") + + +class Opcode(OpcodeMacroBase): """ - Represents a single Opcode instruction in the EVM, with extra - metadata useful to parametrize tests. + Represents a single Opcode instruction in the EVM, with extra metadata useful to parametrize + tests. Parameters ---------- @@ -41,7 +96,6 @@ class Opcode(bytes): pushed_stack_items: int min_stack_height: int data_portion_length: int - _name_: str def __new__( cls, @@ -56,8 +110,8 @@ def __new__( Creates a new opcode instance. """ if type(opcode_or_byte) is Opcode: - # Required because Enum class calls the base class with the - # instantiated object as parameter. + # Required because Enum class calls the base class with the instantiated object as + # parameter. return opcode_or_byte elif isinstance(opcode_or_byte, int): obj = super().__new__(cls, [opcode_or_byte]) @@ -66,44 +120,45 @@ def __new__( obj.min_stack_height = min_stack_height obj.data_portion_length = data_portion_length return obj + raise TypeError("Opcode constructor '__new__' didn't return an instance!") - def __call__(self, *args_t: Union[int, bytes, str, "Opcode"]) -> bytes: + def __call__(self, *args_t: Union[int, bytes, str, "Opcode", FixedSizeBytes]) -> bytes: """ - Makes all opcode instances callable to return formatted bytecode, - which constitutes a data portion, that is located after the opcode - byte, and pre-opcode bytecode, which is normally used to set up the - stack. + Makes all opcode instances callable to return formatted bytecode, which constitutes a data + portion, that is located after the opcode byte, and pre-opcode bytecode, which is normally + used to set up the stack. + + This useful to automatically format, e.g., push opcodes and their data sections as + `Opcodes.PUSH1(0x00)`. - This useful to automatically format, e.g., push opcodes and their - data sections as `Opcodes.PUSH1(0x00)`. 
+ Data sign is automatically detected but for this reason the range of the input must be: + `[-2^(data_portion_bits-1), 2^(data_portion_bits)]` where: `data_portion_bits == + data_portion_length * 8` - Data sign is automatically detected but for this reason the range - of the input must be: - `[-2^(data_portion_bits-1), 2^(data_portion_bits)]` - where: - `data_portion_bits == data_portion_length * 8` + For the stack, the arguments are set up in the opposite order they are given, so the first + argument is the last item pushed to the stack. - For the stack, the arguments are set up in the opposite order they are - given, so the first argument is the last item pushed to the stack. + The resulting stack arrangement does not take into account opcode stack element + consumption, so the stack height is not guaranteed to be correct and the user must take + this into consideration. - The resulting stack arrangement does not take into account opcode stack - element consumption, so the stack height is not guaranteed to be - correct and the user must take this into consideration. + Integers can also be used as stack elements, in which case they are automatically converted + to PUSH operations, and negative numbers always use a PUSH32 operation. - Integers can also be used as stack elements, in which case they are - automatically converted to PUSH operations, and negative numbers always - use a PUSH32 operation. + `FixedSizeBytes` can also be used as stack elements, which includes `Address` and `Hash` + types, for each of which a PUSH operation is automatically generated, `PUSH20` and `PUSH32` + respectively. Hex-strings will automatically be converted to bytes. """ - args: List[Union[int, bytes, str, "Opcode"]] = list(args_t) + args: List[Union[int, bytes, str, "Opcode", FixedSizeBytes]] = list(args_t) pre_opcode_bytecode = bytes() data_portion = bytes() if self.data_portion_length > 0: - # For opcodes with a data portion, the first argument is the data - # and the rest of the arguments form the stack. + # For opcodes with a data portion, the first argument is the data and the rest of the + # arguments form the stack. if len(args) == 0: raise ValueError("Opcode with data portion requires at least one argument") data = args.pop(0) @@ -127,29 +182,35 @@ def __call__(self, *args_t: Union[int, bytes, str, "Opcode"]) -> bytes: # The rest of the arguments conform the stack. while len(args) > 0: data = args.pop() - if isinstance(data, bytes) or isinstance(data, str): + if isinstance(data, int) or isinstance(data, FixedSizeBytes): + # We are going to push a constant to the stack. + data_size = 0 + if isinstance(data, int): + signed = data < 0 + data_size = _get_int_size(data) + if data_size > 32: + raise ValueError("Opcode stack data must be less than 32 bytes") + elif data_size == 0: + # Pushing 0 is done with the PUSH1 opcode for compatibility reasons. + data_size = 1 + data = data.to_bytes( + length=data_size, + byteorder="big", + signed=signed, + ) + elif isinstance(data, FixedSizeBytes): + data_size = data.byte_length + + assert isinstance(data, bytes) + assert data_size > 0 + pre_opcode_bytecode += _push_opcodes_byte_list[data_size] + pre_opcode_bytecode += data + elif isinstance(data, bytes) or isinstance(data, str): if isinstance(data, str): if data.startswith("0x"): data = data[2:] data = bytes.fromhex(data) pre_opcode_bytecode += data - elif isinstance(data, int): - # We are going to push a constant to the stack. 
- signed = data < 0 - data_size = _get_int_size(data) - if data_size > 32: - raise ValueError("Opcode stack data must be less than 32 bytes") - elif data_size == 0: - # Pushing 0 is done with the PUSH1 opcode for compatibility - # reasons. - data_size = 1 - - pre_opcode_bytecode += _push_opcodes_byte_list[data_size] - pre_opcode_bytecode += data.to_bytes( - length=data_size, - byteorder="big", - signed=signed, - ) else: raise TypeError("Opcode stack data must be either an int or a bytes/hex string") @@ -158,8 +219,7 @@ def __call__(self, *args_t: Union[int, bytes, str, "Opcode"]) -> bytes: def __len__(self) -> int: """ - Returns the total bytecode length of the opcode, taking into account - its data portion. + Returns the total bytecode length of the opcode, taking into account its data portion. """ return self.data_portion_length + 1 @@ -167,13 +227,28 @@ def int(self) -> int: """ Returns the integer representation of the opcode. """ - return int.from_bytes(bytes=self, byteorder="big") + return int.from_bytes(self, byteorder="big") - def __str__(self) -> str: + +class Macro(OpcodeMacroBase): + """ + Represents opcode macro replacement, basically holds bytes + """ + + def __new__( + cls, + macro_or_bytes: Union[bytes, "Macro"], + ): """ - Return the name of the opcode, assigned at Enum creation. + Creates a new opcode macro instance. """ - return self._name_ + if type(macro_or_bytes) is Macro: + # Required because Enum class calls the base class with the instantiated object as + # parameter. + return macro_or_bytes + else: + instance = super().__new__(cls, macro_or_bytes) + return instance OpcodeCallArg = Union[int, bytes, Opcode] @@ -185,178 +260,4703 @@ class Opcodes(Opcode, Enum): Contains deprecated and not yet implemented opcodes. - This enum is !! NOT !! meant to be iterated over by the tests. Instead, - create a list with cherry-picked opcodes from this Enum within the test - if iteration is needed. + This enum is !! NOT !! meant to be iterated over by the tests. Instead, create a list with + cherry-picked opcodes from this Enum within the test if iteration is needed. Do !! NOT !! remove or modify existing opcodes from this list. 
""" STOP = Opcode(0x00) + """ + STOP() + ---- + + Description + ---- + Stop execution + + Inputs + ---- + - None + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + 0 + + Source: [evm.codes/#00](https://www.evm.codes/#00) + """ + ADD = Opcode(0x01, popped_stack_items=2, pushed_stack_items=1) + """ + ADD(a, b) = c + ---- + + Description + ---- + Addition operation + + Inputs + ---- + - a: first integer value to add + - b: second integer value to add + + Outputs + ---- + - c: integer result of the addition modulo 2**256 + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#01](https://www.evm.codes/#01) + """ + MUL = Opcode(0x02, popped_stack_items=2, pushed_stack_items=1) + """ + MUL(a, b) = c + ---- + + Description + ---- + Multiplication operation + + Inputs + ---- + - a: first integer value to multiply + - b: second integer value to multiply + + Outputs + ---- + - c: integer result of the multiplication modulo 2**256 + + Fork + ---- + Frontier + + Gas + ---- + 5 + + Source: [evm.codes/#02](https://www.evm.codes/#02) + """ + SUB = Opcode(0x03, popped_stack_items=2, pushed_stack_items=1) + """ + SUB(a, b) = c + ---- + + Description + ---- + Subtraction operation + + Inputs + ---- + - a: first integer value + - b: second integer value + + Outputs + ---- + - c: integer result of the subtraction modulo 2**256 + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#03](https://www.evm.codes/#03) + """ + DIV = Opcode(0x04, popped_stack_items=2, pushed_stack_items=1) + """ + DIV(a, b) = c + ---- + + Description + ---- + Division operation + + Inputs + ---- + - a: numerator + - b: denominator (must be non-zero) + + Outputs + ---- + - c: integer result of the division + + Fork + ---- + Frontier + + Gas + ---- + 5 + + Source: [evm.codes/#04](https://www.evm.codes/#04) + """ + SDIV = Opcode(0x05, popped_stack_items=2, pushed_stack_items=1) + """ + SDIV(a, b) = c + ---- + + Description + ---- + Signed division operation + + Inputs + ---- + - a: signed numerator + - b: signed denominator + + Outputs + ---- + - c: signed integer result of the division. If the denominator is 0, the result will be 0 + ---- + + Fork + ---- + Frontier + + Gas + ---- + 5 + + Source: [evm.codes/#05](https://www.evm.codes/#05) + """ + MOD = Opcode(0x06, popped_stack_items=2, pushed_stack_items=1) + """ + MOD(a, b) = c + ---- + + Description + ---- + Modulo operation + + Inputs + ---- + - a: integer numerator + - b: integer denominator + + Outputs + ---- + - a % b: integer result of the integer modulo. If the denominator is 0, the result will be 0 + + Fork + ---- + Frontier + + Gas + ---- + 5 + + Source: [evm.codes/#06](https://www.evm.codes/#06) + """ + SMOD = Opcode(0x07, popped_stack_items=2, pushed_stack_items=1) + """ + SMOD(a, b) = c + ---- + + Description + ---- + Signed modulo remainder operation + + Inputs + ---- + - a: integer numerator + - b: integer denominator + + Outputs + ---- + - a % b: integer result of the signed integer modulo. If the denominator is 0, the result will + be 0 + + Fork + ---- + Frontier + + Gas + ---- + 5 + + Source: [evm.codes/#07](https://www.evm.codes/#07) + """ + ADDMOD = Opcode(0x08, popped_stack_items=3, pushed_stack_items=1) + """ + ADDMOD(a, b, c) = d + ---- + + Description + ---- + Modular addition operation with overflow check + + Inputs + ---- + - a: first integer value + - b: second integer value + - c: integer denominator + + Outputs + ---- + - (a + b) % N: integer result of the addition followed by a modulo. 
If the denominator is 0, + the result will be 0 + + Fork + ---- + Frontier + + Gas + ---- + 8 + + Source: [evm.codes/#08](https://www.evm.codes/#08) + """ + MULMOD = Opcode(0x09, popped_stack_items=3, pushed_stack_items=1) + """ + MULMOD(a, b, N) = d + ---- + + Description + ---- + Modulo multiplication operation + + Inputs + ---- + - a: first integer value to multiply + - b: second integer value to multiply + - N: integer denominator + + Outputs + ---- + - (a * b) % N: integer result of the multiplication followed by a modulo. If the denominator + is 0, the result will be 0 + + Fork + ---- + Frontier + + Gas + ---- + 8 + + Source: [evm.codes/#09](https://www.evm.codes/#09) + """ + EXP = Opcode(0x0A, popped_stack_items=2, pushed_stack_items=1) + """ + EXP(a, exponent) = a ** exponent + ---- + + Description + ---- + Exponential operation + + Inputs + ---- + - a: integer base + - exponent: integer exponent + + Outputs + ---- + - a ** exponent: integer result of the exponential operation modulo 2**256 + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 10 + - dynamic_gas = 50 * exponent_byte_size + + Source: [evm.codes/#0A](https://www.evm.codes/#0A) + """ + SIGNEXTEND = Opcode(0x0B, popped_stack_items=2, pushed_stack_items=1) + """ + SIGNEXTEND(b, x) = y + ---- + + Description + ---- + Sign extension operation + + Inputs + ---- + - b: size in byte - 1 of the integer to sign extend + - x: integer value to sign extend + + Outputs + ---- + - y: integer result of the sign extend + + Fork + ---- + Frontier + + Gas + ---- + 5 + + Source: [evm.codes/#0B](https://www.evm.codes/#0B) + """ LT = Opcode(0x10, popped_stack_items=2, pushed_stack_items=1) + """ + LT(a, b) = a < b + ---- + + Description + ---- + Less-than comparison + + Inputs + ---- + - a: left side integer value + - b: right side integer value + + Outputs + ---- + - a < b: 1 if the left side is smaller, 0 otherwise + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#10](https://www.evm.codes/#10) + """ + GT = Opcode(0x11, popped_stack_items=2, pushed_stack_items=1) + """ + GT(a, b) = a > b + ---- + + Description + ---- + Greater-than comparison + + Inputs + ---- + - a: left side integer + - b: right side integer + + Outputs + ---- + - a > b: 1 if the left side is bigger, 0 otherwise + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#11](https://www.evm.codes/#11) + """ + SLT = Opcode(0x12, popped_stack_items=2, pushed_stack_items=1) + """ + SLT(a, b) = a < b + ---- + + Description + ---- + Signed less-than comparison + + Inputs + ---- + - a: left side signed integer + - b: right side signed integer + + Outputs + ---- + - a < b: 1 if the left side is smaller, 0 otherwise + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#12](https://www.evm.codes/#12) + """ + SGT = Opcode(0x13, popped_stack_items=2, pushed_stack_items=1) + """ + SGT(a, b) = a > b + ---- + + Description + ---- + Signed greater-than comparison + + Inputs + ---- + - a: left side signed integer + - b: right side signed integer + + Outputs + ---- + - a > b: 1 if the left side is bigger, 0 otherwise + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#13](https://www.evm.codes/#13) + """ + EQ = Opcode(0x14, popped_stack_items=2, pushed_stack_items=1) + """ + EQ(a, b) = a == b + ---- + + Description + ---- + Equality comparison + + Inputs + ---- + - a: left side integer + - b: right side integer + + Outputs + ---- + - a == b: 1 if the left side is equal to the right side, 0 otherwise + + Fork + ---- + 
Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#14](https://www.evm.codes/#14) + """ + ISZERO = Opcode(0x15, popped_stack_items=1, pushed_stack_items=1) + """ + ISZERO(a) = a == 0 + ---- + + Description + ---- + Is-zero comparison + + Inputs + ---- + - a: integer + + Outputs + ---- + - a == 0: 1 if a is 0, 0 otherwise + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#15](https://www.evm.codes/#15) + """ + AND = Opcode(0x16, popped_stack_items=2, pushed_stack_items=1) + """ + AND(a, b) = a & b + ---- + + Description + ---- + Bitwise AND operation + + Inputs + ---- + - a: first binary value + - b: second binary value + + Outputs + ---- + - a & b: the bitwise AND result + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#16](https://www.evm.codes/#16) + """ + OR = Opcode(0x17, popped_stack_items=2, pushed_stack_items=1) + """ + OR(a, b) = a | b + ---- + + Description + ---- + Bitwise OR operation + + Inputs + ---- + - a: first binary value + - b: second binary value + + Outputs + ---- + - a | b: the bitwise OR result + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#17](https://www.evm.codes/#17) + """ + XOR = Opcode(0x18, popped_stack_items=2, pushed_stack_items=1) + """ + XOR(a, b) = a ^ b + ---- + + Description + ---- + Bitwise XOR operation + + Inputs + ---- + - a: first binary value + - b: second binary value + + Outputs + ---- + - a ^ b: the bitwise XOR result + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#18](https://www.evm.codes/#18) + """ + NOT = Opcode(0x19, popped_stack_items=1, pushed_stack_items=1) + """ + NOT(a) = ~a + ---- + + Description + ---- + Bitwise NOT operation + + Inputs + ---- + - a: binary value + + Outputs + ---- + - ~a: the bitwise NOT result + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#19](https://www.evm.codes/#19) + """ + BYTE = Opcode(0x1A, popped_stack_items=2, pushed_stack_items=1) - SHL = Opcode(0x1B, popped_stack_items=2, pushed_stack_items=1) - SHR = Opcode(0x1C, popped_stack_items=2, pushed_stack_items=1) - SAR = Opcode(0x1D, popped_stack_items=2, pushed_stack_items=1) + """ + BYTE(i, x) = y + ---- - SHA3 = Opcode(0x20, popped_stack_items=2, pushed_stack_items=1) + Description + ---- + Extract a byte from the given position in the value + + Inputs + ---- + - i: byte offset starting from the most significant byte + - x: 32-byte value + + Outputs + ---- + - y: the indicated byte at the least significant position. If the byte offset is out of range, + the result is 0 + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#1A](https://www.evm.codes/#1A) + """ + + SHL = Opcode(0x1B, popped_stack_items=2, pushed_stack_items=1) + """ + SHL(shift, value) = value << shift + ---- + + Description + ---- + Shift left operation + + Inputs + ---- + - shift: number of bits to shift to the left + - value: 32 bytes to shift + + Outputs + ---- + - value << shift: the shifted value. If shift is bigger than 255, returns 0 + + Fork + ---- + Constantinople + + Gas + ---- + 3 + + Source: [evm.codes/#1B](https://www.evm.codes/#1B) + """ + + SHR = Opcode(0x1C, popped_stack_items=2, pushed_stack_items=1) + """ + SHR(shift, value) = value >> shift + ---- + + Description + ---- + Logical shift right operation + + Inputs + ---- + - shift: number of bits to shift to the right. + - value: 32 bytes to shift + + Outputs + ---- + - value >> shift: the shifted value. 
If shift is bigger than 255, returns 0 + + Fork + ---- + Constantinople + + Gas + ---- + 3 + + Source: [evm.codes/#1C](https://www.evm.codes/#1C) + """ + + SAR = Opcode(0x1D, popped_stack_items=2, pushed_stack_items=1) + """ + SAR(shift, value) = value >> shift + ---- + + Description + ---- + Arithmetic shift right operation + + Inputs + ---- + - shift: number of bits to shift to the right + - value: integer to shift + + Outputs + ---- + - value >> shift: the shifted value + + Fork + ---- + Constantinople + + Gas + ---- + 3 + + Source: [evm.codes/#1D](https://www.evm.codes/#1D) + """ + + SHA3 = Opcode(0x20, popped_stack_items=2, pushed_stack_items=1) + """ + SHA3(start, length) = hash + ---- + + Description + ---- + Compute Keccak-256 hash + + Inputs + ---- + - offset: byte offset in the memory + - size: byte size to read in the memory + + Outputs + ---- + - hash: Keccak-256 hash of the given data in memory + + Fork + ---- + Frontier + + Gas + ---- + - minimum_word_size = (size + 31) / 32 + - static_gas = 30 + - dynamic_gas = 6 * minimum_word_size + memory_expansion_cost + + Source: [evm.codes/#20](https://www.evm.codes/#20) + """ + + ADDRESS = Opcode(0x30, pushed_stack_items=1) + """ + ADDRESS() = address + ---- + + Description + ---- + Get address of currently executing account + + Inputs + ---- + - None + + Outputs + ---- + - address: the 20-byte address of the current account + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#30](https://www.evm.codes/#30) + """ + + BALANCE = Opcode(0x31, popped_stack_items=1, pushed_stack_items=1) + """ + BALANCE(address) = balance + ---- + + Description + ---- + Get the balance of the specified account + + Inputs + ---- + - address: 20-byte address of the account to check + + Outputs + ---- + - balance: balance of the given account in wei. Returns 0 if the account doesn't exist + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 0 + - dynamic_gas = 100 if warm_address, 2600 if cold_address + + Source: [evm.codes/#31](https://www.evm.codes/#31) + """ + + ORIGIN = Opcode(0x32, pushed_stack_items=1) + """ + ORIGIN() = address + ---- + + Description + ---- + Get execution origination address + + Inputs + ---- + - None + + Outputs + ---- + - address: the 20-byte address of the sender of the transaction. It can only be an account + without code + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#32](https://www.evm.codes/#32) + """ - ADDRESS = Opcode(0x30, pushed_stack_items=1) - BALANCE = Opcode(0x31, popped_stack_items=1, pushed_stack_items=1) - ORIGIN = Opcode(0x32, pushed_stack_items=1) CALLER = Opcode(0x33, pushed_stack_items=1) + """ + CALLER() = address + ---- + + Description + ---- + Get caller address + + Inputs + ---- + - None + + Outputs + ---- + - address: the 20-byte address of the caller account. 
This is the account that did the last + call (except delegate call) + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#33](https://www.evm.codes/#33) + """ + CALLVALUE = Opcode(0x34, pushed_stack_items=1) + """ + CALLVALUE() = value + ---- + + Description + ---- + Get deposited value by the instruction/transaction responsible for this execution + + Inputs + ---- + - None + + Outputs + ---- + - value: the value of the current call in wei + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#34](https://www.evm.codes/#34) + """ + CALLDATALOAD = Opcode(0x35, popped_stack_items=1, pushed_stack_items=1) + """ + CALLDATALOAD(i) = data[i] + ---- + + Description + ---- + Get input data of current environment + + Inputs + ---- + - i: byte offset in the calldata + + Outputs + ---- + - data[i]: 32-byte value starting from the given offset of the calldata. All bytes after the + end of the calldata are set to 0 + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#35](https://www.evm.codes/#35) + """ + CALLDATASIZE = Opcode(0x36, pushed_stack_items=1) + """ + CALLDATASIZE() = size + ---- + + Description + ---- + Get size of input data in current environment + + Inputs + ---- + - None + + Outputs + ---- + - size: byte size of the calldata + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#36](https://www.evm.codes/#36) + """ + CALLDATACOPY = Opcode(0x37, popped_stack_items=3) + """ + CALLDATACOPY(destOffset, offset, size) + ---- + + Description + ---- + Copy input data in current environment to memory + + Inputs + ---- + - destOffset: byte offset in the memory where the result will be copied + - offset: byte offset in the calldata to copy + - size: byte size to copy + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - minimum_word_size = (size + 31) / 32 + - static_gas = 3 + - dynamic_gas = 3 * minimum_word_size + memory_expansion_cost + + Source: [evm.codes/#37](https://www.evm.codes/#37) + """ + CODESIZE = Opcode(0x38, pushed_stack_items=1) + """ + CODESIZE() = size + ---- + + Description + ---- + Get size of code running in current environment + + Inputs + ---- + - None + + Outputs + ---- + - size: byte size of the code + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#38](https://www.evm.codes/#38) + """ + CODECOPY = Opcode(0x39, popped_stack_items=3) + """ + CODECOPY(destOffset, offset, size) + ---- + + Description + ---- + Copy code running in current environment to memory + + Inputs + ---- + - destOffset: byte offset in the memory where the result will be copied. + - offset: byte offset in the code to copy. 
+ - size: byte size to copy + + Fork + ---- + Frontier + + Gas + ---- + - minimum_word_size = (size + 31) / 32 + - static_gas = 3 + - dynamic_gas = 3 * minimum_word_size + memory_expansion_cost + + Source: [evm.codes/#39](https://www.evm.codes/#39) + """ + GASPRICE = Opcode(0x3A, pushed_stack_items=1) + """ + GASPRICE() = price + ---- + + Description + ---- + Get price of gas in current environment + + Outputs + ---- + - price: gas price in wei per gas + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#3A](https://www.evm.codes/#3A) + """ + EXTCODESIZE = Opcode(0x3B, popped_stack_items=1, pushed_stack_items=1) + """ + EXTCODESIZE(account) = size + ---- + + Description + ---- + Get size of an account's code + + Inputs + ---- + - address: 20-byte address of the contract to query + + Outputs + ---- + - size: byte size of the code + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 0 + - dynamic_gas = 100 if warm_address, 2600 if cold_address + + Source: [evm.codes/#3B](https://www.evm.codes/#3B) + """ + EXTCODECOPY = Opcode(0x3C, popped_stack_items=4) + """ + EXTCODECOPY(addr, destOffset, offset, size) + ---- + + Description + ---- + Copy an account's code to memory + + Inputs + ---- + - address: 20-byte address of the contract to query + - destOffset: byte offset in the memory where the result will be copied + - offset: byte offset in the code to copy + - size: byte size to copy + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - minimum_word_size = (size + 31) / 32 + - static_gas = 0 + - dynamic_gas = 3 * minimum_word_size + memory_expansion_cost + address_access_cost + + Source: [evm.codes/#3C](https://www.evm.codes/#3C) + """ + RETURNDATASIZE = Opcode(0x3D, pushed_stack_items=1) + """ + RETURNDATASIZE() = size + ---- + + Description + ---- + Get size of output data from the previous call from the current environment + + Outputs + ---- + - size: byte size of the return data from the last executed sub context + + Fork + ---- + Byzantium + + Gas + ---- + 2 + + Source: [evm.codes/#3D](https://www.evm.codes/#3D) + """ + RETURNDATACOPY = Opcode(0x3E, popped_stack_items=3) + """ + RETURNDATACOPY(destOffset, offset, size) + ---- + + Description + ---- + Copy output data from the previous call to memory + + Inputs + ---- + - destOffset: byte offset in the memory where the result will be copied + - offset: byte offset in the return data from the last executed sub context to copy + - size: byte size to copy + + Fork + ---- + Byzantium + + Gas + ---- + - minimum_word_size = (size + 31) / 32 + - static_gas = 3 + - dynamic_gas = 3 * minimum_word_size + memory_expansion_cost + + Source: [evm.codes/#3E](https://www.evm.codes/#3E) + """ + EXTCODEHASH = Opcode(0x3F, popped_stack_items=1, pushed_stack_items=1) + """ + EXTCODEHASH(address) = hash + ---- + + Description + ---- + Get hash of an account's code + + Inputs + ---- + - address: 20-byte address of the account + + Outputs + ---- + - hash: hash of the chosen account's code, the empty hash (0xc5d24601...) 
if the account has no + code, or 0 if the account does not exist or has been destroyed + + Fork + ---- + Constantinople + + Gas + ---- + - static_gas = 0 + - dynamic_gas = 100 if warm_address, 2600 if cold_address + + Source: [evm.codes/#3F](https://www.evm.codes/#3F) + """ BLOCKHASH = Opcode(0x40, popped_stack_items=1, pushed_stack_items=1) + """ + BLOCKHASH(block_number) = hash + ---- + + Description + ---- + Get the hash of one of the 256 most recent complete blocks + + Inputs + ---- + - blockNumber: block number to get the hash from. Valid range is the last 256 blocks (not + including the current one). Current block number can be queried with NUMBER + + Outputs + ---- + - hash: hash of the chosen block, or 0 if the block number is not in the valid range + + Fork + ---- + Frontier + + Gas + ---- + 20 + + Source: [evm.codes/#40](https://www.evm.codes/#40) + """ + COINBASE = Opcode(0x41, pushed_stack_items=1) + """ + COINBASE() = address + ---- + + Description + ---- + Get the block's beneficiary address + + Inputs + ---- + - None + + Outputs + ---- + - address: miner's 20-byte address + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#41](https://www.evm.codes/#41) + """ + TIMESTAMP = Opcode(0x42, pushed_stack_items=1) + """ + TIMESTAMP() = timestamp + ---- + + Description + ---- + Get the block's timestamp + + Inputs + ---- + - None + + Outputs + ---- + - timestamp: unix timestamp of the current block + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#42](https://www.evm.codes/#42) + """ + NUMBER = Opcode(0x43, pushed_stack_items=1) + """ + NUMBER() = blockNumber + ---- + + Description + ---- + Get the block's number + + Inputs + ---- + - None + + Outputs + ---- + - blockNumber: current block number + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#43](https://www.evm.codes/#43) + """ + PREVRANDAO = Opcode(0x44, pushed_stack_items=1) + """ + PREVRANDAO() = prevRandao + ---- + + Description + ---- + Get the previous block's RANDAO mix + + Inputs + ---- + - None + + Outputs + ---- + - prevRandao: previous block's RANDAO mix + + Fork + ---- + Merge + + Gas + ---- + 2 + + Source: [evm.codes/#44](https://www.evm.codes/#44) + """ + GASLIMIT = Opcode(0x45, pushed_stack_items=1) + """ + GASLIMIT() = gasLimit + ---- + + Description + ---- + Get the block's gas limit + + Inputs + ---- + - None + + Outputs + ---- + - gasLimit: gas limit + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#45](https://www.evm.codes/#45) + """ + CHAINID = Opcode(0x46, pushed_stack_items=1) + """ + CHAINID() = chainId + ---- + + Description + ---- + Get the chain ID + + Inputs + ---- + - None + + Outputs + ---- + - chainId: chain id of the network + + Fork + ---- + Istanbul + + Gas + ---- + 2 + + Source: [evm.codes/#46](https://www.evm.codes/#46) + """ + SELFBALANCE = Opcode(0x47, pushed_stack_items=1) + """ + SELFBALANCE() = balance + ---- + + Description + ---- + Get balance of currently executing account + + Inputs + ---- + - None + + Outputs + ---- + - balance: balance of the current account in wei + + Fork + ---- + Istanbul + + Gas + ---- + 5 + + Source: [evm.codes/#47](https://www.evm.codes/#47) + """ + BASEFEE = Opcode(0x48, pushed_stack_items=1) + """ + BASEFEE() = baseFee + ---- + + Description + ---- + Get the base fee + + Outputs + ---- + - baseFee: base fee in wei + + Fork + ---- + London + + Gas + ---- + 2 + + Source: [evm.codes/#48](https://www.evm.codes/#48) + """ + BLOBHASH = Opcode(0x49, popped_stack_items=1, 
pushed_stack_items=1) + """ + BLOBHASH(index) = versionedHash + ---- + + Description + ---- + Returns the versioned hash of a single blob contained in the type-3 transaction + + Inputs + ---- + - index: index of the blob + + Outputs + ---- + - versionedHash: versioned hash of the blob + + Fork + ---- + Cancun + + Gas + ---- + 3 + + Source: [eips.ethereum.org/EIPS/eip-4844](https://eips.ethereum.org/EIPS/eip-4844) + """ + BLOBBASEFEE = Opcode(0x4A, popped_stack_items=0, pushed_stack_items=1) + """ + BLOBBASEFEE() = fee + ---- + + Description + ---- + Returns the value of the blob base fee of the block it is executing in + + Inputs + ---- + - None + + Outputs + ---- + - baseFeePerBlobGas: base fee for the blob gas in wei + + Fork + ---- + Cancun + + Gas + ---- + 2 + + Source: [eips.ethereum.org/EIPS/eip-7516](https://eips.ethereum.org/EIPS/eip-7516) + """ POP = Opcode(0x50, popped_stack_items=1) + """ + POP() + ---- + + Description + ---- + Remove item from stack + + Inputs + ---- + - None + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#50](https://www.evm.codes/#50) + """ + MLOAD = Opcode(0x51, popped_stack_items=1, pushed_stack_items=1) + """ + MLOAD(offset) = value + ---- + + Description + ---- + Load word from memory + + Inputs + ---- + - offset: offset in the memory in bytes + + Outputs + ---- + - value: the 32 bytes in memory starting at that offset. If it goes beyond its current size + (see MSIZE), writes 0s + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 3 + - dynamic_gas = memory_expansion_cost + + Source: [evm.codes/#51](https://www.evm.codes/#51) + """ + MSTORE = Opcode(0x52, popped_stack_items=2) + """ + MSTORE(offset, value) + ---- + + Description + ---- + Save word to memory + + Inputs + ---- + - offset: offset in the memory in bytes + - value: 32-byte value to write in the memory + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 3 + - dynamic_gas = memory_expansion_cost + + Source: [evm.codes/#52](https://www.evm.codes/#52) + """ + MSTORE8 = Opcode(0x53, popped_stack_items=2) + """ + MSTORE8(offset, value) + ---- + + Description + ---- + Save byte to memory + + Inputs + ---- + - offset: offset in the memory in bytes + - value: 1-byte value to write in the memory (the least significant byte of the 32-byte stack + value) + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 3 + - dynamic_gas = memory_expansion_cost + + Source: [evm.codes/#53](https://www.evm.codes/#53) + """ + SLOAD = Opcode(0x54, popped_stack_items=1, pushed_stack_items=1) + """ + SLOAD(key) = value + ---- + + Description + ---- + Load word from storage + + Inputs + ---- + - key: 32-byte key in storage + + Outputs + ---- + - value: 32-byte value corresponding to that key. 
0 if that key was never written before + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 0 + - dynamic_gas = 100 if warm_address, 2600 if cold_address + + Source: [evm.codes/#54](https://www.evm.codes/#54) + """ + SSTORE = Opcode(0x55, popped_stack_items=2) + """ + SSTORE(key, value) + ---- + + Description + ---- + Save word to storage + + Inputs + ---- + - key: 32-byte key in storage + - value: 32-byte value to store + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + ``` + static_gas = 0 + + if value == current_value + if key is warm + base_dynamic_gas = 100 + else + base_dynamic_gas = 100 + else if current_value == original_value + if original_value == 0 + base_dynamic_gas = 20000 + else + base_dynamic_gas = 2900 + else + base_dynamic_gas = 100 + + if key is cold: + base_dynamic_gas += 2100 + ``` + + Source: [evm.codes/#55](https://www.evm.codes/#55) + """ + JUMP = Opcode(0x56, popped_stack_items=1) + """ + JUMP(counter) + ---- + + Description + ---- + Alter the program counter + + Inputs + ---- + - counter: byte offset in the deployed code where execution will continue from. Must be a + JUMPDEST instruction + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + 8 + + Source: [evm.codes/#56](https://www.evm.codes/#56) + """ + JUMPI = Opcode(0x57, popped_stack_items=2) + """ + JUMPI(counter, b) + ---- + + Description + ---- + Conditionally alter the program counter + + Inputs + ---- + - counter: byte offset in the deployed code where execution will continue from. Must be a + JUMPDEST instruction + - b: the program counter will be altered with the new value only if this value is different + from 0. Otherwise, the program counter is simply incremented and the next instruction will + be executed + + Fork + ---- + Frontier + + Gas + ---- + 10 + + Source: [evm.codes/#57](https://www.evm.codes/#57) + """ + PC = Opcode(0x58, pushed_stack_items=1) + """ + PC() = counter + ---- + + Description + ---- + Get the value of the program counter prior to the increment corresponding to this instruction + + Inputs + ---- + - None + + Outputs + ---- + - counter: PC of this instruction in the current program. 
+ + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#58](https://www.evm.codes/#58) + """ + MSIZE = Opcode(0x59, pushed_stack_items=1) + """ + MSIZE() = size + ---- + + Description + ---- + Get the size of active memory in bytes + + Outputs + ---- + - size: current memory size in bytes (higher offset accessed until now + 1) + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#59](https://www.evm.codes/#59) + """ + GAS = Opcode(0x5A, pushed_stack_items=1) + """ + GAS() = gas_remaining + ---- + + Description + ---- + Get the amount of available gas, including the corresponding reduction for the cost of this + instruction + + Inputs + ---- + - None + + Outputs + ---- + - gas: remaining gas (after this instruction) + + Fork + ---- + Frontier + + Gas + ---- + 2 + + Source: [evm.codes/#5A](https://www.evm.codes/#5A) + """ + JUMPDEST = Opcode(0x5B) + """ + JUMPDEST() + ---- + + Description + ---- + Mark a valid destination for jumps + + Inputs + ---- + - None + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + 1 + + Source: [evm.codes/#5B](https://www.evm.codes/#5B) + """ + TLOAD = Opcode(0x5C, popped_stack_items=1, pushed_stack_items=1) + """ + TLOAD(key) = value + ---- + + Description + ---- + Load word from transient storage + + Inputs + ---- + - key: 32-byte key in transient storage + + Outputs + ---- + - value: 32-byte value corresponding to that key. 0 if that key was never written + + Fork + ---- + Cancun + + Gas + ---- + 100 + + Source: [eips.ethereum.org/EIPS/eip-1153](https://eips.ethereum.org/EIPS/eip-1153) + """ + TSTORE = Opcode(0x5D, popped_stack_items=2) + """ + TSTORE(key, value) + ---- + + Description + ---- + Save word to transient storage + + Inputs + ---- + - key: 32-byte key in transient storage + - value: 32-byte value to store + + Fork + ---- + Cancun + + Gas + ---- + 100 + + Source: [eips.ethereum.org/EIPS/eip-1153](https://eips.ethereum.org/EIPS/eip-1153) + """ + MCOPY = Opcode(0x5E, popped_stack_items=3) - RETF = Opcode(0x49) + """ + MCOPY(dst, src, length) + ---- + + Description + ---- + Copies areas in memory + + Inputs + ---- + - dst: byte offset in the memory where the result will be copied + - src: byte offset in the calldata to copy + - length: byte size to copy + + Outputs + ---- + - None + + Fork + ---- + Cancun + + Gas + ---- + - minimum_word_size = (length + 31) / 32 + - static_gas = 3 + - dynamic_gas = 3 * minimum_word_size + memory_expansion_cost + + Source: [eips.ethereum.org/EIPS/eip-5656](https://eips.ethereum.org/EIPS/eip-5656) + """ PUSH0 = Opcode(0x5F, pushed_stack_items=1) + """ + PUSH0() = value + ---- + + Description + ---- + Place value 0 on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, equal to 0 + + Fork + ---- + Shanghai + + Gas + ---- + 2 + + Source: [evm.codes/#5F](https://www.evm.codes/#5F) + """ + PUSH1 = Opcode(0x60, pushed_stack_items=1, data_portion_length=1) + """ + PUSH1() = value + ---- + + Description + ---- + Place 1 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#60](https://www.evm.codes/#60) + """ + PUSH2 = Opcode(0x61, pushed_stack_items=1, data_portion_length=2) + """ + PUSH2() = value + ---- + + Description + ---- + Place 2 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + 
Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#61](https://www.evm.codes/#61) + """ + PUSH3 = Opcode(0x62, pushed_stack_items=1, data_portion_length=3) + """ + PUSH3() = value + ---- + + Description + ---- + Place 3 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#62](https://www.evm.codes/#62) + """ + PUSH4 = Opcode(0x63, pushed_stack_items=1, data_portion_length=4) + """ + PUSH4() = value + ---- + + Description + ---- + Place 4 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#63](https://www.evm.codes/#63) + """ + PUSH5 = Opcode(0x64, pushed_stack_items=1, data_portion_length=5) + """ + PUSH5() = value + ---- + + Description + ---- + Place 5 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#64](https://www.evm.codes/#64) + """ + PUSH6 = Opcode(0x65, pushed_stack_items=1, data_portion_length=6) + """ + PUSH6() = value + ---- + + Description + ---- + Place 6 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#65](https://www.evm.codes/#65) + """ + PUSH7 = Opcode(0x66, pushed_stack_items=1, data_portion_length=7) + """ + PUSH7() = value + ---- + + Description + ---- + Place 7 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#66](https://www.evm.codes/#66) + """ + PUSH8 = Opcode(0x67, pushed_stack_items=1, data_portion_length=8) + """ + PUSH8() = value + ---- + + Description + ---- + Place 8 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#67](https://www.evm.codes/#67) + """ + PUSH9 = Opcode(0x68, pushed_stack_items=1, data_portion_length=9) + """ + PUSH9() = value + ---- + + Description + ---- + Place 9 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#68](https://www.evm.codes/#68) + """ + PUSH10 = Opcode(0x69, pushed_stack_items=1, data_portion_length=10) + """ + PUSH10() = value + ---- + + Description + ---- + Place 10 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#69](https://www.evm.codes/#69) + """ + PUSH11 = Opcode(0x6A, pushed_stack_items=1, data_portion_length=11) + """ + PUSH11() = value + ---- + + Description + ---- + Place 11 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: 
[evm.codes/#6A](https://www.evm.codes/#6A) + """ + PUSH12 = Opcode(0x6B, pushed_stack_items=1, data_portion_length=12) + """ + PUSH12() = value + ---- + + Description + ---- + Place 12 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#6B](https://www.evm.codes/#6B) + """ + PUSH13 = Opcode(0x6C, pushed_stack_items=1, data_portion_length=13) + """ + PUSH13() = value + ---- + + Description + ---- + Place 13 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#6C](https://www.evm.codes/#6C) + """ + PUSH14 = Opcode(0x6D, pushed_stack_items=1, data_portion_length=14) + """ + PUSH14() = value + ---- + + Description + ---- + Place 14 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + + Gas + ---- + 3 + + Source: [evm.codes/#6D](https://www.evm.codes/#6D) + """ + PUSH15 = Opcode(0x6E, pushed_stack_items=1, data_portion_length=15) + """ + PUSH15() = value + ---- + + Description + ---- + Place 15 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#6E](https://www.evm.codes/#6E) + """ + PUSH16 = Opcode(0x6F, pushed_stack_items=1, data_portion_length=16) + """ + PUSH16() = value + ---- + + Description + ---- + Place 16 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#6F](https://www.evm.codes/#6F) + """ + PUSH17 = Opcode(0x70, pushed_stack_items=1, data_portion_length=17) + """ + PUSH17() = value + ---- + + Description + ---- + Place 17 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#70](https://www.evm.codes/#70) + """ + PUSH18 = Opcode(0x71, pushed_stack_items=1, data_portion_length=18) + """ + PUSH18() = value + ---- + + Description + ---- + Place 18 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#71](https://www.evm.codes/#71) + """ + PUSH19 = Opcode(0x72, pushed_stack_items=1, data_portion_length=19) + """ + PUSH19() = value + ---- + + Description + ---- + Place 19 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#72](https://www.evm.codes/#72) + """ + PUSH20 = Opcode(0x73, pushed_stack_items=1, data_portion_length=20) + """ + PUSH20() = value + ---- + + Description + ---- + Place 20 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: 
[evm.codes/#73](https://www.evm.codes/#73) + """ + PUSH21 = Opcode(0x74, pushed_stack_items=1, data_portion_length=21) + """ + PUSH21() = value + ---- + + Description + ---- + Place 21 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#74](https://www.evm.codes/#74) + """ + PUSH22 = Opcode(0x75, pushed_stack_items=1, data_portion_length=22) + """ + PUSH22() = value + ---- + + Description + ---- + Place 22 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#75](https://www.evm.codes/#75) + """ + PUSH23 = Opcode(0x76, pushed_stack_items=1, data_portion_length=23) + """ + PUSH23() = value + ---- + + Description + ---- + Place 23 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#76](https://www.evm.codes/#76) + """ + PUSH24 = Opcode(0x77, pushed_stack_items=1, data_portion_length=24) + """ + PUSH24() = value + ---- + + Description + ---- + Place 24 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#77](https://www.evm.codes/#77) + """ + PUSH25 = Opcode(0x78, pushed_stack_items=1, data_portion_length=25) + """ + PUSH25() = value + ---- + + Description + ---- + Place 25 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#78](https://www.evm.codes/#78) + """ + PUSH26 = Opcode(0x79, pushed_stack_items=1, data_portion_length=26) + """ + PUSH26() = value + ---- + + Description + ---- + Place 26 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#79](https://www.evm.codes/#79) + """ + PUSH27 = Opcode(0x7A, pushed_stack_items=1, data_portion_length=27) + """ + PUSH27() = value + ---- + + Description + ---- + Place 27 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#7A](https://www.evm.codes/#7A) + """ + PUSH28 = Opcode(0x7B, pushed_stack_items=1, data_portion_length=28) + """ + PUSH28() = value + ---- + + Description + ---- + Place 28 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#7B](https://www.evm.codes/#7B) + """ + PUSH29 = Opcode(0x7C, pushed_stack_items=1, data_portion_length=29) + """ + PUSH29() = value + ---- + + Description + ---- + Place 29 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: 
[evm.codes/#7C](https://www.evm.codes/#7C) + """ + PUSH30 = Opcode(0x7D, pushed_stack_items=1, data_portion_length=30) + """ + PUSH30() = value + ---- + + Description + ---- + Place 30 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#7D](https://www.evm.codes/#7D) + """ + PUSH31 = Opcode(0x7E, pushed_stack_items=1, data_portion_length=31) + """ + PUSH31() = value + ---- + + Description + ---- + Place 31 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#7E](https://www.evm.codes/#7E) + """ + PUSH32 = Opcode(0x7F, pushed_stack_items=1, data_portion_length=32) + """ + PUSH32() = value + ---- + + Description + ---- + Place 32 byte item on stack + + Inputs + ---- + - None + + Outputs + ---- + - value: pushed value, aligned to the right (put in the lowest significant bytes) + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#7F](https://www.evm.codes/#7F) + """ DUP1 = Opcode(0x80, pushed_stack_items=1, min_stack_height=1) + """ + DUP1(value) = value, value + ---- + + Description + ---- + Duplicate 1st stack item + + Inputs + ---- + - value: value to duplicate + + Outputs + ---- + - value: duplicated value + - value: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#80](https://www.evm.codes/#80) + """ + DUP2 = Opcode(0x81, pushed_stack_items=1, min_stack_height=2) + """ + DUP2(v1, v2) = v2, v1, v2 + ---- + + Description + ---- + Duplicate 2nd stack item + + Inputs + ---- + - v1: ignored value + - v2: value to duplicate + + Outputs + ---- + - v2: duplicated value + - v1: ignored value + - v2: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#81](https://www.evm.codes/#81) + """ + DUP3 = Opcode(0x82, pushed_stack_items=1, min_stack_height=3) + """ + DUP3(v1, v2, v3) = v3, v1, v2, v3 + ---- + + Description + ---- + Duplicate 3rd stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - v3: value to duplicate + + Outputs + ---- + - v3: duplicated value + - v1: ignored value + - v2: ignored value + - v3: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#82](https://www.evm.codes/#82) + """ + DUP4 = Opcode(0x83, pushed_stack_items=1, min_stack_height=4) + """ + DUP4(v1, v2, v3, v4) = v4, v1, v2, v3, v4 + ---- + + Description + ---- + Duplicate 4th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - v3: ignored value + - v4: value to duplicate + + Outputs + ---- + - v4: duplicated value + - v1: ignored value + - v2: ignored value + - v3: ignored value + - v4: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#83](https://www.evm.codes/#83) + """ + DUP5 = Opcode(0x84, pushed_stack_items=1, min_stack_height=5) + """ + DUP5(v1, v2, v3, v4, v5) = v5, v1, v2, v3, v4, v5 + ---- + + Description + ---- + Duplicate 5th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - v3: ignored value + - v4: ignored value + - v5: value to duplicate + + Outputs + ---- + - v5: duplicated value + - v1: ignored value + - v2: ignored value + - v3: ignored value + - v4: ignored value + - v5: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: 
[evm.codes/#84](https://www.evm.codes/#84) + """ + DUP6 = Opcode(0x85, pushed_stack_items=1, min_stack_height=6) + """ + DUP6(v1, v2, ..., v5, v6) = v6, v1, v2, ..., v5, v6 + ---- + + Description + ---- + Duplicate 6th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v5: ignored value + - v6: value to duplicate + + Outputs + ---- + - v6: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v5: ignored value + - v6: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#85](https://www.evm.codes/#85) + """ + DUP7 = Opcode(0x86, pushed_stack_items=1, min_stack_height=7) + """ + DUP7(v1, v2, ..., v6, v7) = v7, v1, v2, ..., v6, v7 + ---- + + Description + ---- + Duplicate 7th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v6: ignored value + - v7: value to duplicate + + Outputs + ---- + - v7: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v6: ignored value + - v7: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#86](https://www.evm.codes/#86) + """ + DUP8 = Opcode(0x87, pushed_stack_items=1, min_stack_height=8) + """ + DUP8(v1, v2, ..., v7, v8) = v8, v1, v2, ..., v7, v8 + ---- + + Description + ---- + Duplicate 8th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v7: ignored value + - v8: value to duplicate + + Outputs + ---- + - v8: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v7: ignored value + - v8: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#87](https://www.evm.codes/#87) + """ + DUP9 = Opcode(0x88, pushed_stack_items=1, min_stack_height=9) + """ + DUP9(v1, v2, ..., v8, v9) = v9, v1, v2, ..., v8, v9 + ---- + + Description + ---- + Duplicate 9th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v8: ignored value + - v9: value to duplicate + + Outputs + ---- + - v9: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v8: ignored value + - v9: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#88](https://www.evm.codes/#88) + """ DUP10 = Opcode(0x89, pushed_stack_items=1, min_stack_height=10) + """ + DUP10(v1, v2, ..., v9, v10) = v10, v1, v2, ..., v9, v10 + ---- + + Description + ---- + Duplicate 10th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v9: ignored value + - v10: value to duplicate + + Outputs + ---- + - v10: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v9: ignored value + - v10: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#89](https://www.evm.codes/#89) + """ + DUP11 = Opcode(0x8A, pushed_stack_items=1, min_stack_height=11) + """ + DUP11(v1, v2, ..., v10, v11) = v11, v1, v2, ..., v10, v11 + ---- + + Description + ---- + Duplicate 11th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v10: ignored value + - v11: value to duplicate + + Outputs + ---- + - v11: duplicated value + - v1: ignored value + - v2: ignored value + - ... 
+ - v10: ignored value + - v11: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#8A](https://www.evm.codes/#8A) + """ + DUP12 = Opcode(0x8B, pushed_stack_items=1, min_stack_height=12) + """ + DUP12(v1, v2, ..., v11, v12) = v12, v1, v2, ..., v11, v12 + ---- + + Description + ---- + Duplicate 12th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v11: ignored value + - v12: value to duplicate + + Outputs + ---- + - v12: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v11: ignored value + - v12: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#8B](https://www.evm.codes/#8B) + """ + DUP13 = Opcode(0x8C, pushed_stack_items=1, min_stack_height=13) + """ + DUP13(v1, v2, ..., v12, v13) = v13, v1, v2, ..., v12, v13 + ---- + + Description + ---- + Duplicate 13th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v12: ignored value + - v13: value to duplicate + + Outputs + ---- + - v13: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v12: ignored value + - v13: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#8C](https://www.evm.codes/#8C) + """ + DUP14 = Opcode(0x8D, pushed_stack_items=1, min_stack_height=14) + """ + DUP14(v1, v2, ..., v13, v14) = v14, v1, v2, ..., v13, v14 + ---- + + Description + ---- + Duplicate 14th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v13: ignored value + - v14: value to duplicate + + Outputs + ---- + - v14: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v13: ignored value + - v14: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#8D](https://www.evm.codes/#8D) + """ + DUP15 = Opcode(0x8E, pushed_stack_items=1, min_stack_height=15) + """ + DUP15(v1, v2, ..., v14, v15) = v15, v1, v2, ..., v14, v15 + ---- + + Description + ---- + Duplicate 15th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v14: ignored value + - v15: value to duplicate + + Outputs + ---- + - v15: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v14: ignored value + - v15: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#8E](https://www.evm.codes/#8E) + """ + DUP16 = Opcode(0x8F, pushed_stack_items=1, min_stack_height=16) + """ + DUP16(v1, v2, ..., v15, v16) = v16, v1, v2, ..., v15, v16 + ---- + + Description + ---- + Duplicate 16th stack item + + Inputs + ---- + - v1: ignored value + - v2: ignored value + - ... + - v15: ignored value + - v16: value to duplicate + + Outputs + ---- + - v16: duplicated value + - v1: ignored value + - v2: ignored value + - ... + - v15: ignored value + - v16: original value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#8F](https://www.evm.codes/#8F) + """ SWAP1 = Opcode(0x90, min_stack_height=2) + """ + SWAP1(v1, v2) = v2, v1 + ---- + + Description + ---- + Exchange the top stack item with the second stack item. 
+ + Inputs + ---- + - v1: value to swap + - v2: value to swap + + Outputs + ---- + - v1: swapped value + - v2: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#90](https://www.evm.codes/#90) + """ + SWAP2 = Opcode(0x91, min_stack_height=3) + """ + SWAP2(v1, v2, v3) = v3, v2, v1 + ---- + + Description + ---- + Exchange 1st and 3rd stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - v3: value to swap + + Outputs + ---- + - v3: swapped value + - v2: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#91](https://www.evm.codes/#91) + """ + SWAP3 = Opcode(0x92, min_stack_height=4) + """ + SWAP3(v1, v2, v3, v4) = v4, v2, v3, v1 + ---- + + Description + ---- + Exchange 1st and 4th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - v3: ignored value + - v4: value to swap + + Outputs + ---- + - v4: swapped value + - v2: ignored value + - v3: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#92](https://www.evm.codes/#92) + """ + SWAP4 = Opcode(0x93, min_stack_height=5) + """ + SWAP4(v1, v2, ..., v4, v5) = v5, v2, ..., v4, v1 + ---- + + Description + ---- + Exchange 1st and 5th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v4: ignored value + - v5: value to swap + + Outputs + ---- + - v5: swapped value + - v2: ignored value + - ... + - v4: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#93](https://www.evm.codes/#93) + """ + SWAP5 = Opcode(0x94, min_stack_height=6) + """ + SWAP5(v1, v2, ..., v5, v6) = v6, v2, ..., v5, v1 + ---- + + Description + ---- + Exchange 1st and 6th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v5: ignored value + - v6: value to swap + + Outputs + ---- + - v6: swapped value + - v2: ignored value + - ... + - v5: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#94](https://www.evm.codes/#94) + """ + SWAP6 = Opcode(0x95, min_stack_height=7) + """ + SWAP6(v1, v2, ..., v6, v7) = v7, v2, ..., v6, v1 + ---- + + Description + ---- + Exchange 1st and 7th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v6: ignored value + - v7: value to swap + + Outputs + ---- + - v7: swapped value + - v2: ignored value + - ... + - v6: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#95](https://www.evm.codes/#95) + """ + SWAP7 = Opcode(0x96, min_stack_height=8) + """ + SWAP7(v1, v2, ..., v7, v8) = v8, v2, ..., v7, v1 + ---- + + Description + ---- + Exchange 1st and 8th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v7: ignored value + - v8: value to swap + + Outputs + ---- + - v8: swapped value + - v2: ignored value + - ... + - v7: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#96](https://www.evm.codes/#96) + """ + SWAP8 = Opcode(0x97, min_stack_height=9) + """ + SWAP8(v1, v2, ..., v8, v9) = v9, v2, ..., v8, v1 + ---- + + Description + ---- + Exchange 1st and 9th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v8: ignored value + - v9: value to swap + + Outputs + ---- + - v9: swapped value + - v2: ignored value + - ... 
+ - v8: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#97](https://www.evm.codes/#97) + """ + SWAP9 = Opcode(0x98, min_stack_height=10) + """ + SWAP9(v1, v2, ..., v9, v10) = v10, v2, ..., v9, v1 + ---- + + Description + ---- + Exchange 1st and 10th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v9: ignored value + - v10: value to swap + + Outputs + ---- + - v10: swapped value + - v2: ignored value + - ... + - v9: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#98](https://www.evm.codes/#98) + """ + SWAP10 = Opcode(0x99, min_stack_height=11) + """ + SWAP10(v1, v2, ..., v10, v11) = v11, v2, ..., v10, v1 + ---- + + Description + ---- + Exchange 1st and 11th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v10: ignored value + - v11: value to swap + + Outputs + ---- + - v11: swapped value + - v2: ignored value + - ... + - v10: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#99](https://www.evm.codes/#99) + """ + SWAP11 = Opcode(0x9A, min_stack_height=12) + """ + SWAP11(v1, v2, ..., v11, v12) = v12, v2, ..., v11, v1 + ---- + + Description + ---- + Exchange 1st and 12th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v11: ignored value + - v12: value to swap + + Outputs + ---- + - v12: swapped value + - v2: ignored value + - ... + - v11: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#9A](https://www.evm.codes/#9A) + """ + SWAP12 = Opcode(0x9B, min_stack_height=13) + """ + SWAP12(v1, v2, ..., v12, v13) = v13, v2, ..., v12, v1 + ---- + + Description + ---- + Exchange 1st and 13th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v12: ignored value + - v13: value to swap + + Outputs + ---- + - v13: swapped value + - v2: ignored value + - ... + - v12: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#9B](https://www.evm.codes/#9B) + """ + SWAP13 = Opcode(0x9C, min_stack_height=14) + """ + SWAP13(v1, v2, ..., v13, v14) = v14, v2, ..., v13, v1 + ---- + + Description + ---- + Exchange 1st and 14th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v13: ignored value + - v14: value to swap + + Outputs + ---- + - v14: swapped value + - v2: ignored value + - ... + - v13: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#9C](https://www.evm.codes/#9C) + """ + SWAP14 = Opcode(0x9D, min_stack_height=15) + """ + SWAP14(v1, v2, ..., v14, v15) = v15, v2, ..., v14, v1 + ---- + + Description + ---- + Exchange 1st and 15th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v14: ignored value + - v15: value to swap + + Outputs + ---- + - v15: swapped value + - v2: ignored value + - ... + - v14: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#9D](https://www.evm.codes/#9D) + """ + SWAP15 = Opcode(0x9E, min_stack_height=16) + """ + SWAP15(v1, v2, ..., v15, v16) = v16, v2, ..., v15, v1 + ---- + + Description + ---- + Exchange 1st and 16th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... 
+ - v15: ignored value + - v16: value to swap + + Outputs + ---- + - v16: swapped value + - v2: ignored value + - ... + - v15: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#9E](https://www.evm.codes/#9E) + """ + SWAP16 = Opcode(0x9F, min_stack_height=17) + """ + SWAP16(v1, v2, ..., v16, v17) = v17, v2, ..., v16, v1 + ---- + + Description + ---- + Exchange 1st and 17th stack items + + Inputs + ---- + - v1: value to swap + - v2: ignored value + - ... + - v16: ignored value + - v17: value to swap + + Outputs + ---- + - v17: swapped value + - v2: ignored value + - ... + - v16: ignored value + - v1: swapped value + + Fork + ---- + Frontier + + Gas + ---- + 3 + + Source: [evm.codes/#9F](https://www.evm.codes/#9F) + """ LOG0 = Opcode(0xA0, popped_stack_items=2) + """ + LOG0(offset, size) + ---- + + Description + ---- + Append log record with no topics + + Inputs + ---- + - offset: byte offset in the memory in bytes + - size: byte size to copy + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 375 + - dynamic_gas = 375 * topic_count + 8 * size + memory_expansion_cost + + Source: [evm.codes/#A0](https://www.evm.codes/#A0) + """ + LOG1 = Opcode(0xA1, popped_stack_items=3) + """ + LOG1(offset, size, topic1) + ---- + + Description + ---- + Append log record with one topic + + Inputs + ---- + - offset: byte offset in the memory in bytes + - size: byte size to copy + - topic1: 32-byte value + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 375 + - dynamic_gas = 375 * topic_count + 8 * size + memory_expansion_cost + + Source: [evm.codes/#A1](https://www.evm.codes/#A1) + """ + LOG2 = Opcode(0xA2, popped_stack_items=4) + """ + LOG2(offset, size, topic1, topic2) + ---- + + Description + ---- + Append log record with two topics + + Inputs + ---- + - offset: byte offset in the memory in bytes + - size: byte size to copy + - topic1: 32-byte value + - topic2: 32-byte value + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 375 + - dynamic_gas = 375 * topic_count + 8 * size + memory_expansion_cost + + Source: [evm.codes/#A2](https://www.evm.codes/#A2) + """ + LOG3 = Opcode(0xA3, popped_stack_items=5) + """ + LOG3(offset, size, topic1, topic2, topic3) + ---- + + Description + ---- + Append log record with three topics + + Inputs + ---- + - offset: byte offset in the memory in bytes + - size: byte size to copy + - topic1: 32-byte value + - topic2: 32-byte value + - topic3: 32-byte value + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 375 + - dynamic_gas = 375 * topic_count + 8 * size + memory_expansion_cost + + Source: [evm.codes/#A3](https://www.evm.codes/#A3) + """ + LOG4 = Opcode(0xA4, popped_stack_items=6) + """ + LOG4(offset, size, topic1, topic2, topic3, topic4) + ---- + + Description + ---- + Append log record with four topics + + Inputs + ---- + - offset: byte offset in the memory in bytes + - size: byte size to copy + - topic1: 32-byte value + - topic2: 32-byte value + - topic3: 32-byte value + - topic4: 32-byte value + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 375 + - dynamic_gas = 375 * topic_count + 8 * size + memory_expansion_cost + + Source: [evm.codes/#A4](https://www.evm.codes/#A4) + """ RJUMP = Opcode(0xE0, data_portion_length=2) + """ + !!! 
Note: This opcode is under development + + RJUMP() + ---- + + Description + ---- + + Inputs + ---- + + Outputs + ---- + + Fork + ---- + EOF Fork + + Gas + ---- + + Source: [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200) + """ + RJUMPI = Opcode(0xE1, popped_stack_items=1, data_portion_length=2) + """ + !!! Note: This opcode is under development + + RJUMPI() + ---- + + Description + ---- + + Inputs + ---- + + Outputs + ---- + + Fork + ---- + EOF Fork + + Gas + ---- + + Source: [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200) + """ + RJUMPV = Opcode(0xE2) + """ + !!! Note: This opcode is under development + + RJUMPV() + ---- + + Description + ---- + + Inputs + ---- + + Outputs + ---- + + Fork + ---- + EOF Fork + + Gas + ---- + + Source: [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200) + """ + + RETF = Opcode(0xE4) + """ + !!! Note: This opcode is under development + + RETF() + ---- + + Description + ---- + + Inputs + ---- + + Outputs + ---- + + Fork + ---- + EOF Fork + + Gas + ---- + 3 + + Source: [eips.ethereum.org/EIPS/eip-4750](https://eips.ethereum.org/EIPS/eip-4750) + """ CREATE = Opcode(0xF0, popped_stack_items=3, pushed_stack_items=1) + """ + CREATE(value, offset, length) = address + ---- + + Description + ---- + Create a new contract with the given code + + Inputs + ---- + - value: value in wei to send to the new account + - offset: byte offset in the memory in bytes, the initialization code for the new account + - size: byte size to copy (size of the initialization code) + + Outputs + ---- + - address: the address of the deployed contract, 0 if the deployment failed + + Fork + ---- + Frontier + + Gas + ---- + ``` + minimum_word_size = (size + 31) / 32 + init_code_cost = 2 * minimum_word_size + code_deposit_cost = 200 * deployed_code_size + + static_gas = 32000 + dynamic_gas = init_code_cost + memory_expansion_cost + deployment_code_execution_cost + + code_deposit_cost + ``` + + Source: [evm.codes/#F0](https://www.evm.codes/#F0) + """ + CALL = Opcode(0xF1, popped_stack_items=7, pushed_stack_items=1) + """ + CALL(gas, address, value, argsOffset, argsSize, retOffset, retSize) = success + ---- + + Description + ---- + Message-call into an account + + Inputs + ---- + - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub + context is returned to this one + - address: the account which context to execute + - value: value in wei to send to the account + - argsOffset: byte offset in the memory in bytes, the calldata of the sub context + - argsSize: byte size to copy (size of the calldata) + - retOffset: byte offset in the memory in bytes, where to store the return data of the sub + context + - retSize: byte size to copy (size of the return data) + + Outputs + ---- + - success: return 0 if the sub context reverted, 1 otherwise + + Fork + ---- + Frontier + + Gas + ---- + ``` + static_gas = 0 + dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost + + positive_value_cost + value_to_empty_account_cost + ``` + + Source: [evm.codes/#F1](https://www.evm.codes/#F1) + """ + CALLCODE = Opcode(0xF2, popped_stack_items=7, pushed_stack_items=1) + """ + CALLCODE(gas, address, value, argsOffset, argsSize, retOffset, retSize) = success + ---- + + Description + ---- + Message-call into this account with an alternative account's code. Executes code starting at + the address to which the call is made. 
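As a rough sanity check of the CREATE gas formula quoted above, here is a minimal Python sketch; the helper name is illustrative, integer division is assumed for the word-size rounding, and the memory expansion and deployment-code execution costs are assumed to be computed elsewhere and passed in:

```python
# Minimal sketch of the CREATE gas arithmetic quoted above; the constants
# (32000, 2, 200) are taken directly from the docstring / evm.codes.
def create_gas(
    init_code_size: int,
    deployed_code_size: int,
    memory_expansion_cost: int = 0,
    deployment_code_execution_cost: int = 0,
) -> int:
    minimum_word_size = (init_code_size + 31) // 32
    init_code_cost = 2 * minimum_word_size
    code_deposit_cost = 200 * deployed_code_size
    static_gas = 32000
    dynamic_gas = (
        init_code_cost
        + memory_expansion_cost
        + deployment_code_execution_cost
        + code_deposit_cost
    )
    return static_gas + dynamic_gas


# 64 bytes of init code (2 words) deploying 10 bytes of runtime code:
assert create_gas(init_code_size=64, deployed_code_size=10) == 32000 + 4 + 2000
```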
+ + Inputs + ---- + - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub + context is returned to this one + - address: the account which code to execute + - value: value in wei to send to the account + - argsOffset: byte offset in the memory in bytes, the calldata of the sub context + - argsSize: byte size to copy (size of the calldata) + - retOffset: byte offset in the memory in bytes, where to store the return data of the sub + context + - retSize: byte size to copy (size of the return data) + + Outputs + ---- + - success: return 0 if the sub context reverted, 1 otherwise + + Fork + ---- + Frontier + + Gas + ---- + ``` + static_gas = 0 + dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost + + positive_value_cost + ``` + + Source: [evm.codes/#F2](https://www.evm.codes/#F2) + """ + RETURN = Opcode(0xF3, popped_stack_items=2) + """ + RETURN(offset, size) + ---- + + Description + ---- + Halt execution returning output data + + Inputs + ---- + - offset: byte offset in the memory in bytes, to copy what will be the return data of this + context + - size: byte size to copy (size of the return data) + + Outputs + ---- + - None + + Fork + ---- + Frontier + + Gas + ---- + - static_gas = 0 + - dynamic_gas = memory_expansion_cost + + Source: [evm.codes/#F3](https://www.evm.codes/#F3) + """ + DELEGATECALL = Opcode(0xF4, popped_stack_items=6, pushed_stack_items=1) + """ + DELEGATECALL(gas, address, argsOffset, argsSize, retOffset, retSize) = success + ---- + + Description + ---- + Message-call into this account with an alternative account's code, but persisting the current + values for sender and value + + Inputs + ---- + - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub + context is returned to this one + - address: the account which code to execute + - argsOffset: byte offset in the memory in bytes, the calldata of the sub context + - argsSize: byte size to copy (size of the calldata) + - retOffset: byte offset in the memory in bytes, where to store the return data of the sub + context + - retSize: byte size to copy (size of the return data) + + Outputs + ---- + - success: return 0 if the sub context reverted, 1 otherwise + + Fork + ---- + Byzantium + + Gas + ---- + - static_gas = 0 + - dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost + + Source: [evm.codes/#F4](https://www.evm.codes/#F4) + """ + CREATE2 = Opcode(0xF5, popped_stack_items=4, pushed_stack_items=1) + """ + CREATE2(value, offset, size, salt) = address + ---- + + Description + ---- + Creates a new contract + + Inputs + ---- + - value: value in wei to send to the new account + - offset: byte offset in the memory in bytes, the initialization code of the new account + - size: byte size to copy (size of the initialization code) + - salt: 32-byte value used to create the new account at a deterministic address + + Outputs + ---- + - address: the address of the deployed contract, 0 if the deployment failed + + Fork + ---- + Constantinople + + Gas + ---- + ``` + minimum_word_size = (size + 31) / 32 + init_code_cost = 2 * minimum_word_size + hash_cost = 6 * minimum_word_size + code_deposit_cost = 200 * deployed_code_size + + static_gas = 32000 + dynamic_gas = init_code_cost + hash_cost + memory_expansion_cost + + deployment_code_execution_cost + code_deposit_cost + ``` + + Source: [evm.codes/#F5](https://www.evm.codes/#F5) + """ STATICCALL = Opcode(0xFA, popped_stack_items=6, pushed_stack_items=1) + 
""" + STATICCALL(gas, address, argsOffset, argsSize, retOffset, retSize) = success + ---- + + Description + ---- + Static message-call into an account + + Inputs + ---- + - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub + context is returned to this one + - address: the account which context to execute + - argsOffset: byte offset in the memory in bytes, the calldata of the sub context + - argsSize: byte size to copy (size of the calldata) + - retOffset: byte offset in the memory in bytes, where to store the return data of the sub + context + - retSize: byte size to copy (size of the return data) + + Outputs + ---- + - success: return 0 if the sub context reverted, 1 otherwise + + Fork + ---- + Byzantium + + Gas + ---- + - static_gas = 0 + - dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost + + Source: [evm.codes/#FA](https://www.evm.codes/#FA) + """ REVERT = Opcode(0xFD, popped_stack_items=2) + """ + REVERT(offset, size) + ---- + + Description + ---- + Halt execution reverting state changes but returning data and remaining gas + + Inputs + ---- + - offset: byte offset in the memory in bytes. The return data of the calling context + - size: byte size to copy (size of the return data) + + Fork + ---- + Byzantium + + Gas + ---- + static_gas = 0 + dynamic_gas = memory_expansion_cost + + Source: [evm.codes/#FD](https://www.evm.codes/#FD) + """ + INVALID = Opcode(0xFE) + """ + INVALID() + ---- + + Description + ---- + Designated invalid instruction + + Inputs + ---- + None + + Outputs + ---- + None + + Fork + ---- + Frontier + + Gas + ---- + All the remaining gas in this context is consumed + + Source: [evm.codes/#FE](https://www.evm.codes/#FE) + """ SELFDESTRUCT = Opcode(0xFF, popped_stack_items=1) - SENDALL = Opcode(0xFF, popped_stack_items=1) + """ + SELFDESTRUCT(address) + ---- + + Description + ---- + Halt execution and register the account for later deletion + + Inputs + ---- + - address: account to send the current balance to + + Fork + ---- + Frontier + + Gas + ---- + 5000 + + Source: [evm.codes/#FF](https://www.evm.codes/#FF) + """ + + +class Macros(Macro, Enum): + """ + Enum containing all macros. + """ + + OOG = Macro(Opcodes.SHA3(0, 100000000000)) + """ + OOG(args) + ---- + + Halt execution by consuming all available gas. + + Inputs + ---- + - any input arguments are ignored + + Fork + ---- + Frontier + + Gas + ---- + `SHA3(0, 100000000000)` results in 19073514453125027 gas used and an OOG + exception. + + Note: + If a value > `100000000000` is used as second argument, the resulting geth + trace reports gas `30` and an OOG exception. + `SHA3(0, SUB(0, 1))` causes a gas > u64 exception and an OOG exception. + + Bytecode + ---- + SHA3(0, 100000000000) + """ diff --git a/src/evm_transition_tool/besu.py b/src/evm_transition_tool/besu.py index fef9374372..a775928654 100644 --- a/src/evm_transition_tool/besu.py +++ b/src/evm_transition_tool/besu.py @@ -181,4 +181,4 @@ def is_fork_supported(self, fork: Fork) -> bool: """ Returns True if the fork is supported by the tool """ - return fork.fork() in self.help_string + return fork.transition_tool_name() in self.help_string diff --git a/src/evm_transition_tool/evmone.py b/src/evm_transition_tool/evmone.py index 1b912d9b5e..568c2d48cc 100644 --- a/src/evm_transition_tool/evmone.py +++ b/src/evm_transition_tool/evmone.py @@ -1,27 +1,13 @@ """ Evmone Transition tool interface. 
""" -import json -import os -import shutil -import subprocess -import tempfile -import textwrap from pathlib import Path from re import compile -from typing import Any, Dict, List, Optional, Tuple +from typing import Optional from ethereum_test_forks import Fork -from .transition_tool import TransitionTool, dump_files_to_directory - - -def write_json_file(data: Dict[str, Any], file_path: str) -> None: - """ - Write a JSON file to the given path. - """ - with open(file_path, "w") as f: - json.dump(data, f, ensure_ascii=False, indent=4) +from .transition_tool import TransitionTool class EvmOneTransitionTool(TransitionTool): @@ -31,6 +17,7 @@ class EvmOneTransitionTool(TransitionTool): default_binary = Path("evmone-t8n") detect_binary_pattern = compile(r"^evmone-t8n\b") + t8n_use_stream = False binary: Path cached_version: Optional[str] = None @@ -44,132 +31,6 @@ def __init__( ): super().__init__(binary=binary, trace=trace) - def evaluate( - self, - *, - alloc: Any, - txs: Any, - env: Any, - fork_name: str, - chain_id: int = 1, - reward: int = 0, - eips: Optional[List[int]] = None, - debug_output_path: str = "", - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: - """ - Executes `evmone-t8n` with the specified arguments. - """ - if eips is not None: - fork_name = "+".join([fork_name] + [str(eip) for eip in eips]) - - temp_dir = tempfile.TemporaryDirectory() - os.mkdir(os.path.join(temp_dir.name, "input")) - os.mkdir(os.path.join(temp_dir.name, "output")) - - input_contents = { - "alloc": alloc, - "env": env, - "txs": txs, - } - - input_paths = { - k: os.path.join(temp_dir.name, "input", f"{k}.json") for k in input_contents.keys() - } - for key, file_path in input_paths.items(): - write_json_file(input_contents[key], file_path) - - output_paths = { - output: os.path.join("output", f"{output}.json") for output in ["alloc", "result"] - } - output_paths["body"] = os.path.join("output", "txs.rlp") - - # Construct args for evmone-t8n binary - args = [ - str(self.binary), - "--state.fork", - fork_name, - "--input.alloc", - input_paths["alloc"], - "--input.env", - input_paths["env"], - "--input.txs", - input_paths["txs"], - "--output.basedir", - temp_dir.name, - "--output.result", - output_paths["result"], - "--output.alloc", - output_paths["alloc"], - "--output.body", - output_paths["body"], - "--state.reward", - str(reward), - "--state.chainid", - str(chain_id), - ] - - if self.trace: - args.append("--trace") - - result = subprocess.run( - args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - - if debug_output_path: - if os.path.exists(debug_output_path): - shutil.rmtree(debug_output_path) - shutil.copytree(temp_dir.name, debug_output_path) - t8n_output_base_dir = os.path.join(debug_output_path, "t8n.sh.out") - t8n_call = " ".join(args) - for file_path in input_paths.values(): # update input paths - t8n_call = t8n_call.replace( - os.path.dirname(file_path), os.path.join(debug_output_path, "input") - ) - t8n_call = t8n_call.replace( # use a new output path for basedir and outputs - temp_dir.name, - t8n_output_base_dir, - ) - t8n_script = textwrap.dedent( - f"""\ - #!/bin/bash - rm -rf {debug_output_path}/t8n.sh.out # hard-coded to avoid surprises - mkdir -p {debug_output_path}/t8n.sh.out/output - {t8n_call} - """ - ) - dump_files_to_directory( - debug_output_path, - { - "args.py": args, - "returncode.txt": result.returncode, - "stdout.txt": result.stdout.decode(), - "stderr.txt": result.stderr.decode(), - "t8n.sh+x": t8n_script, - }, - ) - - if result.returncode != 0: - raise Exception("failed 
to evaluate: " + result.stderr.decode()) - - for key, file_path in output_paths.items(): - output_paths[key] = os.path.join(temp_dir.name, file_path) - - output_contents = {} - for key, file_path in output_paths.items(): - if "txs.rlp" in file_path: - continue - with open(file_path, "r+") as file: - output_contents[key] = json.load(file) - - if self.trace: - self.collect_traces(output_contents["result"]["receipts"], temp_dir, debug_output_path) - - temp_dir.cleanup() - - return output_contents["alloc"], output_contents["result"] - def is_fork_supported(self, fork: Fork) -> bool: """ Returns True if the fork is supported by the tool. diff --git a/src/evm_transition_tool/file_utils.py b/src/evm_transition_tool/file_utils.py new file mode 100644 index 0000000000..8f97b49eea --- /dev/null +++ b/src/evm_transition_tool/file_utils.py @@ -0,0 +1,44 @@ +""" +Methods to work with the filesystem and json +""" + +import json +import os +import stat +from json import dump +from typing import Any, Dict + + +def write_json_file(data: Dict[str, Any], file_path: str) -> None: + """ + Write a JSON file to the given path. + """ + with open(file_path, "w") as f: + json.dump(data, f, ensure_ascii=False, indent=4) + + +def dump_files_to_directory(output_path: str, files: Dict[str, Any]) -> None: + """ + Dump the files to the given directory. + """ + os.makedirs(output_path, exist_ok=True) + for file_rel_path_flags, file_contents in files.items(): + file_rel_path, flags = ( + file_rel_path_flags.split("+") + if "+" in file_rel_path_flags + else (file_rel_path_flags, "") + ) + rel_path = os.path.dirname(file_rel_path) + if rel_path: + os.makedirs(os.path.join(output_path, rel_path), exist_ok=True) + file_path = os.path.join(output_path, file_rel_path) + with open(file_path, "w") as f: + if isinstance(file_contents, str): + f.write(file_contents) + else: + dump(file_contents, f, ensure_ascii=True, indent=4) + if flags: + file_mode = os.stat(file_path).st_mode + if "x" in flags: + file_mode |= stat.S_IEXEC + os.chmod(file_path, file_mode) diff --git a/src/evm_transition_tool/geth.py b/src/evm_transition_tool/geth.py index f5ae09de07..0ad7efd32f 100644 --- a/src/evm_transition_tool/geth.py +++ b/src/evm_transition_tool/geth.py @@ -2,12 +2,16 @@ Go-ethereum Transition tool interface. """ +import json +import shutil import subprocess import textwrap from pathlib import Path from re import compile from typing import Optional +import pytest + from ethereum_test_forks import Fork from .transition_tool import FixtureFormats, TransitionTool, dump_files_to_directory @@ -50,7 +54,18 @@ def is_fork_supported(self, fork: Fork) -> bool: If the fork is a transition fork, we want to check the fork it transitions to. """ - return fork.fork() in self.help_string + return fork.transition_tool_name() in self.help_string + + def process_statetest_result(self, result: str): + """ + Process the result of a `evm statetest` to parse as JSON and raise if any test failed. + """ + result_json = json.loads(result) + if not isinstance(result_json, list): + raise Exception(f"Unexpected result from evm statetest: {result_json}") + for test_result in result_json: + if not test_result["pass"]: + pytest.fail(f"Test failed: {test_result['name']}. 
Error: {test_result['error']}") def verify_fixture( self, fixture_format: FixtureFormats, fixture_path: Path, debug_output_path: Optional[Path] @@ -80,8 +95,12 @@ def verify_fixture( stderr=subprocess.PIPE, ) + if FixtureFormats.is_state_test(fixture_format): + self.process_statetest_result(result.stdout.decode()) + if debug_output_path: - debug_fixture_path = debug_output_path / fixture_path.name + debug_fixture_path = debug_output_path / "fixtures.json" + shutil.copyfile(fixture_path, debug_fixture_path) # Use the local copy of the fixture in the debug directory verify_fixtures_call = " ".join(command[:-1]) + f" {debug_fixture_path}" verify_fixtures_script = textwrap.dedent( diff --git a/src/evm_transition_tool/nimbus.py b/src/evm_transition_tool/nimbus.py index 4762f04f24..bb36070929 100644 --- a/src/evm_transition_tool/nimbus.py +++ b/src/evm_transition_tool/nimbus.py @@ -57,4 +57,4 @@ def is_fork_supported(self, fork: Fork) -> bool: If the fork is a transition fork, we want to check the fork it transitions to. """ - return fork.fork() in self.help_string + return fork.transition_tool_name() in self.help_string diff --git a/src/evm_transition_tool/tests/test_evaluate.py b/src/evm_transition_tool/tests/test_evaluate.py index 171fd5e545..dbd010d23f 100644 --- a/src/evm_transition_tool/tests/test_evaluate.py +++ b/src/evm_transition_tool/tests/test_evaluate.py @@ -7,6 +7,7 @@ import pytest from ethereum_test_forks import Berlin, Fork, Istanbul, London +from ethereum_test_tools.common import Alloc from evm_transition_tool import GethTransitionTool, TransitionTool FIXTURES_ROOT = Path(os.path.join("src", "evm_transition_tool", "tests", "fixtures")) @@ -76,7 +77,7 @@ class TestEnv: env = TestEnv() env.base_fee = base_fee - assert t8n.calc_state_root(alloc=alloc, fork=fork)[1].startswith(hash) + assert Alloc(alloc).state_root().startswith(hash) @pytest.mark.parametrize("evm_tool", [GethTransitionTool]) @@ -119,7 +120,7 @@ def test_evm_t8n(t8n: TransitionTool, test_dir: str) -> None: # noqa: D103 alloc=alloc, txs=txs, env=env_json, - fork_name=Berlin.fork( + fork_name=Berlin.transition_tool_name( block_number=int(env_json["currentNumber"], 0), timestamp=int(env_json["currentTimestamp"], 0), ), diff --git a/src/evm_transition_tool/transition_tool.py b/src/evm_transition_tool/transition_tool.py index ae727a0aa0..4344526536 100644 --- a/src/evm_transition_tool/transition_tool.py +++ b/src/evm_transition_tool/transition_tool.py @@ -5,20 +5,21 @@ import json import os import shutil -import stat import subprocess import tempfile import textwrap from abc import abstractmethod +from dataclasses import dataclass, field from enum import Enum from itertools import groupby -from json import dump from pathlib import Path from re import Pattern from typing import Any, Dict, List, Optional, Tuple, Type from ethereum_test_forks import Fork +from .file_utils import dump_files_to_directory, write_json_file + class UnknownTransitionTool(Exception): """Exception raised if an unknown t8n is encountered""" @@ -35,46 +36,19 @@ def __init__(self, message="The transition tool was not found in the path", bina super().__init__(message) -def dump_files_to_directory(output_path: str, files: Dict[str, Any]) -> None: - """ - Dump the files to the given directory. 
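To make the new `file_utils.dump_files_to_directory` helper easier to follow, a hedged usage sketch: the portion of a dict key after `+` is treated as a flag string (an `x` flag marks the written file as executable), string values are written verbatim, and any other value is serialized as JSON. The output path and file contents below are illustrative only.

```python
from evm_transition_tool.file_utils import dump_files_to_directory

dump_files_to_directory(
    "/tmp/t8n-debug",  # illustrative output directory
    {
        "args.py": ["evm", "t8n", "--state.fork=Shanghai"],   # dumped as JSON
        "stdout.txt": "ok\n",                                 # written verbatim
        "t8n.sh+x": "#!/bin/bash\necho 're-run t8n here'\n",  # also chmod'ed +x
    },
)
```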
- """ - os.makedirs(output_path, exist_ok=True) - for file_rel_path_flags, file_contents in files.items(): - file_rel_path, flags = ( - file_rel_path_flags.split("+") - if "+" in file_rel_path_flags - else (file_rel_path_flags, "") - ) - rel_path = os.path.dirname(file_rel_path) - if rel_path: - os.makedirs(os.path.join(output_path, rel_path), exist_ok=True) - file_path = os.path.join(output_path, file_rel_path) - with open(file_path, "w") as f: - if isinstance(file_contents, str): - f.write(file_contents) - else: - dump(file_contents, f, ensure_ascii=True, indent=4) - if flags: - file_mode = os.stat(file_path).st_mode - if "x" in flags: - file_mode |= stat.S_IEXEC - os.chmod(file_path, file_mode) - - class FixtureFormats(Enum): """ Helper class to define fixture formats. """ + UNSET_TEST_FORMAT = "unset_test_format" STATE_TEST = "state_test" - STATE_TEST_HIVE = "state_test_hive" BLOCKCHAIN_TEST = "blockchain_test" BLOCKCHAIN_TEST_HIVE = "blockchain_test_hive" @classmethod def is_state_test(cls, format): # noqa: D102 - return format in (cls.STATE_TEST, cls.STATE_TEST_HIVE) + return format == cls.STATE_TEST @classmethod def is_blockchain_test(cls, format): # noqa: D102 @@ -82,12 +56,33 @@ def is_blockchain_test(cls, format): # noqa: D102 @classmethod def is_hive_format(cls, format): # noqa: D102 - return format in (cls.STATE_TEST_HIVE, cls.BLOCKCHAIN_TEST_HIVE) + return format == cls.BLOCKCHAIN_TEST_HIVE @classmethod def is_standard_format(cls, format): # noqa: D102 return format in (cls.STATE_TEST, cls.BLOCKCHAIN_TEST) + @classmethod + def is_verifiable(cls, format): # noqa: D102 + return format in (cls.STATE_TEST, cls.BLOCKCHAIN_TEST) + + @classmethod + def get_format_description(cls, format): + """ + Returns a description of the fixture format. + + Used to add a description to the generated pytest marks. + """ + if format == cls.UNSET_TEST_FORMAT: + return "Unknown fixture format; it has not been set." + elif format == cls.STATE_TEST: + return "Tests that generate a state test fixture." + elif format == cls.BLOCKCHAIN_TEST: + return "Tests that generate a blockchain test fixture." + elif format == cls.BLOCKCHAIN_TEST_HIVE: + return "Tests that generate a blockchain test fixture in hive format." + raise Exception(f"Unknown fixture format: {format}.") + class TransitionTool: """ @@ -106,6 +101,7 @@ class TransitionTool: statetest_subcommand: Optional[str] = None blocktest_subcommand: Optional[str] = None cached_version: Optional[str] = None + t8n_use_stream: bool = True # Abstract methods that each tool must implement @@ -284,93 +280,163 @@ def collect_traces( traces.append(tx_traces) self.append_traces(traces) - def evaluate( + @dataclass + class TransitionToolData: + """ + Transition tool files and data to pass between methods + """ + + alloc: Any + txs: Any + env: Any + fork_name: str + chain_id: int = field(default=1) + reward: int = field(default=0) + + def _evaluate_filesystem( self, *, - alloc: Any, - txs: Any, - env: Any, - fork_name: str, - chain_id: int = 1, - reward: int = 0, - eips: Optional[List[int]] = None, + t8n_data: TransitionToolData, debug_output_path: str = "", ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ - Executes `evm t8n` with the specified arguments. - - If a client's `t8n` tool varies from the default behavior, this method - should be overridden. + Executes a transition tool using the filesystem for its inputs and outputs. 
""" - if eips is not None: - fork_name = "+".join([fork_name] + [str(eip) for eip in eips]) + temp_dir = tempfile.TemporaryDirectory() + os.mkdir(os.path.join(temp_dir.name, "input")) + os.mkdir(os.path.join(temp_dir.name, "output")) - if int(env["currentNumber"], 0) == 0: - reward = -1 + input_contents = { + "alloc": t8n_data.alloc, + "env": t8n_data.env, + "txs": t8n_data.txs, + } - command: list[str] = [str(self.binary)] - if self.t8n_subcommand: - command.append(self.t8n_subcommand) + input_paths = { + k: os.path.join(temp_dir.name, "input", f"{k}.json") for k in input_contents.keys() + } + for key, file_path in input_paths.items(): + write_json_file(input_contents[key], file_path) - args = command + [ - "--input.alloc=stdin", - "--input.txs=stdin", - "--input.env=stdin", - "--output.result=stdout", - "--output.alloc=stdout", - "--output.body=stdout", - f"--state.fork={fork_name}", - f"--state.chainid={chain_id}", - f"--state.reward={reward}", + output_paths = { + output: os.path.join("output", f"{output}.json") for output in ["alloc", "result"] + } + output_paths["body"] = os.path.join("output", "txs.rlp") + + # Construct args for evmone-t8n binary + args = [ + str(self.binary), + "--state.fork", + t8n_data.fork_name, + "--input.alloc", + input_paths["alloc"], + "--input.env", + input_paths["env"], + "--input.txs", + input_paths["txs"], + "--output.basedir", + temp_dir.name, + "--output.result", + output_paths["result"], + "--output.alloc", + output_paths["alloc"], + "--output.body", + output_paths["body"], + "--state.reward", + str(t8n_data.reward), + "--state.chainid", + str(t8n_data.chain_id), ] if self.trace: - temp_dir = tempfile.TemporaryDirectory() args.append("--trace") - args.append(f"--output.basedir={temp_dir.name}") - stdin = { - "alloc": alloc, - "txs": txs, - "env": env, - } - - encoded_input = str.encode(json.dumps(stdin)) result = subprocess.run( args, - input=encoded_input, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) if debug_output_path: - t8n_call = " ".join(args) + if os.path.exists(debug_output_path): + shutil.rmtree(debug_output_path) + shutil.copytree(temp_dir.name, debug_output_path) t8n_output_base_dir = os.path.join(debug_output_path, "t8n.sh.out") - if self.trace: - t8n_call = t8n_call.replace(temp_dir.name, t8n_output_base_dir) + t8n_call = " ".join(args) + for file_path in input_paths.values(): # update input paths + t8n_call = t8n_call.replace( + os.path.dirname(file_path), os.path.join(debug_output_path, "input") + ) + t8n_call = t8n_call.replace( # use a new output path for basedir and outputs + temp_dir.name, + t8n_output_base_dir, + ) t8n_script = textwrap.dedent( f"""\ #!/bin/bash rm -rf {debug_output_path}/t8n.sh.out # hard-coded to avoid surprises - mkdir {debug_output_path}/t8n.sh.out # unused if tracing is not enabled - {t8n_call} < {debug_output_path}/stdin.txt + mkdir -p {debug_output_path}/t8n.sh.out/output + {t8n_call} """ ) dump_files_to_directory( debug_output_path, { "args.py": args, - "input/alloc.json": stdin["alloc"], - "input/env.json": stdin["env"], - "input/txs.json": stdin["txs"], "returncode.txt": result.returncode, - "stdin.txt": stdin, "stdout.txt": result.stdout.decode(), "stderr.txt": result.stderr.decode(), "t8n.sh+x": t8n_script, }, ) + if result.returncode != 0: + raise Exception("failed to evaluate: " + result.stderr.decode()) + + for key, file_path in output_paths.items(): + output_paths[key] = os.path.join(temp_dir.name, file_path) + + output_contents = {} + for key, file_path in output_paths.items(): + if "txs.rlp" 
in file_path: + continue + with open(file_path, "r+") as file: + output_contents[key] = json.load(file) + + if self.trace: + self.collect_traces(output_contents["result"]["receipts"], temp_dir, debug_output_path) + + temp_dir.cleanup() + + return output_contents["alloc"], output_contents["result"] + + def _evaluate_stream( + self, + *, + t8n_data: TransitionToolData, + debug_output_path: str = "", + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """ + Executes a transition tool using stdin and stdout for its inputs and outputs. + """ + temp_dir = tempfile.TemporaryDirectory() + args = self.construct_args_stream(t8n_data, temp_dir) + + stdin = { + "alloc": t8n_data.alloc, + "txs": t8n_data.txs, + "env": t8n_data.env, + } + + result = subprocess.run( + args, + input=str.encode(json.dumps(stdin)), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + self.dump_debug_stream(debug_output_path, temp_dir, stdin, args, result) + if result.returncode != 0: raise Exception("failed to evaluate: " + result.stderr.decode()) @@ -395,48 +461,107 @@ def evaluate( return output["alloc"], output["result"] - def calc_state_root( - self, *, alloc: Any, fork: Fork, debug_output_path: str = "" - ) -> Tuple[Dict, bytes]: + def construct_args_stream( + self, t8n_data: TransitionToolData, temp_dir: tempfile.TemporaryDirectory + ) -> List[str]: """ - Calculate the state root for the given `alloc`. + Construct arguments for t8n interaction via streams """ - env: Dict[str, Any] = { - "currentCoinbase": "0x0000000000000000000000000000000000000000", - "currentDifficulty": "0x0", - "currentGasLimit": "0x0", - "currentNumber": "0", - "currentTimestamp": "0", - } + command: list[str] = [str(self.binary)] + if self.t8n_subcommand: + command.append(self.t8n_subcommand) - if fork.header_base_fee_required(0, 0): - env["currentBaseFee"] = "7" + args = command + [ + "--input.alloc=stdin", + "--input.txs=stdin", + "--input.env=stdin", + "--output.result=stdout", + "--output.alloc=stdout", + "--output.body=stdout", + f"--state.fork={t8n_data.fork_name}", + f"--state.chainid={t8n_data.chain_id}", + f"--state.reward={t8n_data.reward}", + ] - if fork.header_prev_randao_required(0, 0): - env["currentRandom"] = "0" + if self.trace: + args.append("--trace") + args.append(f"--output.basedir={temp_dir.name}") + return args - if fork.header_withdrawals_required(0, 0): - env["withdrawals"] = [] + def dump_debug_stream( + self, + debug_output_path: str, + temp_dir: tempfile.TemporaryDirectory, + stdin: Dict[str, Any], + args: List[str], + result: subprocess.CompletedProcess, + ): + """ + Export debug files if requested when interacting with t8n via streams + """ + if not debug_output_path: + return - if fork.header_excess_blob_gas_required(0, 0): - env["currentExcessBlobGas"] = "0" + t8n_call = " ".join(args) + t8n_output_base_dir = os.path.join(debug_output_path, "t8n.sh.out") + if self.trace: + t8n_call = t8n_call.replace(temp_dir.name, t8n_output_base_dir) + t8n_script = textwrap.dedent( + f"""\ + #!/bin/bash + rm -rf {debug_output_path}/t8n.sh.out # hard-coded to avoid surprises + mkdir {debug_output_path}/t8n.sh.out # unused if tracing is not enabled + {t8n_call} < {debug_output_path}/stdin.txt + """ + ) + dump_files_to_directory( + debug_output_path, + { + "args.py": args, + "input/alloc.json": stdin["alloc"], + "input/env.json": stdin["env"], + "input/txs.json": stdin["txs"], + "returncode.txt": result.returncode, + "stdin.txt": stdin, + "stdout.txt": result.stdout.decode(), + "stderr.txt": result.stderr.decode(), + "t8n.sh+x": 
t8n_script, + }, + ) - if fork.header_beacon_root_required(0, 0): - env[ - "parentBeaconBlockRoot" - ] = "0x0000000000000000000000000000000000000000000000000000000000000000" + def evaluate( + self, + *, + alloc: Any, + txs: Any, + env: Any, + fork_name: str, + chain_id: int = 1, + reward: int = 0, + eips: Optional[List[int]] = None, + debug_output_path: str = "", + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """ + Executes the relevant evaluate method as required by the `t8n` tool. - new_alloc, result = self.evaluate( - alloc=alloc, - txs=[], - env=env, - fork_name=fork.fork(block_number=0, timestamp=0), - debug_output_path=debug_output_path, + If a client's `t8n` tool varies from the default behavior, this method + can be overridden. + """ + if eips is not None: + fork_name = "+".join([fork_name] + [str(eip) for eip in eips]) + if int(env["currentNumber"], 0) == 0: + reward = -1 + t8n_data = TransitionTool.TransitionToolData( + alloc=alloc, txs=txs, env=env, fork_name=fork_name, chain_id=chain_id, reward=reward ) - state_root = result.get("stateRoot") - if state_root is None or not isinstance(state_root, str): - raise Exception("Unable to calculate state root") - return new_alloc, bytes.fromhex(state_root[2:]) + + if self.t8n_use_stream: + return self._evaluate_stream(t8n_data=t8n_data, debug_output_path=debug_output_path) + else: + return self._evaluate_filesystem( + t8n_data=t8n_data, + debug_output_path=debug_output_path, + ) def verify_fixture( self, fixture_format: FixtureFormats, fixture_path: Path, debug_output_path: Optional[Path] diff --git a/src/pytest_plugins/forks/forks.py b/src/pytest_plugins/forks/forks.py index 61e7b70374..18cbad1560 100644 --- a/src/pytest_plugins/forks/forks.py +++ b/src/pytest_plugins/forks/forks.py @@ -1,6 +1,7 @@ """ Pytest plugin to enable fork range configuration for the test session. """ + import itertools import sys import textwrap @@ -13,7 +14,6 @@ from ethereum_test_forks import ( Fork, ForkAttribute, - forks_from_until, get_deployed_forks, get_forks, get_transition_forks, @@ -176,14 +176,17 @@ def pytest_configure(config): for d in fork_covariant_descriptors: config.addinivalue_line("markers", f"{d.marker_name}: {d.description}") - single_fork = config.getoption("single_fork") - forks_from = config.getoption("forks_from") - forks_until = config.getoption("forks_until") + def get_fork_option(config, option_name): + """Post-process get option to allow for external fork conditions.""" + option = config.getoption(option_name) + return "Paris" if option == "Merge" else option + + single_fork = get_fork_option(config, "single_fork") + forks_from = get_fork_option(config, "forks_from") + forks_until = get_fork_option(config, "forks_until") show_fork_help = config.getoption("show_fork_help") - all_forks = get_forks() - # TODO: Tricky, this removes the *Glacier forks. - config.all_forks = forks_from_until(all_forks[0], all_forks[-1]) + config.all_forks = [fork for fork in get_forks() if not fork.ignore()] config.fork_map = {fork.name(): fork for fork in config.all_forks} config.fork_names = list(config.fork_map.keys()) @@ -428,17 +431,19 @@ def pytest_generate_tests(metafunc): ) else: pytest_params = [ - ForkParametrizer( - fork=fork, - mark=pytest.mark.skip( - reason=( - f"Fork '{fork}' unsupported by " - f"'{metafunc.config.getoption('evm_bin')}'." - ) - ), + ( + ForkParametrizer( + fork=fork, + mark=pytest.mark.skip( + reason=( + f"Fork '{fork}' unsupported by " + f"'{metafunc.config.getoption('evm_bin')}'." 
+ ) + ), + ) + if fork.name() in metafunc.config.unsupported_forks + else ForkParametrizer(fork=fork) ) - if fork.name() in metafunc.config.unsupported_forks - else ForkParametrizer(fork=fork) for fork in intersection_range ] add_fork_covariant_parameters(metafunc, pytest_params) diff --git a/src/pytest_plugins/forks/tests/test_bad_command_line_options.py b/src/pytest_plugins/forks/tests/test_bad_command_line_options.py index 3444edd8ce..10ba0b7b14 100644 --- a/src/pytest_plugins/forks/tests/test_bad_command_line_options.py +++ b/src/pytest_plugins/forks/tests/test_bad_command_line_options.py @@ -44,8 +44,8 @@ ( "invalid_fork_range", ( - ("--from", "Merge", "--until", "Frontier"), - "--from Merge --until Frontier creates an empty fork range", + ("--from", "Paris", "--until", "Frontier"), + "--from Paris --until Frontier creates an empty fork range", ), ), ) diff --git a/src/pytest_plugins/forks/tests/test_bad_validity_markers.py b/src/pytest_plugins/forks/tests/test_bad_validity_markers.py index f6815b5c1e..f2dad60612 100644 --- a/src/pytest_plugins/forks/tests/test_bad_validity_markers.py +++ b/src/pytest_plugins/forks/tests/test_bad_validity_markers.py @@ -10,8 +10,8 @@ ( """ import pytest - @pytest.mark.valid_from("Merge") - @pytest.mark.valid_from("Merge") + @pytest.mark.valid_from("Paris") + @pytest.mark.valid_from("Paris") def test_case(state_test): assert 0 """, @@ -23,8 +23,8 @@ def test_case(state_test): ( """ import pytest - @pytest.mark.valid_until("Merge") - @pytest.mark.valid_until("Merge") + @pytest.mark.valid_until("Paris") + @pytest.mark.valid_until("Paris") def test_case(state_test): assert 0 """, @@ -36,8 +36,8 @@ def test_case(state_test): ( """ import pytest - @pytest.mark.valid_at_transition_to("Merge") - @pytest.mark.valid_at_transition_to("Merge") + @pytest.mark.valid_at_transition_to("Paris") + @pytest.mark.valid_at_transition_to("Paris") def test_case(state_test): assert 0 """, @@ -85,7 +85,7 @@ def test_case(state_test): ( """ import pytest - @pytest.mark.valid_from("Merge", "Merge") + @pytest.mark.valid_from("Paris", "Paris") def test_case(state_test): assert 0 """, @@ -97,7 +97,7 @@ def test_case(state_test): ( """ import pytest - @pytest.mark.valid_until("Merge", "Merge") + @pytest.mark.valid_until("Paris", "Paris") def test_case(state_test): assert 0 """, @@ -109,7 +109,7 @@ def test_case(state_test): ( """ import pytest - @pytest.mark.valid_at_transition_to("Merge", "Merge") + @pytest.mark.valid_at_transition_to("Paris", "Paris") def test_case(state_test): assert 0 """, @@ -158,7 +158,7 @@ def test_case(state_test): """ import pytest @pytest.mark.valid_at_transition_to("Cancun") - @pytest.mark.valid_from("Merge") + @pytest.mark.valid_from("Paris") def test_case(state_test): assert 0 """, @@ -183,7 +183,7 @@ def test_case(state_test): ( """ import pytest - @pytest.mark.valid_from("Merge") + @pytest.mark.valid_from("Paris") @pytest.mark.valid_until("Frontier") def test_case(state_test): assert 0 diff --git a/src/pytest_plugins/forks/tests/test_forks.py b/src/pytest_plugins/forks/tests/test_forks.py index ae45d7f40e..7e6c4e0f66 100644 --- a/src/pytest_plugins/forks/tests/test_forks.py +++ b/src/pytest_plugins/forks/tests/test_forks.py @@ -4,7 +4,14 @@ import pytest -from ethereum_test_forks import ArrowGlacier, forks_from_until, get_deployed_forks, get_forks +from ethereum_test_forks import ( + ArrowGlacier, + Paris, + forks_from_until, + get_deployed_forks, + get_forks, +) +from ethereum_test_tools import StateTest @pytest.fixture @@ -22,10 +29,10 @@ def 
test_no_options_no_validity_marker(pytester): - no fork validity marker. """ pytester.makepyfile( - """ + f""" import pytest - def test_all_forks(state_test): + def test_all_forks({StateTest.pytest_parameter_name()}): pass """ ) @@ -33,17 +40,25 @@ def test_all_forks(state_test): result = pytester.runpytest("-v") all_forks = get_deployed_forks() forks_under_test = forks_from_until(all_forks[0], all_forks[-1]) + expected_passed = len(forks_under_test) * len(StateTest.fixture_formats()) + stdout = "\n".join(result.stdout.lines) for fork in forks_under_test: - assert f":test_all_forks[fork={fork}]" in "\n".join(result.stdout.lines) + for fixture_format in StateTest.fixture_formats(): + if fixture_format.name.endswith("HIVE") and fork < Paris: + expected_passed -= 1 + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" not in stdout + continue + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" in stdout + result.assert_outcomes( - passed=len(forks_under_test), + passed=expected_passed, failed=0, skipped=0, errors=0, ) -@pytest.mark.parametrize("fork", ["London", "Merge"]) +@pytest.mark.parametrize("fork", ["London", "Paris"]) def test_from_london_option_no_validity_marker(pytester, fork_map, fork): """ Test test parametrization with: @@ -52,10 +67,10 @@ def test_from_london_option_no_validity_marker(pytester, fork_map, fork): - no fork validity marker. """ pytester.makepyfile( - """ + f""" import pytest - def test_all_forks(state_test): + def test_all_forks({StateTest.pytest_parameter_name()}): pass """ ) @@ -63,10 +78,17 @@ def test_all_forks(state_test): result = pytester.runpytest("-v", "--from", fork) all_forks = get_deployed_forks() forks_under_test = forks_from_until(fork_map[fork], all_forks[-1]) - for fork_under_test in forks_under_test: - assert f":test_all_forks[fork={fork_under_test}]" in "\n".join(result.stdout.lines) + expected_passed = len(forks_under_test) * len(StateTest.fixture_formats()) + stdout = "\n".join(result.stdout.lines) + for fork in forks_under_test: + for fixture_format in StateTest.fixture_formats(): + if fixture_format.name.endswith("HIVE") and fork < Paris: + expected_passed -= 1 + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" not in stdout + continue + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" in stdout result.assert_outcomes( - passed=len(forks_under_test), + passed=expected_passed, failed=0, skipped=0, errors=0, @@ -81,22 +103,61 @@ def test_from_london_until_shanghai_option_no_validity_marker(pytester, fork_map - no fork validity marker. 
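The `expected_passed` bookkeeping in these fork-plugin tests follows a simple rule: each fork is parametrized once per fixture format, and hive-format parametrizations are not generated for forks before Paris. A standalone sketch of that arithmetic, with illustrative fork and format names:

```python
def expected_passed(forks, fixture_format_names, first_paris_index):
    """Count parametrizations: hive formats only exist from Paris onwards."""
    total = 0
    for fork_index, _fork in enumerate(forks):
        for format_name in fixture_format_names:
            if format_name.endswith("HIVE") and fork_index < first_paris_index:
                continue  # not parametrized for pre-Paris forks
            total += 1
    return total


forks = ["London", "Paris", "Shanghai"]
formats = ["STATE_TEST", "BLOCKCHAIN_TEST", "BLOCKCHAIN_TEST_HIVE"]
assert expected_passed(forks, formats, first_paris_index=1) == 8
```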
""" pytester.makepyfile( - """ + f""" import pytest - def test_all_forks(state_test): + def test_all_forks({StateTest.pytest_parameter_name()}): pass """ ) pytester.copy_example(name="pytest.ini") result = pytester.runpytest("-v", "--from", "London", "--until", "Shanghai") forks_under_test = forks_from_until(fork_map["London"], fork_map["Shanghai"]) + expected_passed = len(forks_under_test) * len(StateTest.fixture_formats()) + stdout = "\n".join(result.stdout.lines) if ArrowGlacier in forks_under_test: forks_under_test.remove(ArrowGlacier) - for fork_under_test in forks_under_test: - assert f":test_all_forks[fork={fork_under_test}]" in "\n".join(result.stdout.lines) + expected_passed -= len(StateTest.fixture_formats()) + for fork in forks_under_test: + for fixture_format in StateTest.fixture_formats(): + if fixture_format.name.endswith("HIVE") and fork < Paris: + expected_passed -= 1 + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" not in stdout + continue + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" in stdout + result.assert_outcomes( + passed=expected_passed, + failed=0, + skipped=0, + errors=0, + ) + + +def test_from_merge_until_merge_option_no_validity_marker(pytester, fork_map): + """ + Test test parametrization with: + - --from Merge command-line option, + - --until Merge command-line option, + - no fork validity marker. + """ + pytester.makepyfile( + f""" + import pytest + + def test_all_forks({StateTest.pytest_parameter_name()}): + pass + """ + ) + pytester.copy_example(name="pytest.ini") + result = pytester.runpytest("-v", "--from", "Merge", "--until", "Merge") + forks_under_test = forks_from_until(fork_map["Paris"], fork_map["Paris"]) + expected_passed = len(forks_under_test) * len(StateTest.fixture_formats()) + stdout = "\n".join(result.stdout.lines) + for fork in forks_under_test: + for fixture_format in StateTest.fixture_formats(): + assert f":test_all_forks[fork_{fork}-{fixture_format.name.lower()}]" in stdout result.assert_outcomes( - passed=len(forks_under_test), + passed=expected_passed, failed=0, skipped=0, errors=0, diff --git a/src/pytest_plugins/test_filler/test_filler.py b/src/pytest_plugins/test_filler/test_filler.py index 198245e42b..44cffb5faa 100644 --- a/src/pytest_plugins/test_filler/test_filler.py +++ b/src/pytest_plugins/test_filler/test_filler.py @@ -5,28 +5,21 @@ and that modifies pytest hooks in order to fill test specs for all tests and writes the generated fixtures to file. 
""" -import json -import os -import re -import shutil + +import warnings from pathlib import Path -from typing import Any, Dict, Generator, List, Literal, Optional, Tuple, Type, Union +from typing import Generator, List, Optional, Type import pytest -from ethereum_test_forks import Fork -from ethereum_test_tools import ( - BaseTest, - BaseTestConfig, - BlockchainTest, - BlockchainTestFiller, - Fixture, - HiveFixture, - StateTest, - StateTestFiller, - Yul, - fill_test, +from ethereum_test_forks import ( + Fork, + Frontier, + Paris, + get_closest_fork_with_solc_support, + get_forks_with_solc_support, ) +from ethereum_test_tools import SPEC_TYPES, BaseTest, FixtureCollector, TestInfo, Yul from evm_transition_tool import FixtureFormats, TransitionTool from pytest_plugins.spec_version_checker.spec_version_checker import EIPSpecTestItem @@ -95,6 +88,7 @@ def pytest_addoption(parser): action="store", dest="filler_path", default="./tests/", + type=Path, help="Path to filler directives", ) test_group.addoption( @@ -112,11 +106,14 @@ def pytest_addoption(parser): help="Output each test case in the directory without the folder structure.", ) test_group.addoption( - "--enable-hive", + "--single-fixture-per-file", action="store_true", - dest="enable_hive", + dest="single_fixture_per_file", default=False, - help="Output test fixtures with the hive-specific properties.", + help=( + "Don't group fixtures in JSON files by test function; write each fixture to its own " + "file. This can be used to increase the granularity of --verify-fixtures." + ), ) debug_group = parser.getgroup("debug", "Arguments defining debug behavior") @@ -124,7 +121,7 @@ def pytest_addoption(parser): "--evm-dump-dir", "--t8n-dump-dir", action="store", - dest="t8n_dump_dir", + dest="base_dump_dir", default="", help="Path to dump the transition tool debug output.", ) @@ -138,14 +135,15 @@ def pytest_configure(config): Custom marker registration: https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers """ - config.addinivalue_line( - "markers", - "state_test: a test case that implement a single state transition test.", - ) - config.addinivalue_line( - "markers", - "blockchain_test: a test case that implements a block transition test.", - ) + for fixture_format in FixtureFormats: + config.addinivalue_line( + "markers", + ( + f"{fixture_format.name.lower()}: " + f"{FixtureFormats.get_format_description(fixture_format)}" + ), + ) + config.addinivalue_line( "markers", "yul_test: a test case that compiles Yul code.", @@ -170,6 +168,13 @@ def pytest_configure(config): "The Besu t8n tool does not work well with the xdist plugin; use -n=0.", returncode=pytest.ExitCode.USAGE_ERROR, ) + config.solc_version = Yul("", binary=config.getoption("solc_bin")).version() + if config.solc_version < Frontier.solc_min_version(): + pytest.exit( + f"Unsupported solc version: {config.solc_version}. 
Minimum required version is " + f"{Frontier.solc_min_version()}", + returncode=pytest.ExitCode.USAGE_ERROR, + ) @pytest.hookimpl(trylast=True) @@ -179,8 +184,7 @@ def pytest_report_header(config, start_path): return binary_path = config.getoption("evm_bin") t8n = TransitionTool.from_binary_path(binary_path=binary_path) - solc_version_string = Yul("", binary=config.getoption("solc_bin")).version() - return [f"{t8n.version()}, solc version {solc_version_string}"] + return [f"{t8n.version()}, solc version {config.solc_version}"] @pytest.fixture(autouse=True, scope="session") @@ -231,19 +235,13 @@ def do_fixture_verification(request, t8n) -> bool: do_fixture_verification = True if request.config.getoption("verify_fixtures"): do_fixture_verification = True - if do_fixture_verification and request.config.getoption("enable_hive"): - pytest.exit( - "Hive fixtures can not be verify using geth's evm tool: " - "Remove --enable-hive to verify test fixtures.", - returncode=pytest.ExitCode.USAGE_ERROR, - ) return do_fixture_verification @pytest.fixture(autouse=True, scope="session") def evm_fixture_verification( request, do_fixture_verification: bool, evm_bin: Path, verify_fixtures_bin: Path -) -> Optional[Generator[TransitionTool, None, None]]: +) -> Generator[Optional[TransitionTool], None, None]: """ Returns the configured evm binary for executing statetest and blocktest commands used to verify generated JSON fixtures. @@ -264,83 +262,21 @@ def evm_fixture_verification( evm_fixture_verification.shutdown() -@pytest.fixture(autouse=True, scope="session") -def base_test_config(request) -> BaseTestConfig: - """ - Returns the base test configuration that all tests must use. - """ - config = BaseTestConfig() - config.enable_hive = request.config.getoption("enable_hive") - return config - - -def strip_test_prefix(name: str) -> str: - """ - Removes the test prefix from a test case name. - """ - TEST_PREFIX = "test_" - if name.startswith(TEST_PREFIX): - return name[len(TEST_PREFIX) :] - return name - - -def convert_test_id_to_test_name_and_parameters(name: str) -> Tuple[str, str]: - """ - Converts a test name to a tuple containing the test name and test parameters. - - Example: - test_push0_key_sstore[fork=Shanghai] -> test_push0_key_sstore, fork_Shanghai - """ - test_name, parameters = name.split("[") - return test_name, re.sub(r"[\[=\-]", "_", parameters).replace("]", "") - - -def get_module_relative_output_dir(test_module: Path, filler_path: Path) -> Path: - """ - Return a directory name for the provided test_module (relative to the - base ./tests directory) that can be used for output (within the - configured fixtures output path or the evm_t8n_dump_dir directory). - - Example: - tests/shanghai/eip3855_push0/test_push0.py -> shanghai/eip3855_push0/test_push0 - """ - basename = test_module.with_suffix("").absolute() - basename_relative = basename.relative_to(filler_path.absolute()) - module_path = basename_relative.parent / basename_relative.stem - return module_path - - -def get_evm_dump_dir( - evm_dump_dir: str, - node: pytest.Item, - filler_path: Path, - level: Literal["test_module", "test_function", "test_parameter"] = "test_parameter", -) -> Optional[Path]: - """ - The directory to dump the evm transition tool debug output. 
- """ - test_module_relative_dir = get_module_relative_output_dir(Path(node.path), filler_path) - if level == "test_module": - return Path(evm_dump_dir) / Path(str(test_module_relative_dir).replace(os.sep, "__")) - test_name, test_parameter_string = convert_test_id_to_test_name_and_parameters(node.name) - flat_path = f"{str(test_module_relative_dir).replace(os.sep, '__')}__{test_name}" - if level == "test_function": - return Path(evm_dump_dir) / flat_path - elif level == "test_parameter": - return Path(evm_dump_dir) / flat_path / test_parameter_string - raise Exception("Unexpected level.") - - @pytest.fixture(scope="session") -def evm_dump_dir(request) -> Path: +def base_dump_dir(request) -> Optional[Path]: """ The base directory to dump the evm debug output. """ - return request.config.getoption("t8n_dump_dir") + base_dump_dir_str = request.config.getoption("base_dump_dir") + if base_dump_dir_str: + return Path(base_dump_dir_str) + return None @pytest.fixture(scope="function") -def evm_dump_dir_parameter_level(request, filler_path: Path) -> Optional[Path]: +def dump_dir_parameter_level( + request, base_dump_dir: Optional[Path], filler_path: Path +) -> Optional[Path]: """ The directory to dump evm transition tool debug output on a test parameter level. @@ -348,128 +284,31 @@ def evm_dump_dir_parameter_level(request, filler_path: Path) -> Optional[Path]: Example with --evm-dump-dir=/tmp/evm: -> /tmp/evm/shanghai__eip3855_push0__test_push0__test_push0_key_sstore/fork_shanghai/ """ - evm_dump_dir = request.config.getoption("t8n_dump_dir") - if not evm_dump_dir: - return None - return get_evm_dump_dir(evm_dump_dir, request.node, filler_path, level="test_parameter") - + return node_to_test_info(request.node).get_dump_dir_path( + base_dump_dir, + filler_path, + level="test_parameter", + ) -@pytest.fixture(scope="module") -def evm_dump_dir_module_level(request, filler_path: Path) -> Optional[Path]: - """ - A helper fixture to get the directory to dump evm transition tool debug - output on the module level. - Note: We never write output to this level; we actually want to write - output on the function level. Reason: This is used by the - `fixture_collector` which must be scoped on the module level in order to - work with the xdist plugin, i.e., we can't pass a function-scoped fixture - to the `fixture_collector` fixture; it must construct the rest of the - path itself. +def get_fixture_collection_scope(fixture_name, config): """ - evm_dump_dir = request.config.getoption("t8n_dump_dir") - if not evm_dump_dir: - return None - return get_evm_dump_dir(evm_dump_dir, request.node, filler_path, level="test_module") + Return the appropriate scope to write fixture JSON files. - -class FixtureCollector: - """ - Collects all fixtures generated by the test cases. + See: https://docs.pytest.org/en/stable/how-to/fixtures.html#dynamic-scope """ - - all_fixtures: Dict[Path, List[Tuple[str, Any, FixtureFormats]]] - output_dir: str - flat_output: bool - json_path_to_fixture_type: Dict[Path, FixtureFormats] - - def __init__(self, output_dir: str, flat_output: bool) -> None: - self.all_fixtures = {} - self.output_dir = output_dir - self.flat_output = flat_output - self.json_path_to_fixture_type = {} - - def add_fixture( - self, item, fixture: Optional[Union[Fixture, HiveFixture]], fixture_format: FixtureFormats - ) -> None: - """ - Adds a fixture to the list of fixtures of a given test case. - """ - # TODO: remove this logic. 
if hive enabled set --from to Merge - if fixture is None: - return - - # NOTE: We strip the 'test_' prefix from the test module and the test function names. - fixture_basename: Path - if self.flat_output: - fixture_basename = Path(strip_test_prefix(item.originalname)) - else: - relative_fixture_output_dir = Path(item.path).parent / strip_test_prefix( - Path(item.path).stem - ) - module_relative_output_dir = get_module_relative_output_dir( - relative_fixture_output_dir, Path(item.funcargs["filler_path"]) - ) - fixture_basename = module_relative_output_dir / strip_test_prefix(item.originalname) - - if fixture_basename not in self.all_fixtures: - self.all_fixtures[fixture_basename] = [] - m = re.match(r".*?\[(.*)\]", item.name) - if not m: - raise Exception("Could not parse test name: " + item.name) - name = m.group(1) - if fixture.name: - name += "-" + fixture.name - jsonFixture = fixture.to_json() - self.all_fixtures[fixture_basename].append((name, jsonFixture, fixture_format)) - - def dump_fixtures(self) -> None: - """ - Dumps all collected fixtures to their respective files. - """ - os.makedirs(self.output_dir, exist_ok=True) - for fixture_basename, fixtures in self.all_fixtures.items(): - output_json = {} - for index, fixture_props in enumerate(fixtures): - name, fixture, fixture_format = fixture_props - name = str(index).zfill(3) + "-" + name - output_json[name] = fixture - file_path = self.output_dir / fixture_basename.with_suffix(".json") - if not self.flat_output: - os.makedirs(file_path.parent, exist_ok=True) - with open(file_path, "w") as f: - json.dump(output_json, f, indent=4) - # All tests have same format within one file (one json file ^= single test function). - self.json_path_to_fixture_type[Path(file_path)] = fixture_format - - def copy_fixture_file_to_dump_dir(self, evm_dump_dir: Path) -> None: - """ - Copy the generated fixture files to the evm_dump_dir directory. - """ - for fixture_path, fixture_format in self.json_path_to_fixture_type.items(): - test_dump_dir = self._get_test_dump_dir(evm_dump_dir, fixture_path) - shutil.copy(fixture_path, str(test_dump_dir)) - - def verify_fixture_files( - self, evm_fixture_verification: TransitionTool, evm_dump_dir: Path - ) -> None: - """ - Runs `evm [state|block]test` on each fixture. - """ - for fixture_path, fixture_format in self.json_path_to_fixture_type.items(): - test_dump_dir = self._get_test_dump_dir(evm_dump_dir, fixture_path) - evm_fixture_verification.verify_fixture(fixture_format, fixture_path, test_dump_dir) - - def _get_test_dump_dir(self, evm_dump_dir: Path, fixture_path: Path) -> Optional[Path]: - if evm_dump_dir: - # NOTE: Here we add the 'test_' prefix back to get the dump dir! 
- return Path(f"{evm_dump_dir}__test_{fixture_path.stem}") - return None + if config.getoption("single_fixture_per_file"): + return "function" + return "module" -@pytest.fixture(scope="module") +@pytest.fixture(scope=get_fixture_collection_scope) def fixture_collector( - request, do_fixture_verification, evm_fixture_verification, evm_dump_dir_module_level + request, + do_fixture_verification: bool, + evm_fixture_verification: TransitionTool, + filler_path: Path, + base_dump_dir: Optional[Path], ): """ Returns the configured fixture collector instance used for all tests @@ -478,13 +317,14 @@ def fixture_collector( fixture_collector = FixtureCollector( output_dir=request.config.getoption("output"), flat_output=request.config.getoption("flat_output"), + single_fixture_per_file=request.config.getoption("single_fixture_per_file"), + filler_path=filler_path, + base_dump_dir=base_dump_dir, ) yield fixture_collector fixture_collector.dump_fixtures() - if evm_dump_dir_module_level: - fixture_collector.copy_fixture_file_to_dump_dir(evm_dump_dir_module_level) if do_fixture_verification: - fixture_collector.verify_fixture_files(evm_fixture_verification, evm_dump_dir_module_level) + fixture_collector.verify_fixture_files(evm_fixture_verification) @pytest.fixture(autouse=True, scope="session") @@ -492,7 +332,7 @@ def filler_path(request) -> Path: """ Returns the directory containing the tests to execute. """ - return Path(request.config.getoption("filler_path")) + return request.config.getoption("filler_path") @pytest.fixture(autouse=True) @@ -521,140 +361,156 @@ class so that upon instantiation within the test case, it provides the Test cases can override the default value by specifying a fixed version with the @pytest.mark.compile_yul_with(FORK) marker. """ + solc_target_fork: Fork | None marker = request.node.get_closest_marker("compile_yul_with") if marker: if not marker.args[0]: pytest.fail( f"{request.node.name}: Expected one argument in 'compile_yul_with' marker." ) - fork = request.config.fork_map[marker.args[0]] + solc_target_fork = request.config.fork_map[marker.args[0]] + assert solc_target_fork in get_forks_with_solc_support(request.config.solc_version) + else: + solc_target_fork = get_closest_fork_with_solc_support(fork, request.config.solc_version) + assert solc_target_fork is not None, "No fork supports provided solc version." + if solc_target_fork != fork and request.config.getoption("verbose") >= 1: + warnings.warn(f"Compiling Yul for {solc_target_fork.name()}, not {fork.name()}.") class YulWrapper(Yul): def __init__(self, *args, **kwargs): - super(YulWrapper, self).__init__(*args, **kwargs, fork=fork) + super(YulWrapper, self).__init__(*args, **kwargs, fork=solc_target_fork) return YulWrapper -SPEC_TYPES: List[Type[BaseTest]] = [StateTest, BlockchainTest] SPEC_TYPES_PARAMETERS: List[str] = [s.pytest_parameter_name() for s in SPEC_TYPES] -@pytest.fixture(scope="function") -def fixture_format(request) -> FixtureFormats: +def node_to_test_info(node) -> TestInfo: """ - Returns the test format of the current test case. + Returns the test info of the current node item. 
""" - enable_hive = request.config.getoption("enable_hive") - has_blockchain_test_format = set(["state_test", "blockchain_test"]) & set(request.fixturenames) - if has_blockchain_test_format and enable_hive: - return FixtureFormats.BLOCKCHAIN_TEST_HIVE - elif has_blockchain_test_format and not enable_hive: - return FixtureFormats.BLOCKCHAIN_TEST - raise Exception("Unknown fixture format.") + return TestInfo( + name=node.name, + id=node.nodeid, + original_name=node.originalname, + path=Path(node.path), + ) -@pytest.fixture(scope="function") -def state_test( - request, - t8n, - fork, - reference_spec, - eips, - evm_dump_dir_parameter_level, - fixture_collector, - fixture_format, - base_test_config, -) -> StateTestFiller: - """ - Fixture used to instantiate an auto-fillable StateTest object from within - a test function. - - Every test that defines a StateTest filler must explicitly specify this - fixture in its function arguments. - - Implementation detail: It must be scoped on test function level to avoid +def base_test_parametrizer(cls: Type[BaseTest]): + """ + Generates a pytest.fixture for a given BaseTest subclass. + + Implementation detail: All spec fixtures must be scoped on test function level to avoid leakage between tests. """ - class StateTestWrapper(StateTest): - def __init__(self, *args, **kwargs): - kwargs["base_test_config"] = base_test_config - kwargs["t8n_dump_dir"] = evm_dump_dir_parameter_level - super(StateTestWrapper, self).__init__(*args, **kwargs) - fixture_collector.add_fixture( - request.node, - fill_test( + @pytest.fixture( + scope="function", + name=cls.pytest_parameter_name(), + ) + def base_test_parametrizer_func( + request, + t8n, + fork, + reference_spec, + eips, + dump_dir_parameter_level, + fixture_collector, + ): + """ + Fixture used to instantiate an auto-fillable BaseTest object from within + a test function. + + Every test that defines a test filler must explicitly specify its parameter name + (see `pytest_parameter_name` in each implementation of BaseTest) in its function + arguments. + + When parametrizing, indirect must be used along with the fixture format as value. + """ + fixture_format = request.param + assert isinstance(fixture_format, FixtureFormats) + + class BaseTestWrapper(cls): + def __init__(self, *args, **kwargs): + kwargs["fixture_format"] = fixture_format + kwargs["t8n_dump_dir"] = dump_dir_parameter_level + super(BaseTestWrapper, self).__init__(*args, **kwargs) + fixture = self.generate( t8n, - self, fork, - reference_spec, eips=eips, - ), - fixture_format, - ) + ) + fixture.fill_info(t8n, reference_spec) - return StateTestWrapper + fixture_collector.add_fixture( + node_to_test_info(request.node), + fixture, + ) + return BaseTestWrapper + + return base_test_parametrizer_func -@pytest.fixture(scope="function") -def blockchain_test( - request, - t8n, - fork, - reference_spec, - eips, - evm_dump_dir_parameter_level, - fixture_collector, - fixture_format, - base_test_config, -) -> BlockchainTestFiller: - """ - Fixture used to define an auto-fillable BlockchainTest analogous to the - state_test fixture for StateTests. - See the state_test fixture docstring for details. 
- """ - - class BlockchainTestWrapper(BlockchainTest): - def __init__(self, *args, **kwargs): - kwargs["base_test_config"] = base_test_config - kwargs["t8n_dump_dir"] = evm_dump_dir_parameter_level - super(BlockchainTestWrapper, self).__init__(*args, **kwargs) - fixture_collector.add_fixture( - request.node, - fill_test( - t8n, - self, - fork, - reference_spec, - eips=eips, - ), - fixture_format, - ) - return BlockchainTestWrapper +# Dynamically generate a pytest fixture for each test spec type. +for cls in SPEC_TYPES: + # Fixture needs to be defined in the global scope so pytest can detect it. + globals()[cls.pytest_parameter_name()] = base_test_parametrizer(cls) -def pytest_collection_modifyitems(items, config): +def pytest_generate_tests(metafunc): + """ + Pytest hook used to dynamically generate test cases for each fixture format a given + test spec supports. """ - A pytest hook called during collection, after all items have been - collected. + for test_type in SPEC_TYPES: + if test_type.pytest_parameter_name() in metafunc.fixturenames: + metafunc.parametrize( + [test_type.pytest_parameter_name()], + [ + pytest.param( + fixture_format, + id=fixture_format.name.lower(), + marks=[getattr(pytest.mark, fixture_format.name.lower())], + ) + for fixture_format in test_type.fixture_formats() + ], + scope="function", + indirect=True, + ) + - Here we dynamically apply "state_test" or "blockchain_test" markers - to a test if the test function uses the corresponding fixture. +def pytest_collection_modifyitems(config, items): """ - for item in items: + Remove pre-Paris tests parametrized to generate hive type fixtures; these + can't be used in the Hive Pyspec Simulator. + + This can't be handled in this plugins pytest_generate_tests() as the fork + parametrization occurs in the forks plugin. + """ + for item in items[:]: # use a copy of the list, as we'll be modifying it if isinstance(item, EIPSpecTestItem): continue - if "state_test" in item.fixturenames: - marker = pytest.mark.state_test() - item.add_marker(marker) - elif "blockchain_test" in item.fixturenames: - marker = pytest.mark.blockchain_test() - item.add_marker(marker) + if "fork" not in item.callspec.params or item.callspec.params["fork"] is None: + items.remove(item) + continue + if item.callspec.params["fork"] < Paris: + # Even though the `state_test` test spec does not produce a hive STATE_TEST, it does + # produce a BLOCKCHAIN_TEST_HIVE, so we need to remove it here. + # TODO: Ideally, the logic could be contained in the `FixtureFormat` class, we create + # a `fork_supported` method that returns True if the fork is supported. + if ("state_test" in item.callspec.params) and item.callspec.params[ + "state_test" + ].name.endswith("HIVE"): + items.remove(item) + if ("blockchain_test" in item.callspec.params) and item.callspec.params[ + "blockchain_test" + ].name.endswith("HIVE"): + items.remove(item) if "yul" in item.fixturenames: - marker = pytest.mark.yul_test() - item.add_marker(marker) + item.add_marker(pytest.mark.yul_test) def pytest_make_parametrize_id(config, val, argname): @@ -662,7 +518,7 @@ def pytest_make_parametrize_id(config, val, argname): Pytest hook called when generating test ids. We use this to generate more readable test ids for the generated tests. 
""" - return f"{argname}={val}" + return f"{argname}_{val}" def pytest_runtest_call(item): diff --git a/src/pytest_plugins/test_filler/tests/test_test_filler.py b/src/pytest_plugins/test_filler/tests/test_test_filler.py new file mode 100644 index 0000000000..8b6eb071e3 --- /dev/null +++ b/src/pytest_plugins/test_filler/tests/test_test_filler.py @@ -0,0 +1,501 @@ +""" +Test the forks plugin. +""" +import json +import os +import textwrap +from pathlib import Path + +import pytest + + +# flake8: noqa +def get_all_files_in_directory(base_dir): # noqa: D103 + base_path = Path(base_dir) + return [f.relative_to(os.getcwd()) for f in base_path.rglob("*") if f.is_file()] + + +def count_keys_in_fixture(file_path): # noqa: D103 + with open(file_path, "r") as f: + data = json.load(f) + if not isinstance(data, dict): # Ensure the loaded data is a dictionary + raise ValueError( + f"Expected a dictionary in {file_path}, but got {type(data).__name__}." + ) + return len(data) + + +test_module_paris = textwrap.dedent( + """\ + import pytest + + from ethereum_test_tools import Account, Environment, TestAddress, Transaction + + @pytest.mark.valid_from("Paris") + @pytest.mark.valid_until("Shanghai") + def test_paris_one(state_test): + state_test(env=Environment(), + pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction()) + + @pytest.mark.valid_from("Paris") + @pytest.mark.valid_until("Shanghai") + def test_paris_two(state_test): + state_test(env=Environment(), + pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction()) + """ +) +test_count_paris = 4 + +test_module_shanghai = textwrap.dedent( + """\ + import pytest + + from ethereum_test_tools import Account, Environment, TestAddress, Transaction + + @pytest.mark.valid_from("Paris") + @pytest.mark.valid_until("Shanghai") + def test_shanghai_one(state_test): + state_test(env=Environment(), + pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction()) + + @pytest.mark.parametrize("x", [1, 2, 3]) + @pytest.mark.valid_from("Paris") + @pytest.mark.valid_until("Shanghai") + def test_shanghai_two(state_test, x): + state_test(env=Environment(), + pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction()) + """ +) + +test_count_shanghai = 8 +total_test_count = test_count_paris + test_count_shanghai + + +@pytest.mark.parametrize( + "args, expected_fixture_files, expected_fixture_counts", + [ + pytest.param( + [], + [ + Path("fixtures/blockchain_tests/paris/module_paris/paris_one.json"), + Path("fixtures/blockchain_tests_hive/paris/module_paris/paris_one.json"), + Path("fixtures/state_tests/paris/module_paris/paris_one.json"), + Path("fixtures/blockchain_tests/paris/module_paris/paris_two.json"), + Path("fixtures/blockchain_tests_hive/paris/module_paris/paris_two.json"), + Path("fixtures/state_tests/paris/module_paris/paris_two.json"), + Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one.json"), + Path("fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_one.json"), + Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_one.json"), + Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two.json"), + Path("fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two.json"), + Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_two.json"), + ], + [2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 6, 6], + id="default-args", + ), + pytest.param( + ["--flat-output"], + [ + Path("fixtures/blockchain_tests/paris_one.json"), + 
Path("fixtures/blockchain_tests_hive/paris_one.json"), + Path("fixtures/state_tests/paris_one.json"), + Path("fixtures/blockchain_tests/paris_two.json"), + Path("fixtures/blockchain_tests_hive/paris_two.json"), + Path("fixtures/state_tests/paris_two.json"), + Path("fixtures/blockchain_tests/shanghai_one.json"), + Path("fixtures/blockchain_tests_hive/shanghai_one.json"), + Path("fixtures/state_tests/shanghai_one.json"), + Path("fixtures/blockchain_tests/shanghai_two.json"), + Path("fixtures/blockchain_tests_hive/shanghai_two.json"), + Path("fixtures/state_tests/shanghai_two.json"), + ], + [2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 6, 6], + id="flat-output", + ), + pytest.param( + ["--flat-output", "--output", "other_fixtures"], + [ + Path("other_fixtures/blockchain_tests/paris_one.json"), + Path("other_fixtures/blockchain_tests_hive/paris_one.json"), + Path("other_fixtures/state_tests/paris_one.json"), + Path("other_fixtures/blockchain_tests/paris_two.json"), + Path("other_fixtures/blockchain_tests_hive/paris_two.json"), + Path("other_fixtures/state_tests/paris_two.json"), + Path("other_fixtures/blockchain_tests/shanghai_one.json"), + Path("other_fixtures/blockchain_tests_hive/shanghai_one.json"), + Path("other_fixtures/state_tests/shanghai_one.json"), + Path("other_fixtures/blockchain_tests/shanghai_two.json"), + Path("other_fixtures/blockchain_tests_hive/shanghai_two.json"), + Path("other_fixtures/state_tests/shanghai_two.json"), + ], + [2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 6, 6], + id="flat-output_custom-output-dir", + ), + pytest.param( + ["--single-fixture-per-file"], + [ + Path( + "fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Paris_blockchain_test.json" + ), + Path( + "fixtures/state_tests/paris/module_paris/paris_one__fork_Paris_state_test.json" + ), + Path( + "fixtures/blockchain_tests_hive/paris/module_paris/paris_one__fork_Paris_blockchain_test_hive.json" + ), + Path( + "fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Shanghai_blockchain_test.json" + ), + Path( + "fixtures/state_tests/paris/module_paris/paris_one__fork_Shanghai_state_test.json" + ), + Path( + "fixtures/blockchain_tests_hive/paris/module_paris/paris_one__fork_Shanghai_blockchain_test_hive.json" + ), + Path( + "fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Paris_blockchain_test.json" + ), + Path( + "fixtures/state_tests/paris/module_paris/paris_two__fork_Paris_state_test.json" + ), + Path( + "fixtures/blockchain_tests_hive/paris/module_paris/paris_two__fork_Paris_blockchain_test_hive.json" + ), + Path( + "fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Shanghai_blockchain_test.json" + ), + Path( + "fixtures/state_tests/paris/module_paris/paris_two__fork_Shanghai_state_test.json" + ), + Path( + "fixtures/blockchain_tests_hive/paris/module_paris/paris_two__fork_Shanghai_blockchain_test_hive.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_state_test.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test_hive.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_state_test.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test_hive.json" + ), 
+ Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_x_1.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_1.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_hive_x_1.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_x_2.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_2.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_hive_x_2.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_x_3.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_3.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_hive_x_3.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_x_1.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_1.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_hive_x_1.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_x_2.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_2.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_hive_x_2.json" + ), + Path( + "fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_x_3.json" + ), + Path( + "fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_3.json" + ), + Path( + "fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_hive_x_3.json" + ), + ], + [1] * 36, + id="single-fixture-per-file", + ), + pytest.param( + ["--single-fixture-per-file", "--output", "other_fixtures"], + [ + Path( + "other_fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Paris_blockchain_test.json" + ), + Path( + "other_fixtures/state_tests/paris/module_paris/paris_one__fork_Paris_state_test.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/paris/module_paris/paris_one__fork_Paris_blockchain_test_hive.json" + ), + Path( + "other_fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Shanghai_blockchain_test.json" + ), + Path( + "other_fixtures/state_tests/paris/module_paris/paris_one__fork_Shanghai_state_test.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/paris/module_paris/paris_one__fork_Shanghai_blockchain_test_hive.json" + ), + Path( + "other_fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Paris_blockchain_test.json" + ), + Path( + "other_fixtures/state_tests/paris/module_paris/paris_two__fork_Paris_state_test.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/paris/module_paris/paris_two__fork_Paris_blockchain_test_hive.json" + ), + Path( + "other_fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Shanghai_blockchain_test.json" + ), + Path( + "other_fixtures/state_tests/paris/module_paris/paris_two__fork_Shanghai_state_test.json" + ), + Path( + 
"other_fixtures/blockchain_tests_hive/paris/module_paris/paris_two__fork_Shanghai_blockchain_test_hive.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_state_test.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test_hive.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_state_test.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test_hive.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_x_1.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_1.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_hive_x_1.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_x_2.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_2.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_hive_x_2.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_x_3.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_3.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_hive_x_3.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_x_1.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_1.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_hive_x_1.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_x_2.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_2.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_hive_x_2.json" + ), + Path( + "other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_x_3.json" + ), + Path( + "other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_3.json" + ), + Path( + "other_fixtures/blockchain_tests_hive/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_hive_x_3.json" + ), + ], + [1] * 36, + id="single-fixture-per-file_custom_output_dir", + ), + pytest.param( + ["--flat-output", "--single-fixture-per-file"], + [ + Path("fixtures/blockchain_tests/paris_one__fork_Paris_blockchain_test.json"), + Path("fixtures/state_tests/paris_one__fork_Paris_state_test.json"), + Path( + "fixtures/blockchain_tests_hive/paris_one__fork_Paris_blockchain_test_hive.json" + ), + 
Path("fixtures/blockchain_tests/paris_one__fork_Shanghai_blockchain_test.json"), + Path("fixtures/state_tests/paris_one__fork_Shanghai_state_test.json"), + Path( + "fixtures/blockchain_tests_hive/paris_one__fork_Shanghai_blockchain_test_hive.json" + ), + Path("fixtures/blockchain_tests/paris_two__fork_Paris_blockchain_test.json"), + Path("fixtures/state_tests/paris_two__fork_Paris_state_test.json"), + Path( + "fixtures/blockchain_tests_hive/paris_two__fork_Paris_blockchain_test_hive.json" + ), + Path("fixtures/blockchain_tests/paris_two__fork_Shanghai_blockchain_test.json"), + Path("fixtures/state_tests/paris_two__fork_Shanghai_state_test.json"), + Path( + "fixtures/blockchain_tests_hive/paris_two__fork_Shanghai_blockchain_test_hive.json" + ), + Path("fixtures/blockchain_tests/shanghai_one__fork_Paris_blockchain_test.json"), + Path("fixtures/state_tests/shanghai_one__fork_Paris_state_test.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_one__fork_Paris_blockchain_test_hive.json" + ), + Path("fixtures/blockchain_tests/shanghai_one__fork_Shanghai_blockchain_test.json"), + Path("fixtures/state_tests/shanghai_one__fork_Shanghai_state_test.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_one__fork_Shanghai_blockchain_test_hive.json" + ), + Path( + "fixtures/blockchain_tests/shanghai_two__fork_Paris_blockchain_test_x_1.json" + ), + Path("fixtures/state_tests/shanghai_two__fork_Paris_state_test_x_1.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_two__fork_Paris_blockchain_test_hive_x_1.json" + ), + Path( + "fixtures/blockchain_tests/shanghai_two__fork_Paris_blockchain_test_x_2.json" + ), + Path("fixtures/state_tests/shanghai_two__fork_Paris_state_test_x_2.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_two__fork_Paris_blockchain_test_hive_x_2.json" + ), + Path( + "fixtures/blockchain_tests/shanghai_two__fork_Paris_blockchain_test_x_3.json" + ), + Path("fixtures/state_tests/shanghai_two__fork_Paris_state_test_x_3.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_two__fork_Paris_blockchain_test_hive_x_3.json" + ), + Path( + "fixtures/blockchain_tests/shanghai_two__fork_Shanghai_blockchain_test_x_1.json" + ), + Path("fixtures/state_tests/shanghai_two__fork_Shanghai_state_test_x_1.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_two__fork_Shanghai_blockchain_test_hive_x_1.json" + ), + Path( + "fixtures/blockchain_tests/shanghai_two__fork_Shanghai_blockchain_test_x_2.json" + ), + Path("fixtures/state_tests/shanghai_two__fork_Shanghai_state_test_x_2.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_two__fork_Shanghai_blockchain_test_hive_x_2.json" + ), + Path( + "fixtures/blockchain_tests/shanghai_two__fork_Shanghai_blockchain_test_x_3.json" + ), + Path("fixtures/state_tests/shanghai_two__fork_Shanghai_state_test_x_3.json"), + Path( + "fixtures/blockchain_tests_hive/shanghai_two__fork_Shanghai_blockchain_test_hive_x_3.json" + ), + ], + [1] * 36, + id="flat-single-per-file_flat-output", + ), + ], +) +def test_fixture_output_based_on_command_line_args( + testdir, args, expected_fixture_files, expected_fixture_counts +): + """ + Test: + - fixture files are created at the expected paths. + - no other files are present in the output directory. + - each fixture file contains the expected number of fixtures. 
+ + The modules above generate the following test cases: + tests/paris/test_module_paris.py::test_paris_one[fork_Paris] PASSED + tests/paris/test_module_paris.py::test_paris_one[fork_Shanghai] PASSED + tests/paris/test_module_paris.py::test_paris_two[fork_Paris] PASSED + tests/paris/test_module_paris.py::test_paris_two[fork_Shanghai] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_one[fork_Paris] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_one[fork_Shanghai] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Paris-x=1] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Paris-x=2] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Paris-x=3] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Shanghai-x=1] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Shanghai-x=2] PASSED + tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Shanghai-x=3] PASSED + """ + tests_dir = testdir.mkdir("tests") + + paris_tests_dir = tests_dir.mkdir("paris") + test_module = paris_tests_dir.join("test_module_paris.py") + test_module.write(test_module_paris) + + shanghai_tests_dir = tests_dir.mkdir("shanghai") + test_module = shanghai_tests_dir.join("test_module_shanghai.py") + test_module.write(test_module_shanghai) + + testdir.copy_example(name="pytest.ini") + args.append("-v") + result = testdir.runpytest(*args) + result.assert_outcomes( + passed=total_test_count * 3, + failed=0, + skipped=0, + errors=0, + ) + if "--output" in args: + output_dir = Path(args[args.index("--output") + 1]).absolute() + else: + output_dir = Path("fixtures").absolute() + assert output_dir.exists() + + all_files = get_all_files_in_directory(output_dir) + + for fixture_file, fixture_count in zip(expected_fixture_files, expected_fixture_counts): + assert fixture_file.exists() + assert fixture_count == count_keys_in_fixture(fixture_file) + + assert set(all_files) == set( + expected_fixture_files + ), f"Unexpected files in directory: {set(all_files) - set(expected_fixture_files)}" diff --git a/tests/berlin/eip2930_access_list/__init__.py b/tests/berlin/eip2930_access_list/__init__.py index 678f7f20d1..24be6dfd09 100644 --- a/tests/berlin/eip2930_access_list/__init__.py +++ b/tests/berlin/eip2930_access_list/__init__.py @@ -1,5 +1,4 @@ """ abstract: Tests [EIP-2930: Optional access lists](https://eips.ethereum.org/EIPS/eip-2930) - Test cases for [EIP-2930: Optional access lists](https://eips.ethereum.org/EIPS/eip-2930). """ diff --git a/tests/berlin/eip2930_access_list/test_acl.py b/tests/berlin/eip2930_access_list/test_acl.py index 6b29ef959a..b69a5ada7e 100644 --- a/tests/berlin/eip2930_access_list/test_acl.py +++ b/tests/berlin/eip2930_access_list/test_acl.py @@ -4,7 +4,6 @@ import pytest -from ethereum_test_forks import Fork, London, is_fork from ethereum_test_tools import AccessList, Account, Environment from ethereum_test_tools import Opcodes as Op from ethereum_test_tools import StateTestFiller, Transaction @@ -14,8 +13,7 @@ @pytest.mark.valid_from("Berlin") -@pytest.mark.valid_until("London") -def test_access_list(state_test: StateTestFiller, fork: Fork): +def test_access_list(state_test: StateTestFiller): """ Test type 1 transaction. 
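Editorial aside: the expected outcomes used by `test_fixture_output_based_on_command_line_args` above follow directly from the two synthetic test modules. A short worked calculation, not part of the diff:

```python
# Arithmetic behind total_test_count and the assert_outcomes(passed=total_test_count * 3) call.
forks = 2                              # Paris and Shanghai, per the valid_from/valid_until markers
test_count_paris = 2 * forks           # test_paris_one and test_paris_two
test_count_shanghai = (1 + 3) * forks  # test_shanghai_one plus test_shanghai_two[x=1,2,3]
total_test_count = test_count_paris + test_count_shanghai
assert total_test_count == 12

fixture_formats = 3                    # state_test, blockchain_test, blockchain_test_hive
assert total_test_count * fixture_formats == 36  # the expected number of passing items
```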
""" @@ -59,12 +57,9 @@ def test_access_list(state_test: StateTestFiller, fork: Fork): balance=4, nonce=1, ), - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": Account( - balance=0x1BC16D674EC80000 if is_fork(fork, London) else 0x1BC16D674ECB26CE, - ), "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account( balance=0x2CD931, nonce=1, ), } - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/byzantium/__init__.py b/tests/byzantium/__init__.py new file mode 100644 index 0000000000..6792ecb2cc --- /dev/null +++ b/tests/byzantium/__init__.py @@ -0,0 +1,3 @@ +""" +Test cases for EVM functionality introduced in Byzantium. +""" diff --git a/tests/byzantium/eip198_modexp_precompile/__init__.py b/tests/byzantium/eip198_modexp_precompile/__init__.py new file mode 100644 index 0000000000..d46d4fe5a7 --- /dev/null +++ b/tests/byzantium/eip198_modexp_precompile/__init__.py @@ -0,0 +1,3 @@ +""" +Test for precompiles introduced in Byzantium. +""" diff --git a/tests/byzantium/eip198_modexp_precompile/test_modexp.py b/tests/byzantium/eip198_modexp_precompile/test_modexp.py new file mode 100644 index 0000000000..239d9139f0 --- /dev/null +++ b/tests/byzantium/eip198_modexp_precompile/test_modexp.py @@ -0,0 +1,271 @@ +""" +abstract: Test [EIP-198: MODEXP Precompile](https://eips.ethereum.org/EIPS/eip-198) + Tests the MODEXP precompile, located at address 0x0000..0005. Test cases from the EIP are + labelled with `EIP-198-caseX` in the test id. +""" +from dataclasses import dataclass + +import pytest + +from ethereum_test_tools import ( + Account, + Address, + Environment, + StateTestFiller, + TestAddress, + TestParameterGroup, + Transaction, + compute_create_address, +) +from ethereum_test_tools.vm.opcode import Opcodes as Op + +REFERENCE_SPEC_GIT_PATH = "EIPS/eip-198.md" +REFERENCE_SPEC_VERSION = "9e393a79d9937f579acbdcb234a67869259d5a96" + + +@dataclass(kw_only=True, frozen=True, repr=False) +class ModExpInput(TestParameterGroup): + """ + Helper class that defines the MODEXP precompile inputs and creates the + call data from them. + + Attributes: + base (str): The base value for the MODEXP precompile. + exponent (str): The exponent value for the MODEXP precompile. + modulus (str): The modulus value for the MODEXP precompile. + extra_data (str): Defines extra padded data to be added at the end of the calldata + to the precompile. Defaults to an empty string. + """ + + base: str + exponent: str + modulus: str + extra_data: str = "" + + def create_modexp_tx_data(self): + """ + Generates input for the MODEXP precompile. + """ + return ( + "0x" + + f"{int(len(self.base)/2):x}".zfill(64) + + f"{int(len(self.exponent)/2):x}".zfill(64) + + f"{int(len(self.modulus)/2):x}".zfill(64) + + self.base + + self.exponent + + self.modulus + + self.extra_data + ) + + +@dataclass(kw_only=True, frozen=True, repr=False) +class ModExpRawInput(TestParameterGroup): + """ + Helper class to directly define a raw input to the MODEXP precompile. + """ + + raw_input: str + + def create_modexp_tx_data(self): + """ + The raw input is already the MODEXP precompile input. + """ + return self.raw_input + + +@dataclass(kw_only=True, frozen=True, repr=False) +class ExpectedOutput(TestParameterGroup): + """ + Expected test result. + + Attributes: + call_return_code (str): The return_code from CALL, 0 indicates unsuccessful call + (out-of-gas), 1 indicates call succeeded. 
+ returned_data (str): The output returnData is the expected output of the call + """ + + call_return_code: str + returned_data: str + + +@pytest.mark.valid_from("Byzantium") +@pytest.mark.parametrize( + ["input", "output"], + [ + ( + ModExpInput(base="", exponent="", modulus="02"), + ExpectedOutput(call_return_code="0x01", returned_data="0x01"), + ), + ( + ModExpInput(base="", exponent="", modulus="0002"), + ExpectedOutput(call_return_code="0x01", returned_data="0x0001"), + ), + ( + ModExpInput(base="00", exponent="00", modulus="02"), + ExpectedOutput(call_return_code="0x01", returned_data="0x01"), + ), + ( + ModExpInput(base="", exponent="01", modulus="02"), + ExpectedOutput(call_return_code="0x01", returned_data="0x00"), + ), + ( + ModExpInput(base="01", exponent="01", modulus="02"), + ExpectedOutput(call_return_code="0x01", returned_data="0x01"), + ), + ( + ModExpInput(base="02", exponent="01", modulus="03"), + ExpectedOutput(call_return_code="0x01", returned_data="0x02"), + ), + ( + ModExpInput(base="02", exponent="02", modulus="05"), + ExpectedOutput(call_return_code="0x01", returned_data="0x04"), + ), + ( + ModExpInput(base="", exponent="", modulus=""), + ExpectedOutput(call_return_code="0x01", returned_data="0x"), + ), + ( + ModExpInput(base="", exponent="", modulus="00"), + ExpectedOutput(call_return_code="0x01", returned_data="0x00"), + ), + ( + ModExpInput(base="", exponent="", modulus="01"), + ExpectedOutput(call_return_code="0x01", returned_data="0x00"), + ), + ( + ModExpInput(base="", exponent="", modulus="0001"), + ExpectedOutput(call_return_code="0x01", returned_data="0x0000"), + ), + # Test cases from EIP 198. + pytest.param( + ModExpInput( + base="03", + exponent="fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + modulus="fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + ), + ExpectedOutput( + call_return_code="0x01", + returned_data="0000000000000000000000000000000000000000000000000000000000000001", + ), + id="EIP-198-case1", + ), + pytest.param( + ModExpInput( + base="", + exponent="fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + modulus="fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + ), + ExpectedOutput( + call_return_code="0x01", + returned_data="0000000000000000000000000000000000000000000000000000000000000000", + ), + id="EIP-198-case2", + ), + pytest.param( # Note: This is the only test case which goes out-of-gas. 
+ ModExpRawInput( + raw_input="0000000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000020" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd" + ), + ExpectedOutput( + call_return_code="0x00", + returned_data="0000000000000000000000000000000000000000000000000000000000000000", + ), + id="EIP-198-case3-raw-input-out-of-gas", + ), + pytest.param( + ModExpInput( + base="03", + exponent="ffff", + modulus="8000000000000000000000000000000000000000000000000000000000000000", + extra_data="07", + ), + ExpectedOutput( + call_return_code="0x01", + returned_data="0x3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + ), + id="EIP-198-case4-extra-data_07", + ), + pytest.param( + ModExpRawInput( + raw_input="0000000000000000000000000000000000000000000000000000000000000001" + "0000000000000000000000000000000000000000000000000000000000000002" + "0000000000000000000000000000000000000000000000000000000000000020" + "03" + "ffff" + "80" + ), + ExpectedOutput( + call_return_code="0x01", + returned_data="0x3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + ), + id="EIP-198-case5-raw-input", + ), + ], + ids=lambda param: param.__repr__(), # only required to remove parameter names (input/output) +) +def test_modexp(state_test: StateTestFiller, input: ModExpInput, output: ExpectedOutput): + """ + Test the MODEXP precompile + """ + env = Environment() + pre = {TestAddress: Account(balance=1000000000000000000000)} + + account = Address(0x100) + + pre[account] = Account( + code=( + # Store all CALLDATA into memory (offset 0) + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + # Store the returned CALL status (success = 1, fail = 0) into slot 0: + + Op.SSTORE( + 0, + # Setup stack to CALL into ModExp with the CALLDATA and CALL into it (+ pop value) + Op.CALL(Op.GAS(), 0x05, 0, 0, Op.CALLDATASIZE(), 0, 0), + ) + # Store contract deployment code to deploy the returned data from ModExp as + # contract code (16 bytes) + + Op.MSTORE( + 0, + ( + ( + # Need to `ljust` this PUSH32 in order to ensure the code starts + # in memory at offset 0 (memory right-aligns stack items which are not + # 32 bytes) + Op.PUSH32( + ( + Op.CODECOPY(0, 16, Op.SUB(Op.CODESIZE(), 16)) + + Op.RETURN(0, Op.SUB(Op.CODESIZE, 16)) + ).ljust(32, bytes(1)) + ) + ) + ), + ) + # RETURNDATACOPY the returned data from ModExp into memory (offset 16 bytes) + + Op.RETURNDATACOPY(16, 0, Op.RETURNDATASIZE()) + # CREATE contract with the deployment code + the returned data from ModExp + + Op.CREATE(0, 0, Op.ADD(16, Op.RETURNDATASIZE())) + # STOP (handy for tracing) + + Op.STOP() + ) + ) + + tx = Transaction( + ty=0x0, + nonce=0, + to=account, + data=input.create_modexp_tx_data(), + gas_limit=500000, + gas_price=10, + protected=True, + ) + + post = {} + if output.call_return_code != "0x00": + contract_address = compute_create_address(account, tx.nonce) + post[contract_address] = Account(code=output.returned_data) + post[account] = Account(storage={0: output.call_return_code}) + + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/__init__.py b/tests/cancun/__init__.py index e917294b80..118715bb48 100644 --- a/tests/cancun/__init__.py +++ b/tests/cancun/__init__.py @@ -1,3 +1,6 @@ """ Test cases for EVM functionality introduced in Cancun. 
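Editorial aside: the calldata layout built by `create_modexp_tx_data()` above is the EIP-198 encoding (three 32-byte length words followed by base, exponent, and modulus), and the expected result can be checked with Python's built-in modular exponentiation. A standalone sketch, not part of the diff; the helper names are illustrative.

```python
def modexp_calldata(base: bytes, exponent: bytes, modulus: bytes) -> bytes:
    # <len(base)> <len(exponent)> <len(modulus)> (32 bytes each), then the operands.
    return (
        len(base).to_bytes(32, "big")
        + len(exponent).to_bytes(32, "big")
        + len(modulus).to_bytes(32, "big")
        + base
        + exponent
        + modulus
    )


def expected_modexp_output(base: bytes, exponent: bytes, modulus: bytes) -> bytes:
    b, e, m = (int.from_bytes(x, "big") for x in (base, exponent, modulus))
    # The result is left-padded to the modulus length; a zero or empty modulus yields zeros.
    return (pow(b, e, m) if m else 0).to_bytes(len(modulus), "big")


# EIP-198 case 1 from the parametrization above: with the secp256k1 prime p,
# 3 ** (p - 1) mod p == 1, so the 32-byte returned data ends in 0x01.
p = 2**256 - 2**32 - 977
base, exponent, modulus = (3).to_bytes(1, "big"), (p - 1).to_bytes(32, "big"), p.to_bytes(32, "big")
assert expected_modexp_output(base, exponent, modulus) == (1).to_bytes(32, "big")
assert len(modexp_calldata(base, exponent, modulus)) == 32 * 3 + 1 + 32 + 32
```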
+ +See [EIP-7659: Hardfork Meta - Dencun](https://eips.ethereum.org/EIPS/eip-7569) +for a list of EIPS included in Dencun (Deneb/Cancun). """ diff --git a/tests/cancun/eip1153_tstore/test_tstorage.py b/tests/cancun/eip1153_tstore/test_tstorage.py index de8efe4c2e..c186dcd1c0 100644 --- a/tests/cancun/eip1153_tstore/test_tstorage.py +++ b/tests/cancun/eip1153_tstore/test_tstorage.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-1153: Transient Storage Opcodes](https://eips.ethereum.org/EIPS/eip-1153) - Test [EIP-1153: Transient Storage Opcodes](https://eips.ethereum.org/EIPS/eip-1153). Ports and extends some tests from [ethereum/tests/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage) @@ -42,13 +41,11 @@ def test_transient_storage_unset_values(state_test: StateTestFiller): code_address: Account(code=code, storage={slot: 1 for slot in slots_under_test}), } - txs = [ - Transaction( - to=code_address, - data=b"", - gas_limit=1_000_000, - ) - ] + tx = Transaction( + to=code_address, + data=b"", + gas_limit=1_000_000, + ) post = {code_address: Account(storage={slot: 0 for slot in slots_under_test})} @@ -56,7 +53,7 @@ def test_transient_storage_unset_values(state_test: StateTestFiller): env=env, pre=pre, post=post, - txs=txs, + tx=tx, ) @@ -76,16 +73,14 @@ def test_tload_after_tstore(state_test: StateTestFiller): pre = { TestAddress: Account(balance=10_000_000), - code_address: Account(code=code, storage={slot: 0 for slot in slots_under_test}), + code_address: Account(code=code, storage={slot: 0xFF for slot in slots_under_test}), } - txs = [ - Transaction( - to=code_address, - data=b"", - gas_limit=1_000_000, - ) - ] + tx = Transaction( + to=code_address, + data=b"", + gas_limit=1_000_000, + ) post = {code_address: Account(storage={slot: slot for slot in slots_under_test})} @@ -93,7 +88,7 @@ def test_tload_after_tstore(state_test: StateTestFiller): env=env, pre=pre, post=post, - txs=txs, + tx=tx, ) @@ -119,13 +114,11 @@ def test_tload_after_sstore(state_test: StateTestFiller): code_address: Account(code=code, storage={slot: 1 for slot in slots_under_test}), } - txs = [ - Transaction( - to=code_address, - data=b"", - gas_limit=1_000_000, - ) - ] + tx = Transaction( + to=code_address, + data=b"", + gas_limit=1_000_000, + ) post = { code_address: Account( @@ -139,7 +132,7 @@ def test_tload_after_sstore(state_test: StateTestFiller): env=env, pre=pre, post=post, - txs=txs, + tx=tx, ) @@ -166,13 +159,11 @@ def test_tload_after_tstore_is_zero(state_test: StateTestFiller): ), } - txs = [ - Transaction( - to=code_address, - data=b"", - gas_limit=1_000_000, - ) - ] + tx = Transaction( + to=code_address, + data=b"", + gas_limit=1_000_000, + ) post = { code_address: Account( @@ -184,7 +175,7 @@ def test_tload_after_tstore_is_zero(state_test: StateTestFiller): env=env, pre=pre, post=post, - txs=txs, + tx=tx, ) @@ -244,15 +235,62 @@ def test_gas_usage( TestAddress: Account(balance=10_000_000, nonce=0), code_address: Account(code=gas_measure_bytecode), } - txs = [ - Transaction( - to=code_address, - data=b"", - gas_limit=1_000_000, - ) - ] + tx = Transaction( + to=code_address, + data=b"", + gas_limit=1_000_000, + ) post = { code_address: Account(code=gas_measure_bytecode, storage={0: expected_gas}), TestAddress: Account(nonce=1), } - state_test(env=env, pre=pre, txs=txs, post=post) + state_test(env=env, pre=pre, tx=tx, post=post) + + +@unique +class 
LoopRunUntilOutOfGasCases(PytestParameterEnum): + """ + Test cases to run until out of gas. + """ + + TSTORE = { + "description": "Run tstore in loop until out of gas", + "repeat_bytecode": Op.TSTORE(Op.GAS, Op.GAS), + "bytecode_repeat_times": 1000, + } + TSTORE_WIDE_ADDRESS_SPACE = { + "description": "Run tstore in loop until out of gas, using a wide address space", + "repeat_bytecode": Op.TSTORE(Op.ADD(Op.SHL(Op.PC, 1), Op.GAS), Op.GAS), + "bytecode_repeat_times": 32, + } + TSTORE_TLOAD = { + "description": "Run tstore and tload in loop until out of gas", + "repeat_bytecode": Op.GAS + Op.DUP1 + Op.DUP1 + Op.TSTORE + Op.TLOAD + Op.POP, + "bytecode_repeat_times": 1000, + } + + +@LoopRunUntilOutOfGasCases.parametrize() +def test_run_until_out_of_gas( + state_test: StateTestFiller, + repeat_bytecode: bytes, + bytecode_repeat_times: int, +): + """ + Use TSTORE over and over to different keys until we run out of gas. + """ + bytecode = Op.JUMPDEST + repeat_bytecode * bytecode_repeat_times + Op.JUMP(Op.PUSH0) + pre = { + TestAddress: Account(balance=10_000_000_000_000, nonce=0), + code_address: Account(code=bytecode), + } + tx = Transaction( + to=code_address, + data=b"", + gas_limit=30_000_000, + ) + post = { + code_address: Account(code=bytecode, storage={}), + TestAddress: Account(nonce=1), + } + state_test(env=Environment(), pre=pre, tx=tx, post=post) diff --git a/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py b/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py index 32d6ca3174..6d730178f7 100644 --- a/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py +++ b/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py @@ -1,6 +1,5 @@ """ abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153) - Test cases for `TSTORE` and `TLOAD` opcode calls in contract initcode. 
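Editorial aside: the EIP-1153 tests above (unset values, TLOAD after TSTORE/SSTORE) hinge on transient storage being per-transaction. A toy Python model of just that behaviour, not part of the diff and deliberately ignoring call-frame and revert semantics:

```python
class ToyAccountState:
    """Minimal model: transient slots default to zero and vanish after the transaction."""

    def __init__(self) -> None:
        self.storage: dict[int, int] = {}    # persists across transactions
        self.transient: dict[int, int] = {}  # cleared after every transaction

    def sload(self, key: int) -> int:
        return self.storage.get(key, 0)

    def sstore(self, key: int, value: int) -> None:
        self.storage[key] = value

    def tload(self, key: int) -> int:
        return self.transient.get(key, 0)

    def tstore(self, key: int, value: int) -> None:
        self.transient[key] = value

    def end_transaction(self) -> None:
        self.transient.clear()


state = ToyAccountState()
state.sstore(1, 0xFF)
state.tstore(1, 0xAA)
assert state.tload(2) == 0       # unset transient slots read as zero
assert state.tload(1) == 0xAA
state.end_transaction()
assert state.tload(1) == 0       # transient values do not survive the transaction
assert state.sload(1) == 0xFF    # regular storage does
```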
""" # noqa: E501 @@ -9,7 +8,7 @@ import pytest -from ethereum_test_tools import Account, Environment, Initcode +from ethereum_test_tools import Account, Address, Environment, Initcode from ethereum_test_tools import Opcodes as Op from ethereum_test_tools import ( StateTestFiller, @@ -151,7 +150,7 @@ def creator_contract_code( # noqa: D102 self, opcode: Op, create2_salt: int, - created_contract_address: str, + created_contract_address: Address, ) -> bytes: if opcode == Op.CREATE: create_call = Op.CREATE(0, 0, Op.CALLDATASIZE) @@ -159,9 +158,7 @@ def creator_contract_code( # noqa: D102 create_call = Op.CREATE2(0, 0, Op.CALLDATASIZE, create2_salt) else: raise Exception("Invalid opcode specified for test.") - contract_call = Op.SSTORE( - 4, Op.CALL(Op.GAS(), Op.PUSH20(created_contract_address), 0, 0, 0, 0, 0) - ) + contract_call = Op.SSTORE(4, Op.CALL(Op.GAS(), created_contract_address, 0, 0, 0, 0, 0)) return ( Op.TSTORE(0, 0x0100) + Op.TSTORE(1, 0x0200) @@ -183,7 +180,7 @@ def expected_creator_storage(self) -> dict: # noqa: D102 @pytest.fixture() def created_contract_address( # noqa: D102 self, opcode: Op, create2_salt: int, initcode: bytes - ) -> str: + ) -> Address: if opcode == Op.CREATE: return compute_create_address(address=creator_address, nonce=1) if opcode == Op.CREATE2: @@ -195,8 +192,8 @@ def created_contract_address( # noqa: D102 def test_contract_creation( self, state_test: StateTestFiller, - creator_contract_code: str, - created_contract_address: str, + creator_contract_code: bytes, + created_contract_address: Address, initcode: bytes, deploy_code: bytes, expected_creator_storage: dict, @@ -237,5 +234,5 @@ def test_contract_creation( env=Environment(), pre=pre, post=post, - txs=[tx], + tx=tx, ) diff --git a/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py b/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py index 5f22bb8dc2..81500aa1e0 100644 --- a/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py +++ b/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py @@ -1,11 +1,10 @@ """ abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153) - Test cases for `TSTORE` and `TLOAD` opcode calls in different execution contexts. 
""" # noqa: E501 from enum import EnumMeta, unique -from typing import List, Mapping +from typing import Mapping import pytest @@ -314,12 +313,10 @@ def __init__(self, value): caller_address: Account(code=value["caller_bytecode"]), callee_address: Account(code=value["callee_bytecode"]), }, - "txs": [ - Transaction( - to=caller_address, - gas_limit=1_000_000, - ) - ], + "tx": Transaction( + to=caller_address, + gas_limit=1_000_000, + ), "post": { caller_address: Account(storage=value["expected_caller_storage"]), callee_address: Account(storage=value["expected_callee_storage"]), @@ -333,7 +330,7 @@ def test_subcall( state_test: StateTestFiller, env: Environment, pre: Mapping, - txs: List[Transaction], + tx: Transaction, post: Mapping, ): """ @@ -344,4 +341,4 @@ def test_subcall( - `DELEGATECALL` - `STATICCALL` """ - state_test(env=env, pre=pre, post=post, txs=txs) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py b/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py index 5ad4c3c244..3938609e47 100644 --- a/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py +++ b/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py @@ -1,6 +1,5 @@ """ abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153) - Test cases for `TSTORE` and `TLOAD` opcode calls in reentrancy contexts. """ # noqa: E501 @@ -8,9 +7,9 @@ import pytest -from ethereum_test_tools import Account, CalldataCase, Conditional, Environment +from ethereum_test_tools import Account, CalldataCase, Conditional, Environment, Hash from ethereum_test_tools import Opcodes as Op -from ethereum_test_tools import StateTestFiller, Switch, TestAddress, Transaction, to_hash_bytes +from ethereum_test_tools import StateTestFiller, Switch, TestAddress, Transaction from . import PytestParameterEnum from .spec import ref_spec_1153 @@ -273,10 +272,10 @@ def test_reentrant_call(state_test: StateTestFiller, bytecode, expected_storage) tx = Transaction( to=callee_address, - data=to_hash_bytes(1), + data=Hash(1), gas_limit=10_000_000, ) post = {callee_address: Account(code=bytecode, storage=expected_storage)} - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py b/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py index 8225c67f0e..5bd4f7964f 100644 --- a/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py +++ b/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py @@ -1,6 +1,5 @@ """ abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153) - Test cases for `TSTORE` and `TLOAD` opcode calls in reentrancy after self-destruct, taking into account the changes in EIP-6780. """ # noqa: E501 @@ -42,9 +41,7 @@ def call_option(option_number: int) -> bytes: """ Return the bytecode for a call to the callee contract with the given option number. 
""" - return Op.MSTORE(0, option_number) + Op.CALL( - Op.GAS, Op.PUSH20(callee_address), 0, 0, 32, 0, 32 - ) + return Op.MSTORE(0, option_number) + Op.CALL(Op.GAS, callee_address, 0, 0, 32, 0, 32) @unique @@ -249,4 +246,4 @@ def test_reentrant_selfdestructing_call( else: post[callee_address] = Account.NONEXISTENT - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/eip4788_beacon_root/conftest.py b/tests/cancun/eip4788_beacon_root/conftest.py index b6fb6d4255..e97f075914 100644 --- a/tests/cancun/eip4788_beacon_root/conftest.py +++ b/tests/cancun/eip4788_beacon_root/conftest.py @@ -10,13 +10,13 @@ from ethereum_test_tools import ( AccessList, Account, + Address, Environment, + Hash, Storage, TestAddress, Transaction, add_kzg_version, - to_address, - to_hash_bytes, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -51,7 +51,7 @@ def __next__(self) -> bytes: @pytest.fixture def beacon_root(request, beacon_roots: Iterator[bytes]) -> bytes: # noqa: D103 - return to_hash_bytes(request.param) if hasattr(request, "param") else next(beacon_roots) + return Hash(request.param) if hasattr(request, "param") else next(beacon_roots) @pytest.fixture @@ -86,8 +86,8 @@ def call_gas() -> int: # noqa: D103 @pytest.fixture -def caller_address() -> str: # noqa: D103 - return to_address(0x100) +def caller_address() -> Address: # noqa: D103 + return Address(0x100) @pytest.fixture @@ -177,7 +177,7 @@ def system_address_balance() -> int: def pre( contract_call_account: Account, system_address_balance: int, - caller_address: str, + caller_address: Address, ) -> Dict: """ Prepares the pre state of all test cases, by setting the balance of the @@ -191,7 +191,7 @@ def pre( caller_address: contract_call_account, } if system_address_balance > 0: - pre_alloc[to_address(Spec.SYSTEM_ADDRESS)] = Account( + pre_alloc[Address(Spec.SYSTEM_ADDRESS)] = Account( nonce=0, balance=system_address_balance, ) @@ -199,7 +199,7 @@ def pre( @pytest.fixture -def tx_to_address(request, caller_address: Account) -> bytes: # noqa: D103 +def tx_to_address(request, caller_address: Account) -> Address: # noqa: D103 return request.param if hasattr(request, "param") else caller_address @@ -234,7 +234,7 @@ def tx_data(timestamp: int) -> bytes: """ Data included in the transaction to call the beacon root contract. 
""" - return to_hash_bytes(timestamp) + return Hash(timestamp) @pytest.fixture @@ -249,7 +249,7 @@ def tx_type() -> int: @pytest.fixture def tx( - tx_to_address: str, + tx_to_address: Address, tx_data: bytes, tx_type: int, access_list: List[AccessList], @@ -289,7 +289,7 @@ def tx( @pytest.fixture def post( - caller_address: str, + caller_address: Address, beacon_root: bytes, valid_call: bool, valid_input: bool, diff --git a/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py b/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py index 02aa434500..01d307e29f 100644 --- a/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py +++ b/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py @@ -1,10 +1,8 @@ """ abstract: Tests beacon block root for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788) - Test the exposed beacon chain root in the EVM for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788) note: Adding a new test - Add a function that is named `test_` and takes at least the following arguments: - state_test @@ -14,26 +12,27 @@ - post - valid_call - The following arguments *need* to be parametrized or the test will not be generated: - - - - All other `pytest.fixtures` can be parametrized to generate new combinations and test cases. """ # noqa: E501 -from typing import Dict +from itertools import count +from typing import Dict, Iterator, List import pytest +from ethereum_test_forks import Fork from ethereum_test_tools import ( Account, - Environment, - StateTestFiller, + Address, + Block, + BlockchainTestFiller, + Hash, Storage, + TestAddress, Transaction, - to_address, + Withdrawal, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -47,12 +46,7 @@ "call_gas, valid_call", [ pytest.param(Spec.BEACON_ROOTS_CALL_GAS, True), - pytest.param(Spec.BEACON_ROOTS_CALL_GAS + 1, True), - pytest.param( - Spec.BEACON_ROOTS_CALL_GAS - 1, - False, - marks=pytest.mark.xfail(reason="gas calculation is incorrect"), # TODO - ), + pytest.param(int(Spec.BEACON_ROOTS_CALL_GAS / 100), False), ], ) @pytest.mark.parametrize( @@ -67,8 +61,9 @@ ) @pytest.mark.valid_from("Cancun") def test_beacon_root_contract_calls( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -89,10 +84,9 @@ def test_beacon_root_contract_calls( be executed. This is highlighted within storage by storing the return value of each call context. """ - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx], + blocks=[Block(txs=[tx], beacon_root=beacon_root, timestamp=timestamp)], post=post, ) @@ -122,8 +116,9 @@ def test_beacon_root_contract_calls( ) @pytest.mark.valid_from("Cancun") def test_beacon_root_contract_timestamps( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -135,10 +130,9 @@ def test_beacon_root_contract_timestamps( `parent_beacon_block_root` for a valid input timestamp and return the zero'd 32 bytes value for an invalid input timestamp. 
""" - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx], + blocks=[Block(txs=[tx], beacon_root=beacon_root, timestamp=timestamp)], post=post, ) @@ -157,8 +151,9 @@ def test_beacon_root_contract_timestamps( @pytest.mark.parametrize("timestamp", [12]) @pytest.mark.valid_from("Cancun") def test_calldata_lengths( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -166,10 +161,9 @@ def test_calldata_lengths( """ Tests the beacon root contract call using multiple invalid input lengths. """ - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx], + blocks=[Block(txs=[tx], beacon_root=beacon_root, timestamp=timestamp)], post=post, ) @@ -187,8 +181,9 @@ def test_calldata_lengths( @pytest.mark.parametrize("auto_access_list", [False, True]) @pytest.mark.valid_from("Cancun") def test_beacon_root_equal_to_timestamp( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -199,10 +194,9 @@ def test_beacon_root_equal_to_timestamp( The expected result is that the contract call will return the `parent_beacon_block_root`, as all timestamps used are valid. """ - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx], + blocks=[Block(txs=[tx], beacon_root=beacon_root, timestamp=timestamp)], post=post, ) @@ -212,8 +206,9 @@ def test_beacon_root_equal_to_timestamp( @pytest.mark.with_all_tx_types @pytest.mark.valid_from("Cancun") def test_tx_to_beacon_root_contract( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -221,10 +216,9 @@ def test_tx_to_beacon_root_contract( """ Tests the beacon root contract using a transaction with different types and data lengths. """ - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx], + blocks=[Block(txs=[tx], beacon_root=beacon_root, timestamp=timestamp)], post=post, ) @@ -239,8 +233,9 @@ def test_tx_to_beacon_root_contract( @pytest.mark.parametrize("timestamp", [12]) @pytest.mark.valid_from("Cancun") def test_invalid_beacon_root_calldata_value( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -251,10 +246,9 @@ def test_invalid_beacon_root_calldata_value( Contract should revert. """ - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx], + blocks=[Block(txs=[tx], beacon_root=beacon_root, timestamp=timestamp)], post=post, ) @@ -262,8 +256,9 @@ def test_invalid_beacon_root_calldata_value( @pytest.mark.parametrize("timestamp", [12]) @pytest.mark.valid_from("Cancun") def test_beacon_root_selfdestruct( - state_test: StateTestFiller, - env: Environment, + blockchain_test: BlockchainTestFiller, + beacon_root: bytes, + timestamp: int, pre: Dict, tx: Transaction, post: Dict, @@ -272,21 +267,522 @@ def test_beacon_root_selfdestruct( Tests that self destructing the beacon root address transfers actors balance correctly. 
""" # self destruct actor - pre[to_address(0x1337)] = Account( + pre[Address(0x1337)] = Account( code=Op.SELFDESTRUCT(Spec.BEACON_ROOTS_ADDRESS), balance=0xBA1, ) # self destruct caller - pre[to_address(0xCC)] = Account( - code=Op.CALL(100000, Op.PUSH20(to_address(0x1337)), 0, 0, 0, 0, 0) + pre[Address(0xCC)] = Account( + code=Op.CALL(100000, Address(0x1337), 0, 0, 0, 0, 0) + Op.SSTORE(0, Op.BALANCE(Spec.BEACON_ROOTS_ADDRESS)), ) - post[to_address(0xCC)] = Account( - storage=Storage({0: 0xBA1}), + post = { + Address(0xCC): Account( + storage=Storage({0: 0xBA1}), + ) + } + blockchain_test( + pre=pre, + blocks=[ + Block(txs=[Transaction(nonce=0, to=Address(0xCC), gas_limit=100000, gas_price=10)]) + ], + post=post, + ) + + +@pytest.mark.parametrize( + "timestamps", + [ + pytest.param( + count( + start=Spec.HISTORY_BUFFER_LENGTH - 5, + step=1, + ), + id="buffer_wraparound", + ), + pytest.param( + count( + start=12, + step=Spec.HISTORY_BUFFER_LENGTH, + ), + id="buffer_wraparound_overwrite", + ), + pytest.param( + count( + start=2**32, + step=Spec.HISTORY_BUFFER_LENGTH, + ), + id="buffer_wraparound_overwrite_high_timestamp", + ), + pytest.param( + count( + start=5, + step=Spec.HISTORY_BUFFER_LENGTH - 1, + ), + id="buffer_wraparound_no_overwrite", + ), + pytest.param( + count( + start=Spec.HISTORY_BUFFER_LENGTH - 3, + step=Spec.HISTORY_BUFFER_LENGTH + 1, + ), + id="buffer_wraparound_no_overwrite_2", + ), + ], +) +@pytest.mark.parametrize("block_count", [10]) # All tests use 10 blocks +@pytest.mark.valid_from("Cancun") +def test_multi_block_beacon_root_timestamp_calls( + blockchain_test: BlockchainTestFiller, + timestamps: Iterator[int], + beacon_roots: Iterator[bytes], + block_count: int, + tx: Transaction, + call_gas: int, + call_value: int, +): + """ + Tests multiple blocks where each block writes a timestamp to storage and contains one + transaction that calls the beacon root contract multiple times. + + The blocks might overwrite the historical roots buffer, or not, depending on the `timestamps`, + and whether they increment in multiples of `Spec.HISTORY_BUFFER_LENGTH` or not. + + By default, the beacon roots are the keccak of the block number. + + Each transaction checks the current timestamp and also all previous timestamps, and verifies + that the beacon root is correct for all of them if the timestamp is supposed to be in the + buffer, which might have been overwritten by a later block. + """ + blocks: List[Block] = [] + pre = { + TestAddress: Account( + nonce=0, + balance=0x10**10, + ), + } + post = {} + + timestamps_storage: Dict[int, int] = {} + roots_storage: Dict[int, bytes] = {} + + all_timestamps: List[int] = [] + + for timestamp, beacon_root, i in zip(timestamps, beacon_roots, range(block_count)): + timestamp_index = timestamp % Spec.HISTORY_BUFFER_LENGTH + timestamps_storage[timestamp_index] = timestamp + roots_storage[timestamp_index] = beacon_root + + all_timestamps.append(timestamp) + + withdraw_index = count(0) + + current_call_account_code = bytes() + current_call_account_expected_storage = Storage() + current_call_account_address = Address(0x100 + i) + + # We are going to call the beacon roots contract once for every timestamp of the current + # and all previous blocks, and check that the returned beacon root is still correct only + # if it was not overwritten. 
+ for t in all_timestamps: + current_call_account_code += Op.MSTORE(0, t) + call_valid = ( + timestamp_index in timestamps_storage + and timestamps_storage[t % Spec.HISTORY_BUFFER_LENGTH] == t + ) + current_call_account_code += Op.SSTORE( + current_call_account_expected_storage.store_next(0x01 if call_valid else 0x00), + Op.CALL( + call_gas, + Spec.BEACON_ROOTS_ADDRESS, + call_value, + 0x00, + 0x20, + 0x20, + 0x20, + ), + ) + + current_call_account_code += Op.SSTORE( + current_call_account_expected_storage.store_next( + roots_storage[t % Spec.HISTORY_BUFFER_LENGTH] if call_valid else 0x00 + ), + Op.MLOAD(0x20), + ) + + pre[current_call_account_address] = Account( + code=current_call_account_code, + ) + post[current_call_account_address] = Account( + storage=current_call_account_expected_storage, + ) + blocks.append( + Block( + txs=[ + tx.with_fields( + nonce=i, + to=Address(0x100 + i), + data=Hash(timestamp), + ) + ], + beacon_root=beacon_root, + timestamp=timestamp, + withdrawals=[ + # Also withdraw to the beacon root contract and the system address + Withdrawal( + address=Spec.BEACON_ROOTS_ADDRESS, + amount=1, + index=next(withdraw_index), + validator=0, + ), + Withdrawal( + address=Spec.SYSTEM_ADDRESS, + amount=1, + index=next(withdraw_index), + validator=1, + ), + ], + ) + ) + + blockchain_test( + pre=pre, + blocks=blocks, + post=post, + ) + + +@pytest.mark.parametrize( + "timestamps", + [pytest.param(count(start=1000, step=1000), id="fork_transition")], +) +@pytest.mark.parametrize("block_count", [20]) +@pytest.mark.valid_at_transition_to("Cancun") +def test_beacon_root_transition( + blockchain_test: BlockchainTestFiller, + timestamps: Iterator[int], + beacon_roots: Iterator[bytes], + block_count: int, + tx: Transaction, + call_gas: int, + call_value: int, + fork: Fork, +): + """ + Tests the fork transition to cancun and verifies that blocks with timestamp lower than the + transition timestamp do not contain beacon roots in the pre-deployed contract. + """ + blocks: List[Block] = [] + pre = { + TestAddress: Account( + nonce=0, + balance=0x10**10, + ), + } + post = {} + + timestamps_storage: Dict[int, int] = {} + roots_storage: Dict[int, bytes] = {} + + all_timestamps: List[int] = [] + timestamps_in_beacon_root_contract: List[int] = [] + + for timestamp, beacon_root, i in zip(timestamps, beacon_roots, range(block_count)): + timestamp_index = timestamp % Spec.HISTORY_BUFFER_LENGTH + + transitioned = fork.header_beacon_root_required(i, timestamp) + if transitioned: + # We've transitioned, the current timestamp must contain a value in the contract + timestamps_in_beacon_root_contract.append(timestamp) + timestamps_storage[timestamp_index] = timestamp + roots_storage[timestamp_index] = beacon_root + + all_timestamps.append(timestamp) + + withdraw_index = count(0) + + current_call_account_code = bytes() + current_call_account_expected_storage = Storage() + current_call_account_address = Address(0x100 + i) + + # We are going to call the beacon roots contract once for every timestamp of the current + # and all previous blocks, and check that the returned beacon root is correct only + # if it was after the transition timestamp. 
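+        # Note (clarifying comment): timestamps of blocks before the transition are never
+        # written by the beacon roots system call (`timestamps_in_beacon_root_contract` only
+        # tracks post-transition blocks), so the contract call for those timestamps is
+        # expected to fail, even if their ring-buffer slot is later populated.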
+ for t in all_timestamps: + current_call_account_code += Op.MSTORE(0, t) + call_valid = ( + t in timestamps_in_beacon_root_contract + and timestamp_index in timestamps_storage + and timestamps_storage[t % Spec.HISTORY_BUFFER_LENGTH] == t + ) + current_call_account_code += Op.SSTORE( + current_call_account_expected_storage.store_next(0x01 if call_valid else 0x00), + Op.CALL( + call_gas, + Spec.BEACON_ROOTS_ADDRESS, + call_value, + 0x00, + 0x20, + 0x20, + 0x20, + ), + ) + + current_call_account_code += Op.SSTORE( + current_call_account_expected_storage.store_next( + roots_storage[t % Spec.HISTORY_BUFFER_LENGTH] if call_valid else 0x00 + ), + Op.MLOAD(0x20), + ) + + pre[current_call_account_address] = Account( + code=current_call_account_code, + ) + post[current_call_account_address] = Account( + storage=current_call_account_expected_storage, + ) + blocks.append( + Block( + txs=[ + tx.with_fields( + nonce=i, + to=Address(0x100 + i), + data=Hash(timestamp), + ) + ], + beacon_root=beacon_root if transitioned else None, + timestamp=timestamp, + withdrawals=[ + # Also withdraw to the beacon root contract and the system address + Withdrawal( + address=Spec.BEACON_ROOTS_ADDRESS, + amount=1, + index=next(withdraw_index), + validator=0, + ), + Withdrawal( + address=Spec.SYSTEM_ADDRESS, + amount=1, + index=next(withdraw_index), + validator=1, + ), + ], + ) + ) + + blockchain_test( + pre=pre, + blocks=blocks, + post=post, + ) + + +@pytest.mark.parametrize("timestamp", [15_000]) +@pytest.mark.valid_at_transition_to("Cancun") +def test_no_beacon_root_contract_at_transition( + blockchain_test: BlockchainTestFiller, + pre: Dict, + beacon_roots: Iterator[bytes], + tx: Transaction, + timestamp: int, + caller_address: Address, + fork: Fork, +): + """ + Tests the fork transition to cancun in the case where the beacon root pre-deploy was not + deployed in time for the fork. + """ + assert fork.header_beacon_root_required(1, timestamp) + blocks: List[Block] = [ + Block( + txs=[tx], + beacon_root=next(beacon_roots), + timestamp=timestamp, + withdrawals=[ + # Also withdraw to the beacon root contract and the system address + Withdrawal( + address=Spec.BEACON_ROOTS_ADDRESS, + amount=1, + index=0, + validator=0, + ), + Withdrawal( + address=Spec.SYSTEM_ADDRESS, + amount=1, + index=1, + validator=1, + ), + ], + ) + ] + pre[Spec.BEACON_ROOTS_ADDRESS] = Account( + code=b"", # Remove the code that is automatically allocated on Cancun fork + nonce=0, + balance=0, + ) + post = { + Spec.BEACON_ROOTS_ADDRESS: Account( + storage={ + timestamp % Spec.HISTORY_BUFFER_LENGTH: 0, + (timestamp % Spec.HISTORY_BUFFER_LENGTH) + Spec.HISTORY_BUFFER_LENGTH: 0, + }, + code=b"", + nonce=0, + balance=int(1e9), + ), + caller_address: Account( + storage={ + 0: 1 + }, # Successful call because the contract is not there, but nothing else is stored + ), + } + blockchain_test( + pre=pre, + blocks=blocks, + post=post, + ) + + +@pytest.mark.parametrize( + "timestamp", + [ + pytest.param(15_000, id="deploy_on_shanghai"), + pytest.param(30_000, id="deploy_on_cancun"), + ], +) +@pytest.mark.valid_at_transition_to("Cancun") +def test_beacon_root_contract_deploy( + blockchain_test: BlockchainTestFiller, + pre: Dict, + beacon_root: bytes, + tx: Transaction, + timestamp: int, + post: Dict, + fork: Fork, +): + """ + Tests the fork transition to cancun deploying the contract during Shanghai and verifying the + code deployed and its functionality after Cancun. 
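+
+    The deployment transaction is included in the first block, at `timestamp // 2`: before the
+    Cancun transition in the `deploy_on_shanghai` case and on or after it in the
+    `deploy_on_cancun` case.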
+ """ + assert fork.header_beacon_root_required(1, timestamp) + tx_gas_limit = 0x3D090 + tx_gas_price = 0xE8D4A51000 + deployer_required_balance = tx_gas_limit * tx_gas_price + deploy_tx = Transaction( + ty=0, + nonce=0, + to=None, + gas_limit=tx_gas_limit, + gas_price=tx_gas_price, + value=0, + data=bytes.fromhex( + "60618060095f395ff33373fffffffffffffffffffffffffffffffffffffffe14604d576020361460" + "24575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f526020" + "5ff35b5f5ffd5b62001fff42064281555f359062001fff015500" + ), + v=0x1B, + r=0x539, + s=0x1B9B6EB1F0, + protected=False, + ).with_signature_and_sender() + deployer_address = deploy_tx.sender + assert deployer_address is not None + assert Address(deployer_address) == Spec.BEACON_ROOTS_DEPLOYER_ADDRESS + blocks: List[Block] = [] + + beacon_root_contract_storage: Dict = {} + for i, current_timestamp in enumerate(range(timestamp // 2, timestamp + 1, timestamp // 2)): + if i == 0: + blocks.append( + Block( # Deployment block + txs=[deploy_tx], + beacon_root=( + beacon_root + if fork.header_beacon_root_required(1, current_timestamp) + else None + ), + timestamp=timestamp // 2, + withdrawals=[ + # Also withdraw to the beacon root contract and the system address + Withdrawal( + address=Spec.BEACON_ROOTS_ADDRESS, + amount=1, + index=0, + validator=0, + ), + Withdrawal( + address=Spec.SYSTEM_ADDRESS, + amount=1, + index=1, + validator=1, + ), + ], + ) + ) + beacon_root_contract_storage[current_timestamp % Spec.HISTORY_BUFFER_LENGTH] = 0 + beacon_root_contract_storage[ + (current_timestamp % Spec.HISTORY_BUFFER_LENGTH) + Spec.HISTORY_BUFFER_LENGTH + ] = 0 + elif i == 1: + blocks.append( + Block( # Contract already deployed + txs=[tx], + beacon_root=beacon_root, + timestamp=timestamp, + withdrawals=[ + # Also withdraw to the beacon root contract and the system address + Withdrawal( + address=Spec.BEACON_ROOTS_ADDRESS, + amount=1, + index=2, + validator=0, + ), + Withdrawal( + address=Spec.SYSTEM_ADDRESS, + amount=1, + index=3, + validator=1, + ), + ], + ), + ) + beacon_root_contract_storage[ + current_timestamp % Spec.HISTORY_BUFFER_LENGTH + ] = current_timestamp + beacon_root_contract_storage[ + (current_timestamp % Spec.HISTORY_BUFFER_LENGTH) + Spec.HISTORY_BUFFER_LENGTH + ] = beacon_root + else: + assert False, "This test should only have two blocks" + + expected_code = fork.pre_allocation_blockchain()[Spec.BEACON_ROOTS_ADDRESS]["code"] + pre[Spec.BEACON_ROOTS_ADDRESS] = Account( + code=b"", # Remove the code that is automatically allocated on Cancun fork + nonce=0, + balance=0, + ) + pre[deployer_address] = Account( + balance=deployer_required_balance, + ) + + post[Spec.BEACON_ROOTS_ADDRESS] = Account( + storage=beacon_root_contract_storage, + code=expected_code, + nonce=1, + balance=int(2e9), + ) + post[Spec.SYSTEM_ADDRESS] = Account( + storage={}, + code=b"", + nonce=0, + balance=int(2e9), + ) + post[deployer_address] = Account( + balance=175916000000000000, # It doesn't consume all the balance :( + nonce=1, ) - state_test( - env=env, + blockchain_test( pre=pre, - txs=[tx, Transaction(nonce=1, to=to_address(0xCC), gas_limit=100000, gas_price=10)], + blocks=blocks, post=post, ) diff --git a/tests/cancun/eip4788_beacon_root/test_blocks_beacon_root_contract.py b/tests/cancun/eip4788_beacon_root/test_blocks_beacon_root_contract.py deleted file mode 100644 index bb55311655..0000000000 --- a/tests/cancun/eip4788_beacon_root/test_blocks_beacon_root_contract.py +++ /dev/null @@ -1,544 +0,0 @@ -""" -abstract: Tests beacon block 
root for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788) - - Test the exposed beacon chain root in the EVM for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788) using multi-block tests - -note: Adding a new test - - Add a function that is named `test_` and takes at least the following arguments: - - - blockchain_test - - env - - pre - - blocks - - post - - valid_call - - The following arguments *need* to be parametrized or the test will not be generated: - - - - - All other `pytest.fixtures` can be parametrized to generate new combinations and test - cases. - -""" # noqa: E501 - -from itertools import count -from typing import Dict, Iterator, List - -import pytest - -from ethereum_test_forks import Fork -from ethereum_test_tools import ( - Account, - Block, - BlockchainTestFiller, - Storage, - TestAddress, - Transaction, - Withdrawal, - to_address, - to_hash_bytes, -) -from ethereum_test_tools.vm.opcode import Opcodes as Op - -from .spec import Spec, ref_spec_4788 - -REFERENCE_SPEC_GIT_PATH = ref_spec_4788.git_path -REFERENCE_SPEC_VERSION = ref_spec_4788.version - - -@pytest.mark.parametrize( - "timestamps", - [ - pytest.param( - count( - start=Spec.HISTORY_BUFFER_LENGTH - 5, - step=1, - ), - id="buffer_wraparound", - ), - pytest.param( - count( - start=12, - step=Spec.HISTORY_BUFFER_LENGTH, - ), - id="buffer_wraparound_overwrite", - ), - pytest.param( - count( - start=2**32, - step=Spec.HISTORY_BUFFER_LENGTH, - ), - id="buffer_wraparound_overwrite_high_timestamp", - ), - pytest.param( - count( - start=5, - step=Spec.HISTORY_BUFFER_LENGTH - 1, - ), - id="buffer_wraparound_no_overwrite", - ), - pytest.param( - count( - start=Spec.HISTORY_BUFFER_LENGTH - 3, - step=Spec.HISTORY_BUFFER_LENGTH + 1, - ), - id="buffer_wraparound_no_overwrite_2", - ), - ], -) -@pytest.mark.parametrize("block_count", [10]) # All tests use 10 blocks -@pytest.mark.valid_from("Cancun") -def test_multi_block_beacon_root_timestamp_calls( - blockchain_test: BlockchainTestFiller, - timestamps: Iterator[int], - beacon_roots: Iterator[bytes], - block_count: int, - tx: Transaction, - call_gas: int, - call_value: int, -): - """ - Tests multiple blocks where each block writes a timestamp to storage and contains one - transaction that calls the beacon root contract multiple times. - - The blocks might overwrite the historical roots buffer, or not, depending on the `timestamps`, - and whether they increment in multiples of `Spec.HISTORY_BUFFER_LENGTH` or not. - - By default, the beacon roots are the keccak of the block number. - - Each transaction checks the current timestamp and also all previous timestamps, and verifies - that the beacon root is correct for all of them if the timestamp is supposed to be in the - buffer, which might have been overwritten by a later block. 
- """ - blocks: List[Block] = [] - pre = { - TestAddress: Account( - nonce=0, - balance=0x10**10, - ), - } - post = {} - - timestamps_storage: Dict[int, int] = {} - roots_storage: Dict[int, bytes] = {} - - all_timestamps: List[int] = [] - - for timestamp, beacon_root, i in zip(timestamps, beacon_roots, range(block_count)): - timestamp_index = timestamp % Spec.HISTORY_BUFFER_LENGTH - timestamps_storage[timestamp_index] = timestamp - roots_storage[timestamp_index] = beacon_root - - all_timestamps.append(timestamp) - - withdraw_index = count(0) - - current_call_account_code = bytes() - current_call_account_expected_storage = Storage() - current_call_account_address = to_address(0x100 + i) - - # We are going to call the beacon roots contract once for every timestamp of the current - # and all previous blocks, and check that the returned beacon root is still correct only - # if it was not overwritten. - for t in all_timestamps: - current_call_account_code += Op.MSTORE(0, t) - call_valid = ( - timestamp_index in timestamps_storage - and timestamps_storage[t % Spec.HISTORY_BUFFER_LENGTH] == t - ) - current_call_account_code += Op.SSTORE( - current_call_account_expected_storage.store_next(0x01 if call_valid else 0x00), - Op.CALL( - call_gas, - Spec.BEACON_ROOTS_ADDRESS, - call_value, - 0x00, - 0x20, - 0x20, - 0x20, - ), - ) - - current_call_account_code += Op.SSTORE( - current_call_account_expected_storage.store_next( - roots_storage[t % Spec.HISTORY_BUFFER_LENGTH] if call_valid else 0x00 - ), - Op.MLOAD(0x20), - ) - - pre[current_call_account_address] = Account( - code=current_call_account_code, - ) - post[current_call_account_address] = Account( - storage=current_call_account_expected_storage, - ) - blocks.append( - Block( - txs=[ - tx.with_fields( - nonce=i, - to=to_address(0x100 + i), - data=to_hash_bytes(timestamp), - ) - ], - beacon_root=beacon_root, - timestamp=timestamp, - withdrawals=[ - # Also withdraw to the beacon root contract and the system address - Withdrawal( - address=Spec.BEACON_ROOTS_ADDRESS, - amount=1, - index=next(withdraw_index), - validator=0, - ), - Withdrawal( - address=Spec.SYSTEM_ADDRESS, - amount=1, - index=next(withdraw_index), - validator=1, - ), - ], - ) - ) - - blockchain_test( - pre=pre, - blocks=blocks, - post=post, - ) - - -@pytest.mark.parametrize( - "timestamps", - [pytest.param(count(start=1000, step=1000), id="fork_transition")], -) -@pytest.mark.parametrize("block_count", [20]) -@pytest.mark.valid_at_transition_to("Cancun") -def test_beacon_root_transition( - blockchain_test: BlockchainTestFiller, - timestamps: Iterator[int], - beacon_roots: Iterator[bytes], - block_count: int, - tx: Transaction, - call_gas: int, - call_value: int, - fork: Fork, -): - """ - Tests the fork transition to cancun and verifies that blocks with timestamp lower than the - transition timestamp do not contain beacon roots in the pre-deployed contract. 
- """ - blocks: List[Block] = [] - pre = { - TestAddress: Account( - nonce=0, - balance=0x10**10, - ), - } - post = {} - - timestamps_storage: Dict[int, int] = {} - roots_storage: Dict[int, bytes] = {} - - all_timestamps: List[int] = [] - timestamps_in_beacon_root_contract: List[int] = [] - - for timestamp, beacon_root, i in zip(timestamps, beacon_roots, range(block_count)): - timestamp_index = timestamp % Spec.HISTORY_BUFFER_LENGTH - - transitioned = fork.header_beacon_root_required(i, timestamp) - if transitioned: - # We've transitioned, the current timestamp must contain a value in the contract - timestamps_in_beacon_root_contract.append(timestamp) - timestamps_storage[timestamp_index] = timestamp - roots_storage[timestamp_index] = beacon_root - - all_timestamps.append(timestamp) - - withdraw_index = count(0) - - current_call_account_code = bytes() - current_call_account_expected_storage = Storage() - current_call_account_address = to_address(0x100 + i) - - # We are going to call the beacon roots contract once for every timestamp of the current - # and all previous blocks, and check that the returned beacon root is correct only - # if it was after the transition timestamp. - for t in all_timestamps: - current_call_account_code += Op.MSTORE(0, t) - call_valid = ( - t in timestamps_in_beacon_root_contract - and timestamp_index in timestamps_storage - and timestamps_storage[t % Spec.HISTORY_BUFFER_LENGTH] == t - ) - current_call_account_code += Op.SSTORE( - current_call_account_expected_storage.store_next(0x01 if call_valid else 0x00), - Op.CALL( - call_gas, - Spec.BEACON_ROOTS_ADDRESS, - call_value, - 0x00, - 0x20, - 0x20, - 0x20, - ), - ) - - current_call_account_code += Op.SSTORE( - current_call_account_expected_storage.store_next( - roots_storage[t % Spec.HISTORY_BUFFER_LENGTH] if call_valid else 0x00 - ), - Op.MLOAD(0x20), - ) - - pre[current_call_account_address] = Account( - code=current_call_account_code, - ) - post[current_call_account_address] = Account( - storage=current_call_account_expected_storage, - ) - blocks.append( - Block( - txs=[ - tx.with_fields( - nonce=i, - to=to_address(0x100 + i), - data=to_hash_bytes(timestamp), - ) - ], - beacon_root=beacon_root if transitioned else None, - timestamp=timestamp, - withdrawals=[ - # Also withdraw to the beacon root contract and the system address - Withdrawal( - address=Spec.BEACON_ROOTS_ADDRESS, - amount=1, - index=next(withdraw_index), - validator=0, - ), - Withdrawal( - address=Spec.SYSTEM_ADDRESS, - amount=1, - index=next(withdraw_index), - validator=1, - ), - ], - ) - ) - - blockchain_test( - pre=pre, - blocks=blocks, - post=post, - ) - - -@pytest.mark.parametrize("timestamp", [15_000]) -@pytest.mark.valid_at_transition_to("Cancun") -def test_no_beacon_root_contract_at_transition( - blockchain_test: BlockchainTestFiller, - pre: Dict, - beacon_roots: Iterator[bytes], - tx: Transaction, - timestamp: int, - caller_address: str, - fork: Fork, -): - """ - Tests the fork transition to cancun in the case where the beacon root pre-deploy was not - deployed in time for the fork. 
- """ - assert fork.header_beacon_root_required(1, timestamp) - blocks: List[Block] = [ - Block( - txs=[tx], - beacon_root=next(beacon_roots), - timestamp=timestamp, - withdrawals=[ - # Also withdraw to the beacon root contract and the system address - Withdrawal( - address=Spec.BEACON_ROOTS_ADDRESS, - amount=1, - index=0, - validator=0, - ), - Withdrawal( - address=Spec.SYSTEM_ADDRESS, - amount=1, - index=1, - validator=1, - ), - ], - ) - ] - pre[Spec.BEACON_ROOTS_ADDRESS] = Account( - code=b"", # Remove the code that is automatically allocated on Cancun fork - nonce=0, - balance=0, - ) - post = { - Spec.BEACON_ROOTS_ADDRESS: Account( - storage={ - timestamp % Spec.HISTORY_BUFFER_LENGTH: 0, - (timestamp % Spec.HISTORY_BUFFER_LENGTH) + Spec.HISTORY_BUFFER_LENGTH: 0, - }, - code=b"", - nonce=0, - balance=int(1e9), - ), - caller_address: Account( - storage={ - 0: 1 - }, # Successful call because the contract is not there, but nothing else is stored - ), - } - blockchain_test( - pre=pre, - blocks=blocks, - post=post, - ) - - -@pytest.mark.parametrize( - "timestamp", - [ - pytest.param(15_000, id="deploy_on_shanghai"), - pytest.param(30_000, id="deploy_on_cancun"), - ], -) -@pytest.mark.valid_at_transition_to("Cancun") -def test_beacon_root_contract_deploy( - blockchain_test: BlockchainTestFiller, - pre: Dict, - beacon_root: bytes, - tx: Transaction, - timestamp: int, - post: Dict, - fork: Fork, -): - """ - Tests the fork transition to cancun deploying the contract during Shanghai and verifying the - code deployed and its functionality after Cancun. - """ - assert fork.header_beacon_root_required(1, timestamp) - tx_gas_limit = 0x3D090 - tx_gas_price = 0xE8D4A51000 - deployer_required_balance = tx_gas_limit * tx_gas_price - deploy_tx = Transaction( - ty=0, - nonce=0, - to=None, - gas_limit=tx_gas_limit, - gas_price=tx_gas_price, - value=0, - data=bytes.fromhex( - "60618060095f395ff33373fffffffffffffffffffffffffffffffffffffffe14604d576020361460" - "24575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f526020" - "5ff35b5f5ffd5b62001fff42064281555f359062001fff015500" - ), - v=0x1B, - r=0x539, - s=0x1B9B6EB1F0, - protected=False, - ).with_signature_and_sender() - deployer_address = deploy_tx.sender - assert deployer_address is not None - assert deployer_address == int.to_bytes(Spec.BEACON_ROOTS_DEPLOYER_ADDRESS, 20, "big") - blocks: List[Block] = [] - - beacon_root_contract_storage: Dict = {} - for i, current_timestamp in enumerate(range(timestamp // 2, timestamp + 1, timestamp // 2)): - if i == 0: - blocks.append( - Block( # Deployment block - txs=[deploy_tx], - beacon_root=beacon_root - if fork.header_beacon_root_required(1, current_timestamp) - else None, - timestamp=timestamp // 2, - withdrawals=[ - # Also withdraw to the beacon root contract and the system address - Withdrawal( - address=Spec.BEACON_ROOTS_ADDRESS, - amount=1, - index=0, - validator=0, - ), - Withdrawal( - address=Spec.SYSTEM_ADDRESS, - amount=1, - index=1, - validator=1, - ), - ], - ) - ) - beacon_root_contract_storage[current_timestamp % Spec.HISTORY_BUFFER_LENGTH] = 0 - beacon_root_contract_storage[ - (current_timestamp % Spec.HISTORY_BUFFER_LENGTH) + Spec.HISTORY_BUFFER_LENGTH - ] = 0 - elif i == 1: - blocks.append( - Block( # Contract already deployed - txs=[tx], - beacon_root=beacon_root, - timestamp=timestamp, - withdrawals=[ - # Also withdraw to the beacon root contract and the system address - Withdrawal( - address=Spec.BEACON_ROOTS_ADDRESS, - amount=1, - index=2, - validator=0, - ), - Withdrawal( - 
address=Spec.SYSTEM_ADDRESS, - amount=1, - index=3, - validator=1, - ), - ], - ), - ) - beacon_root_contract_storage[ - current_timestamp % Spec.HISTORY_BUFFER_LENGTH - ] = current_timestamp - beacon_root_contract_storage[ - (current_timestamp % Spec.HISTORY_BUFFER_LENGTH) + Spec.HISTORY_BUFFER_LENGTH - ] = beacon_root - else: - assert False, "This test should only have two blocks" - - expected_code = fork.pre_allocation(1, timestamp)[Spec.BEACON_ROOTS_ADDRESS]["code"] - pre[Spec.BEACON_ROOTS_ADDRESS] = Account( - code=b"", # Remove the code that is automatically allocated on Cancun fork - nonce=0, - balance=0, - ) - pre[deployer_address] = Account( - balance=deployer_required_balance, - ) - - post[Spec.BEACON_ROOTS_ADDRESS] = Account( - storage=beacon_root_contract_storage, - code=expected_code, - nonce=1, - balance=int(2e9), - ) - post[Spec.SYSTEM_ADDRESS] = Account( - storage={}, - code=b"", - nonce=0, - balance=int(2e9), - ) - post[deployer_address] = Account( - balance=175916000000000000, # It doesn't consume all the balance :( - nonce=1, - ) - blockchain_test( - pre=pre, - blocks=blocks, - post=post, - ) diff --git a/tests/cancun/eip4844_blobs/common.py b/tests/cancun/eip4844_blobs/common.py index 4b63233a06..aca1932326 100644 --- a/tests/cancun/eip4844_blobs/common.py +++ b/tests/cancun/eip4844_blobs/common.py @@ -5,12 +5,12 @@ from typing import List, Literal, Tuple, Union from ethereum_test_tools import ( + Address, TestAddress, YulCompiler, add_kzg_version, compute_create2_address, compute_create_address, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -100,14 +100,14 @@ class BlobhashContext: yul_compiler: Union[YulCompiler, None] = None addresses = { - "blobhash_sstore": to_address(0x100), - "blobhash_return": to_address(0x600), - "call": to_address(0x200), - "delegatecall": to_address(0x300), - "callcode": to_address(0x800), - "staticcall": to_address(0x700), - "create": to_address(0x400), - "create2": to_address(0x500), + "blobhash_sstore": Address(0x100), + "blobhash_return": Address(0x600), + "call": Address(0x200), + "delegatecall": Address(0x300), + "callcode": Address(0x800), + "staticcall": Address(0x700), + "create": Address(0x400), + "create2": Address(0x500), } @staticmethod diff --git a/tests/cancun/eip4844_blobs/conftest.py b/tests/cancun/eip4844_blobs/conftest.py index f7bc99da63..64aa7c7de0 100644 --- a/tests/cancun/eip4844_blobs/conftest.py +++ b/tests/cancun/eip4844_blobs/conftest.py @@ -3,14 +3,7 @@ """ import pytest -from ethereum_test_tools import ( - Block, - TestPrivateKey2, - Transaction, - add_kzg_version, - to_address, - to_hash_bytes, -) +from ethereum_test_tools import Address, Block, Hash, TestPrivateKey2, Transaction, add_kzg_version from .spec import BlockHeaderBlobGasFields, Spec @@ -48,7 +41,7 @@ def non_zero_blob_gas_used_genesis_block( Transaction( ty=Spec.BLOB_TX_TYPE, nonce=0, - to=to_address(0x200), + to=Address(0x200), value=1, gas_limit=21000, max_fee_per_gas=tx_max_fee_per_gas, @@ -56,7 +49,7 @@ def non_zero_blob_gas_used_genesis_block( max_fee_per_blob_gas=Spec.get_blob_gasprice(excess_blob_gas=excess_blob_gas), access_list=[], blob_versioned_hashes=add_kzg_version( - [to_hash_bytes(x) for x in range(parent_blobs)], + [Hash(x) for x in range(parent_blobs)], Spec.BLOB_COMMITMENT_VERSION_KZG, ), secret_key=TestPrivateKey2, diff --git a/tests/cancun/eip4844_blobs/point_evaluation_vectors/README.md b/tests/cancun/eip4844_blobs/point_evaluation_vectors/README.md index 24041aa48b..e8668ef02a 100644 --- 
a/tests/cancun/eip4844_blobs/point_evaluation_vectors/README.md
+++ b/tests/cancun/eip4844_blobs/point_evaluation_vectors/README.md
@@ -1,13 +1,39 @@
# KZG Point Evaluation Test Vectors
-This directory contains test vectors for the KZG point evaluation algorithm, compiled from different sources.
+This directory contains test vectors for the KZG point evaluation algorithm that are loaded and used throughout different tests.
Each file must contain a JSON list of objects, each with the following fields:
+
- `name`: a string describing the test case
- `input`: object containing `commitment`, `proof`, `z` and `y`
- `output`: expected output of the evaluation, true, false or null.
-The files are loaded and used throughout different test tests.
+## Generating The Test Vectors (used in v1.0.6 and on)
+
+From execution-spec-tests release v1.0.6 and on, the point evaluation test vectors were generated using commit [63aa303c](https://github.com/ethereum/consensus-specs/tree/63aa303c5a2cf46ea98edbf3f82286079651bb78) from the [official-kzg](https://github.com/ethereum/consensus-specs/commits/official-kzg) [consensus-specs](https://github.com/ethereum/consensus-specs) branch.
+
+The test vectors were generated as follows:
+
+1. In the consensus-specs repo:
+
+    ```console
+    cd tests/generators/kzg_4844/
+    rm -rf /tmp/kzg_4844_output
+    mkdir /tmp/kzg_4844_output
+    python -m main --output /tmp/kzg_4844_output
+    ```
+
+2. In the execution-spec-tests repo:
+
+    ```console
+    cd tests/cancun/eip4844_blobs/point_evaluation_vectors/
+    pip install -r requirements.txt
+    python concat_kzg_vectors_to_json.py \
+        --input /tmp/kzg_4844_output/general/deneb/kzg/verify_kzg_proof/kzg-mainnet/ \
+        --output go_kzg_4844_verify_kzg_proof.json
+    ```
+
+## Previous Versions of the Test Vectors (used up to v1.0.5)
-Current files and their sources:
+The test vectors used up to and including execution-spec-tests [release v1.0.5](https://github.com/ethereum/execution-spec-tests/releases/tag/v1.0.5) were:
- `go_kzg_4844_verify_kzg_proof.json`: test vectors from the [go-kzg-4844](https://github.com/crate-crypto/go-kzg-4844) repository.
\ No newline at end of file
diff --git a/tests/cancun/eip4844_blobs/point_evaluation_vectors/concat_kzg_vectors_to_json.py b/tests/cancun/eip4844_blobs/point_evaluation_vectors/concat_kzg_vectors_to_json.py
new file mode 100644
index 0000000000..f45ec5aa5d
--- /dev/null
+++ b/tests/cancun/eip4844_blobs/point_evaluation_vectors/concat_kzg_vectors_to_json.py
@@ -0,0 +1,57 @@
+"""
+Helper script to concatenate all the point evaluation test data.yaml files in
+a directory into a single JSON file for easier consumption in tests.
+"""
+import argparse
+import json
+from pathlib import Path
+
+import yaml  # type: ignore
+
+
+def gather_yaml_data(directory: Path):  # noqa: D103
+    all_data = []
+
+    # Loop through each directory in the main directory
+    for sub_dir in sorted(directory.iterdir()):
+        if sub_dir.is_dir():
+            yaml_file_path = sub_dir / "data.yaml"
+
+            # Check if data.yaml exists in the directory
+            if yaml_file_path.exists():
+                with yaml_file_path.open("r") as yaml_file:
+                    yaml_data = yaml.safe_load(yaml_file)
+                    # Append the data along with the directory name
+                    all_data.append(
+                        {
+                            "input": yaml_data["input"],
+                            "output": yaml_data["output"],
+                            "name": sub_dir.name,
+                        }
+                    )
+    return all_data
+
+
+def main():  # noqa: D103
+    parser = argparse.ArgumentParser(
+        description="Concatenate the data from multiple data.yaml files into one JSON file."
+ ) + parser.add_argument( + "-i", + "--input", + type=Path, + required=True, + help="Input directory containing the YAML files.", + ) + parser.add_argument( + "-o", "--output", type=Path, required=True, help="Path to the output JSON file." + ) + + args = parser.parse_args() + data = gather_yaml_data(args.input) + with args.output.open("w") as json_file: + json.dump(data, json_file, indent=2) + + +if __name__ == "__main__": + main() diff --git a/tests/cancun/eip4844_blobs/point_evaluation_vectors/go_kzg_4844_verify_kzg_proof.json b/tests/cancun/eip4844_blobs/point_evaluation_vectors/go_kzg_4844_verify_kzg_proof.json index ee05607df0..5bb85902f6 100644 --- a/tests/cancun/eip4844_blobs/point_evaluation_vectors/go_kzg_4844_verify_kzg_proof.json +++ b/tests/cancun/eip4844_blobs/point_evaluation_vectors/go_kzg_4844_verify_kzg_proof.json @@ -1,5 +1,4 @@ [ - "Source: https://github.com/crate-crypto/go-kzg-4844/tree/f396e76257450358c2191863c5bfca832d5d36e5/tests/verify_kzg_proof/small", { "input": { "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -10,6 +9,16 @@ "output": true, "name": "verify_kzg_proof_case_correct_proof_02e696ada7d4631d" }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_05c1f3685f3393f0" + }, { "input": { "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", @@ -32,30 +41,50 @@ }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_177b58dc7a46b08f" + }, + { + "input": { + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proof": "0xa729910bad9058b4e277f3ddba7396df505a0191c38e5ad08af1d0d30792c887e99bf754b7932b83e1cef4e1ec678339" + "proof": "0x92c51ff81dd71dab71cefecd79e8274b4b7ba36a0f40e2dc086bc4061c7f63249877db23297212991fd63e07b7ebc348" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_1ce8e4f69d5df899" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x73e66878b46ae3705eb6a46a89213de7d3686828bfce5c19400fffff00100001", - "proof": 
"0x8931692634033cbbfe2db48c027dbb93f123754205bb7c7a56797966f83272b13b4be34f80c15bf8d4ed0b740ffb3c64" + "proof": "0xb82ded761997f2c6f1bb3db1e1dada2ef06d936551667c82f659b75f99d2da2068b81340823ee4e829a93c9fbed7810d" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_26b753dec0560daa" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000001", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_2b76dc9e3abf42f3" + }, + { + "input": { + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x1522a4a7f34e1ea350ae07c29c96c7e79655aa926122e95fe69fcbd932ca49e9", - "proof": "0xa57dd03e11678afb060af17cc3dc049e02f2814e13ed1dadc94d1105106ffa834907d1176420f637d51edbcd120274fb" + "proof": "0xa62ad71d14c5719385c0686f1871430475bf3a00f0aa3f7b8dd99a9abc2160744faf0070725e00b60ad9a026a15b1a8c" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_31ebd010e6098750" @@ -82,14 +111,24 @@ }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x304962b3598a0adf33189fdfd9789feab1096ff40006900400000003fffffffc", - "proof": "0xa69fc1abb7125e6ae566a95cacd832cca426b8c8ecd7397b19a8f003103bc11a508fc6dceab3a2a16cc83782d295c08f" + "proof": "0xaa86c458b3065e7ec244033a2ade91a7499561f482419a3a372c42a636dad98262a2ce926d142fd7cfe26ca148efe8b4" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_392169c16a2e5ef6" }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_395cf6d697d1a743" + }, { "input": { "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -102,20 +141,20 @@ }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x50625ad853cc21ba40594f79591e5d35c445ecf9453014da6524c0cf6367c359", - "proof": "0xa86f18630d3803118cb0f7a970e04cf58612a91f5cd4f7f9aead0a5e44996484cad44f6a21a37d9188f84e1cfe5d70ff" + "proof": "0xb72d80393dc39beea3857cb3719277138876b2b207f1d5e54dd62a14e3242d123b5a6db066181ff01a51c26c9d2f400b" }, "output": true, "name": 
"verify_kzg_proof_case_correct_proof_3c1e8b38219e3e12" }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x6d928e13fe443e957d82e3e71d48cb65d51028eb4483e719bf8efcdf12f7c321", - "proof": "0xb437dcd924834a69a566f90d8b83cb80e7c58a08da289389b3b1ffb528fd8bc9b92e6397c42b40a1e876ba116bdcc7f9" + "proof": "0xa444d6bb5aadc3ceb615b50d6606bd54bfe529f59247987cd1ab848d19de599a9052f1835fb0d0d44cf70183e19a68c9" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_3c87ec986c2656c2" @@ -132,20 +171,20 @@ }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x2bf4e1f980eb94661a21affc4d7e6e56f214fe3e7dc4d20b98c66ffd43cabeb0", - "proof": "0x9956ed657b632d9de65783593320c0b0f4195ca853c9616ba11e244bf2333f6da4755883e9995210b20fbd78f9c205a5" + "proof": "0x89012990b0ca02775bd9df8145f6c936444b83f54df1f5f274fb4312800a6505dd000ee8ec7b0ea6d72092a3daf0bffb" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_420f2a187ce77035" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", - "proof": "0x807b160e50107d5af2ac0c93eedc658eeb776d2416ecd9579b976795d0719eead66791c9603c703dc50a108d56823a98" + "proof": "0xa060b350ad63d61979b80b25258e7cc6caf781080222e0209b4a0b074decca874afc5c41de3313d8ed217d905e6ada43" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_444b73ff54a19b44" @@ -162,30 +201,40 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_585454b31673dd62" + }, + { + "input": { + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x58cdc98c4c44791bb8ba7e58a80324ef8c021c79c68e253c430fa2663188f7f2", - "proof": "0x88bfb4ce7aa40228147817d8b5ddad533e456b622dea6733e1813b8d1ce9e03ef5fb58d983e29da3187ba840c5a79759" + "proof": "0x9506a8dc7f3f720a592a79a4e711e28d8596854bac66b9cb2d6d361704f1735442d47ea09fda5e0984f0928ce7d2f5f6" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_7db4f140a955dd1a" }, { "input": { - "commitment": 
"0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", - "proof": "0x92cccc70d97c2eeb444077721221cbf048a7bcd5742a1c896c868672527c8fb92687db1d8c6970064375957af91b0b89" + "proof": "0xb0c829a8d2d3405304fecbea193e6c67f7c3912a6adc7c3737ad3f8a3b750425c1531a7426f03033a3994bc82a10609f" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_83e53423a2dd93fe" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proof": "0x87177e99700d9be917c648299b919b210d0449761bfd425c6fbf7c9cb48c44525715bf62bad3fe046a066ac3b1c747fc" + "proof": "0xb9241c6816af6388d1014cd4d7dd21662a6e3d47f96c0257bce642b70e8e375839a880864638669c6a709b414ab8bffc" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_9b24f8997145435c" @@ -202,40 +251,50 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000002", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_a0be66af9a97ea52" + }, + { + "input": { + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x6c28d6edfea2f5e1638cb1a8be8197549d52e133fa9dae87e52abb45f7b192dd", - "proof": "0xad37aa0a5c07d64c137a3cc01893418c1ae6e766ca8842a295cad27cbde933aad4f499d96619f957a730d014927eaf72" + "proof": "0x8a46b67dcba4e3aa66f9952be69e1ecbc24e21d42b1df2bfe1c8e28431c6221a3f1d09808042f5624e857710cb24fb69" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_af669445747d2585" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x64d3b6baf69395bde2abd1d43f99be66bc64581234fd363e2ae3a0d419cfc3fc", - "proof": "0x8efb5f81312368c3d0778349fa9a37838d9da05f5019f13e98c451d44adbeaa24385efecc246958bc1e80ade37693425" + "proof": "0x893acd46552b81cc9e5ff6ca03dad873588f2c61031781367cfea2a2be4ef3090035623338711b3cf7eff4b4524df742" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_af8b75f664ed7d43" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": 
"0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x6a75e4fe63e5e148c853462a680c3e3ccedea34719d28f19bf1b35ae4eea37d6", - "proof": "0x8be986bb46235d7dace424c585d8160a7ffb0be6cf09844d5dc50cd6f14f9d9b4a39cb187d590d5b9fabc41efe31673b" + "proof": "0xa38758fca85407078c0a7e5fd6d38b34340c809baa0e1fed9deaabb11aa503062acbbe23fcbe620a21b40a83bfa71b89" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_b6cb6698327d9835" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proof": "0x89d9a593cf16156d32a0998b48fdc0964fc8a63e4b87a6750c6e132089125bfd57105274821bbda7198cfe1b69ee49b4" + "proof": "0xa256a681861974cdf6b116467044aa75c85b01076423a92c3335b93d10bf2fcb99b943a53adc1ab8feb6b475c4688948" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_b6ec3736f9ff2c62" @@ -262,90 +321,90 @@ }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x24d25032e67a7e6a4910df5834b8fe70e6bcfeeac0352434196bdf4b2485d5a1", - "proof": "0x942307f266e636553e94006d11423f2688945ff3bdf515859eba1005c1a7708d620a94d91a1c0c285f9584e75ec2f82a" + "proof": "0x873033e038326e87ed3e1276fd140253fa08e9fc25fb2d9a98527fc22a2c9612fbeafdad446cbc7bcdbdcd780af2c16a" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_c5e1490d672d026d" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x2c9ae4f1d6d08558d7027df9cc6b248c21290075d2c0df8a4084d02090b3fa14", - "proof": "0xb981665ea5575dbe48f8cea22d4e3effc42cb66f967c7ca174c21f06a4a55740f61d8cde1bc6339c3f036e8d5176516f" + "proof": "0xb059c60125debbbf29d041bac20fd853951b64b5f31bfe2fa825e18ff49a259953e734b3d57119ae66f7bd79de3027f6" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_cae5d3491190b777" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x4882cf0609af8c7cd4c256e63a35838c95a9ebbf6122540ab344b42fd66d32e1", - "proof": "0xa731aa7fbb00bc5c46074279f0123976c2a0c05ef3987bdc16fa64fbab1621dec82c66b4bfb117456068137391617e59" + "proof": "0x987ea6df69bbe97c23e0dd948cf2d4490824ba7fea5af812721b2393354b0810a9dba2c231ea7ae30f26c412c7ea6e3a" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_d0992bc0387790a4" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + 
"commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x5fd58150b731b4facfcdd89c0e393ff842f5f2071303eff99b51e103161cd233", - "proof": "0x884c7d4772baef968a6a407ac02cc1a813d67003e8f039c4d66f7757be0e5bd484e561550acf58b8a3199b2690e9809a" + "proof": "0x94425f5cf336685a6a4e806ad4601f4b0d3707a655718f968c57e225f0e4b8d5fd61878234f25ec59d090c07ea725cf4" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_d736268229bd87ec" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x549345dd3612e36fab0ab7baffe3faa5b820d56b71348c89ecaf63f7c4f85370", - "proof": "0x8f391ceb1bd34fe93e995f04fc78f8e715776ce6385936ab91a9ca88f3942cc37bd471c0180ed0ab6fc4f5e2d6f99dac" + "proof": "0xa35c4f136a09a33c6437c26dc0c617ce6548a14bc4af7127690a411f5e1cde2f73157365212dbcea6432e0e7869cb006" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_e68d7111a2364a49" }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x5ee1e9a4a06a02ca6ea14b0ca73415a8ba0fba888f18dde56df499b480d4b9e0", - "proof": "0x940769c68153fe476f97be8841dd16ad8c8f52a8bd2e8a421bb8eccc3a77e705e3702fdf17f1bf27333d94e2d88ee85d" + "proof": "0xa1fcd37a924af9ec04143b44853c26f6b0738f6e15a3e0755057e7d5460406c7e148adb0e2d608982140d0ae42fe0b3b" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_ed6b180ec759bcf6" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x1ed7d14d1b3fb1a1890d67b81715531553ad798df2009b4311d9fe2bea6cb964", - "proof": "0xa4b831fd8adfa8bffcbfa0f486f40cdb65ada7dc7b26e6c745c0369b3a59c338df67edebc3fd7c14ff374be3f1f66735" + "proof": "0xa71f21ca51b443ad35bb8a26d274223a690d88d9629927dc80b0856093e08a372820248df5b8a43b6d98fd52a62fa376" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_f0ed3dc11cdeb130" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", - "proof": "0x86031313e4108b347f185a1247c062be741e37376474c23812ba260a13436065507d429c9ea7a205b6eb069e49a70641" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_f47eb9fc139f6bfd" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": 
"0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x61157104410181bdc6eac224aa9436ac268bdcfeecb6badf71d228adda820af3", - "proof": "0x807dce8223a17fd2702eb75a13333f5d3128639df8fc09881a68d464d5765a1a0d2f4628ea573eddb3d6cf4846a0b4ec" + "proof": "0x809adfa8b078b0921cdb8696ca017a0cc2d5337109016f36a766886eade28d32f205311ff5def247c3ddba91896fae97" }, "output": true, "name": "verify_kzg_proof_case_correct_proof_f7f44e1e864aa967" @@ -360,6 +419,126 @@ "output": true, "name": "verify_kzg_proof_case_correct_proof_ffa6e97b97146517" }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_poly_05c1f3685f3393f0" + }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_poly_177b58dc7a46b08f" + }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000001", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_poly_2b76dc9e3abf42f3" + }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_poly_395cf6d697d1a743" + }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_poly_585454b31673dd62" + }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000002", + "y": 
"0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_twos_poly_a0be66af9a97ea52" + }, + { + "input": { + "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "z": "0x0000000000000000000000000000000000000000000000000000000000000002", + "y": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_poly_02e696ada7d4631d" + }, + { + "input": { + "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", + "y": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_poly_0cf79b17cb5f4ea2" + }, + { + "input": { + "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", + "y": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_poly_3208425794224c3f" + }, + { + "input": { + "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_poly_3ac8dc31e9aa6a70" + }, + { + "input": { + "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "z": "0x0000000000000000000000000000000000000000000000000000000000000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_poly_c3d4322ec17fe7cd" + }, + { + "input": { + "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "z": "0x0000000000000000000000000000000000000000000000000000000000000001", + "y": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": true, + "name": "verify_kzg_proof_case_correct_proof_point_at_infinity_for_zero_poly_ffa6e97b97146517" + }, { "input": { "commitment": 
"0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -370,6 +549,16 @@ "output": false, "name": "verify_kzg_proof_case_incorrect_proof_02e696ada7d4631d" }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_05c1f3685f3393f0" + }, { "input": { "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", @@ -392,30 +581,50 @@ }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_177b58dc7a46b08f" + }, + { + "input": { + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proof": "0xa491b284078832f22a229c6fb9c60670bbbcc13142ad50a9f53e3f2fa32e673416ce59d1dd05921cbb37f91bec352ad6" + "proof": "0x9779b8337f00de6aeac881256198bd2db2fe95bc3127ad9e6440d9e4d1e785b455f55fcfe80a3434dc40f8e6df85be88" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_1ce8e4f69d5df899" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x73e66878b46ae3705eb6a46a89213de7d3686828bfce5c19400fffff00100001", - "proof": "0x83bdb1c6f172d8e30a58533d4df96d9133b7ee293ae76777d60454cb35ef28f08634a794dc098baa22499347ab9a00db" + "proof": "0x90f53a4837bbde6ab0838fef0c0be5339ab03a78342c221cf6b2d6e465d01a3d47585a808c9d8d25dee885007deeb107" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_26b753dec0560daa" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000001", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_2b76dc9e3abf42f3" + }, + { + "input": { + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": 
"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x1522a4a7f34e1ea350ae07c29c96c7e79655aa926122e95fe69fcbd932ca49e9", - "proof": "0x9418eb9a7cf2fa71125962f6662afeac10a7f1bbe26365995b13f6840946da49f79c7dfdd80b5b8a50bf44758cd2a96d" + "proof": "0xb9b65c2ebc89e669cf19e82fb178f0d1e9c958edbebe9ead62e97e95e2dcdc4972729fb9661f0cae3532b71b2664a8c1" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_31ebd010e6098750" @@ -442,14 +651,24 @@ }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x304962b3598a0adf33189fdfd9789feab1096ff40006900400000003fffffffc", - "proof": "0x81b26a6f4606710d329c8977ba35d907358aa9a4cc288ff6e4be5f251cab25118f206b22d030b6c2aa82444d1accc73c" + "proof": "0xb08a5afbb1717334e08e05576b07bff58e8851d8cfd9ea71da1ab4233ad4217cffabd669dfa89c3ebf4c44f91694a2f4" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_392169c16a2e5ef6" }, + { + "input": { + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_395cf6d697d1a743" + }, { "input": { "commitment": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", @@ -462,20 +681,20 @@ }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x50625ad853cc21ba40594f79591e5d35c445ecf9453014da6524c0cf6367c359", - "proof": "0x87e0d60304667f4f331765a966bd70a196805dd852362d3cc163b8e675d1c504a88a6e3dcc5a766a4f1fbfdb98ea45ec" + "proof": "0x90559bfd8e58f5d144588a1a959c93aba58607777e09893f088e404eb2dc47c0269ed8e47c1be79ea07ae726abd921a8" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_3c1e8b38219e3e12" }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x6d928e13fe443e957d82e3e71d48cb65d51028eb4483e719bf8efcdf12f7c321", - "proof": "0x813889b6303c421a9c60a8163e7f31162e4d43e5594fda9b91fd34f98a9483169b959f24741d09b88e9ec9298f19de5d" + "proof": "0x8d72dc4eec977090f452b412a6b0a3cdced2ea6b622ebb6e289c7e05d85cc715b93eca244123c84a60b3ecbf33373903" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_3c87ec986c2656c2" @@ -492,20 +711,20 @@ }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": 
"0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x2bf4e1f980eb94661a21affc4d7e6e56f214fe3e7dc4d20b98c66ffd43cabeb0", - "proof": "0xaa421ef5eb79b0e642339a0edd408faed044978319b307f5230eb4f6b787602e29cd9246b905aa34077c802bc1087be0" + "proof": "0x99c282db3a79a9ec1553306515e6a71dc43df1ddbd1dbd9d5b71f3c1798ef482f5e1fd84500b0e47c82f72a189ecd526" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_420f2a187ce77035" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", - "proof": "0xb3352fb4d0b82179ccba8f8cd2969f93e67f9ee926607cb374cff85e22c578ee94cf738d332e1b2cc2feb908690c1b03" + "proof": "0xa7de1e32bb336b85e42ff5028167042188317299333f091dd88675e84a550577bfa564b2f57cd2498e2acf875e0aaa40" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_444b73ff54a19b44" @@ -522,30 +741,40 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000000", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_585454b31673dd62" + }, + { + "input": { + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", "y": "0x58cdc98c4c44791bb8ba7e58a80324ef8c021c79c68e253c430fa2663188f7f2", - "proof": "0x82af93a59a4978439a9a05cf6324e0e558c68b114f590589255bffe9ea2c0917a6ff6a7cd2e317f26f18b07802e9379a" + "proof": "0xb0ac600174134691bf9d91fee448b4d58c127356567da1c456b9c38468909d4effe6b7faa11177e1f96ee5d2834df001" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_7db4f140a955dd1a" }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", - "proof": "0x94b3fc86ed60de7223ce1ec3a22dcf3c7c3672c1870ffa0888bc29eeb684e9120857089163dea588685ff2fa0798e808" + "proof": "0x8e3069b19e6e71aed9b7dc8fbba13e4217d91cfc59be47cfaa7d09ef626242517541992c0f76091ddabf271682cc7c2c" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_83e53423a2dd93fe" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proof": 
"0xa229f9de17c0f4b53ee9d448c189a5909358e32ec04409265497cf41cbdcd03af5bd50812cafffc09e08e0e7e7b96337" + "proof": "0xafc13cef6ed41f7abe142d32d7b5354e5664bd4b6d52080460dd404dc2cb26269c24826d2bcd0152d0b55ee0a9e90289" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_9b24f8997145435c" @@ -562,40 +791,50 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e", + "z": "0x0000000000000000000000000000000000000000000000000000000000000002", + "y": "0x0000000000000000000000000000000000000000000000000000000000000002", + "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_a0be66af9a97ea52" + }, + { + "input": { + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x6c28d6edfea2f5e1638cb1a8be8197549d52e133fa9dae87e52abb45f7b192dd", - "proof": "0x908fa50ed7c7359b6fea6031f8089459bb91ea77cb0af622bdfc7abb7a98d571aba9c322e70b25e2f439fc75887a0af0" + "proof": "0xa88d68fe3ad0d09b07f4605b1364c8d4804bf7096dae003d821cc01c3b7d35c6d1fdae14e2db3c05e1cdcea7c7b7f262" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_af669445747d2585" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x64d3b6baf69395bde2abd1d43f99be66bc64581234fd363e2ae3a0d419cfc3fc", - "proof": "0xa80216e6ca6221a11f1a0417387eaba6571d2cfdfa338d69d173923d8722fa8394bf9ab487d2a155d1def23a512ef5a3" + "proof": "0xaf08cbca9deec336f2a56ca0b202995830f238fc3cb2ecdbdc0bbb6419e3e60507e823ff7dcbd17394cea55bc514716c" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_af8b75f664ed7d43" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x6a75e4fe63e5e148c853462a680c3e3ccedea34719d28f19bf1b35ae4eea37d6", - "proof": "0xabacbdcba60523fdc84ed1a32bb770f610f8b284d875479918c2fe397e04eb52ecc0f1ff0029f9466ffe5b278b8fecc3" + "proof": "0x861a2aef7aa82db033bfa125b9f756afecaf1db28384925d5007bcf7dff1a53b72bdf522610303075aeecab41685d720" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_b6cb6698327d9835" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proof": "0x831cc31c152e768eb8c2fd2d0c04003e7abc66b2c3f4cc7fc2b3426b4a698dc2a5e5161dfb841ad17cd09be61b33987b" + "proof": 
"0x82f1cd05471ab6ff21bcfd5c3369cba05b03a872a10829236d184fe1872767c391c2aa7e3b85babb1e6093b7224e7732" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_b6ec3736f9ff2c62" @@ -622,90 +861,90 @@ }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", "y": "0x24d25032e67a7e6a4910df5834b8fe70e6bcfeeac0352434196bdf4b2485d5a1", - "proof": "0x80c0d129b845321df0c5b09d06d5cee182cf743783f33d7927735e91a08a4055b4053be9cb84ba370ea275f4b91da2bf" + "proof": "0xacd56791e0ab0d1b3802021862013418993da2646e87140e12631e2914d9e6c676466aa3adfc91b61f84255544cab544" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_c5e1490d672d026d" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x2c9ae4f1d6d08558d7027df9cc6b248c21290075d2c0df8a4084d02090b3fa14", - "proof": "0xaf62efd28d2f6a4d044da3db338612b43f453b7746999c7501f5be48c1dce51e481b5cfe5ec93f90264ba0655782ffe9" + "proof": "0xa4cc8c419ade0cf043cbf30f43c8f7ee6da3ab8d2c15070f323e5a13a8178fe07c8f89686e5fd16565247b520028251b" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_cae5d3491190b777" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x4882cf0609af8c7cd4c256e63a35838c95a9ebbf6122540ab344b42fd66d32e1", - "proof": "0x95c51f028ec8ace94b2c24fff6662e4c61ad7b315b799aa5f40fcf5b36b2f1b6f9fc23bc66290aeef1de7e6ee4cb52ce" + "proof": "0xb8f731ba6a52e419ffc843c50d2947d30e933e3a881b208de54149714ece74a599503f84c6249b5fd8a7c70189882a6b" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_d0992bc0387790a4" }, { "input": { - "commitment": "0xb09ce4964278eff81a976fbc552488cb84fc4a102f004c87179cb912f49904d1e785ecaf5d184522a58e9035875440ef", + "commitment": "0x93efc82d2017e9c57834a1246463e64774e56183bb247c8fc9dd98c56817e878d97b05f5c8d900acf1fbbbca6f146556", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x5fd58150b731b4facfcdd89c0e393ff842f5f2071303eff99b51e103161cd233", - "proof": "0xb8d91f1642376ff871e99eda3209fb7968520c5353522f31d1013e8925d05e7ea44f9117ccedfa03ef5f6fc8205080a7" + "proof": "0x84c349506215a2d55f9d06f475b8229c6dedc08fd467f41fabae6bb042c2d0dbdbcd5f7532c475e479588eec5820fd37" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_d736268229bd87ec" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000002", "y": "0x549345dd3612e36fab0ab7baffe3faa5b820d56b71348c89ecaf63f7c4f85370", - "proof": 
"0xa99a3f7861e6350419e20be7018f518a54db5ec93698a2565b53e6303d3f99fad5559ed047ad09031b1b64111ac31b25" + "proof": "0x94fce36bf7e9f0ed981728fcd829013de96f7d25f8b4fe885059ec24af36f801ffbf68ec4604ef6e5f5f800f5cf31238" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_e68d7111a2364a49" }, { "input": { - "commitment": "0x991fc16086918023ba2301fb85054f814ef114cfce303650d90a456199c6196146cc1293a88384c6503be26d087f11c9", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", "y": "0x5ee1e9a4a06a02ca6ea14b0ca73415a8ba0fba888f18dde56df499b480d4b9e0", - "proof": "0xaf9bbe2e73d18f879bcc0867b22e8ac255040a0e85e258b6a8243bc12aa526067d1762dddf18fb25a9fced1cbdd9a447" + "proof": "0xb3477fc9a5bfab5fdb5523251818ee5a6d52613c59502a3d2df58217f4e366cd9ef37dee55bf2c705a2b08e7808b6fa0" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_ed6b180ec759bcf6" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xb49d88afcd7f6c61a8ea69eff5f609d2432b47e7e4cd50b02cdddb4e0c1460517e8df02e4e64dc55e3d8ca192d57193a", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x1ed7d14d1b3fb1a1890d67b81715531553ad798df2009b4311d9fe2bea6cb964", - "proof": "0xa6b598c7936325f3d21e53c433720c65783d54bfddd83146f63247caba7aa25671641a9cc7b5a1ddcb229760f311cbb8" + "proof": "0x98e15cbf800b69b90bfcaf1d907a9889c7743f7e5a19ee4b557471c005600f56d78e3dd887b2f5b87d76405b80dd2115" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_f0ed3dc11cdeb130" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", - "proof": "0x9331a4bf48d66edbf77db3778203ad7045ed762bcfb2fe3ded65763983208e78ec1ef255babab4b41875b9505d1fb129" + "proof": "0x98613e9e1b1ed52fc2fdc54e945b863ff52870e6565307ff9e32327196d7a03c428fc51a9abedc97de2a68daa1274b50" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_f47eb9fc139f6bfd" }, { "input": { - "commitment": "0x978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000000", "y": "0x61157104410181bdc6eac224aa9436ac268bdcfeecb6badf71d228adda820af3", - "proof": "0xa2e739f11e3a85482c5a9941ac46608ddfe459b09fb21b81fcb8f0ac3993ac618639bff39aefb5356bee9ccd1a3ebb5c" + "proof": "0xa1d8f2a5ab22acdfc1a9492ee2e1c2cbde681b51b312bf718821937e5088cd8ee002b718264027d10c5c5855dabe0353" }, "output": false, "name": "verify_kzg_proof_case_incorrect_proof_f7f44e1e864aa967" @@ -720,12 +959,72 @@ "output": false, "name": "verify_kzg_proof_case_incorrect_proof_ffa6e97b97146517" }, + { + "input": { + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", + "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "y": "0x304962b3598a0adf33189fdfd9789feab1096ff40006900400000003fffffffc", + "proof": 
"0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_point_at_infinity_392169c16a2e5ef6" + }, + { + "input": { + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", + "z": "0x0000000000000000000000000000000000000000000000000000000000000000", + "y": "0x50625ad853cc21ba40594f79591e5d35c445ecf9453014da6524c0cf6367c359", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_point_at_infinity_3c1e8b38219e3e12" + }, + { + "input": { + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", + "z": "0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306", + "y": "0x6d928e13fe443e957d82e3e71d48cb65d51028eb4483e719bf8efcdf12f7c321", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_point_at_infinity_3c87ec986c2656c2" + }, + { + "input": { + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", + "z": "0x0000000000000000000000000000000000000000000000000000000000000002", + "y": "0x2bf4e1f980eb94661a21affc4d7e6e56f214fe3e7dc4d20b98c66ffd43cabeb0", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_point_at_infinity_420f2a187ce77035" + }, + { + "input": { + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", + "z": "0x0000000000000000000000000000000000000000000000000000000000000001", + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_point_at_infinity_83e53423a2dd93fe" + }, + { + "input": { + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", + "z": "0x5eb7004fe57383e6c88b99d839937fddf3f99279353aaf8d5c9a75f91ce33c62", + "y": "0x5ee1e9a4a06a02ca6ea14b0ca73415a8ba0fba888f18dde56df499b480d4b9e0", + "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "output": false, + "name": "verify_kzg_proof_case_incorrect_proof_point_at_infinity_ed6b180ec759bcf6" + }, { "input": { "commitment": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", - "proof": "0x807b160e50107d5af2ac0c93eedc658eeb776d2416ecd9579b976795d0719eead66791c9603c703dc50a108d56823a98" + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", + "proof": "0xb0c829a8d2d3405304fecbea193e6c67f7c3912a6adc7c3737ad3f8a3b750425c1531a7426f03033a3994bc82a10609f" }, "output": null, "name": "verify_kzg_proof_case_invalid_commitment_1b44e341d56c757d" @@ -734,8 +1033,8 @@ "input": { "commitment": 
"0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", - "proof": "0x807b160e50107d5af2ac0c93eedc658eeb776d2416ecd9579b976795d0719eead66791c9603c703dc50a108d56823a98" + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", + "proof": "0xb0c829a8d2d3405304fecbea193e6c67f7c3912a6adc7c3737ad3f8a3b750425c1531a7426f03033a3994bc82a10609f" }, "output": null, "name": "verify_kzg_proof_case_invalid_commitment_32afa9561a4b3b91" @@ -744,8 +1043,8 @@ "input": { "commitment": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb00", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", - "proof": "0x807b160e50107d5af2ac0c93eedc658eeb776d2416ecd9579b976795d0719eead66791c9603c703dc50a108d56823a98" + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", + "proof": "0xb0c829a8d2d3405304fecbea193e6c67f7c3912a6adc7c3737ad3f8a3b750425c1531a7426f03033a3994bc82a10609f" }, "output": null, "name": "verify_kzg_proof_case_invalid_commitment_3e55802a5ed3c757" @@ -754,17 +1053,17 @@ "input": { "commitment": "0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde0", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", - "proof": "0x807b160e50107d5af2ac0c93eedc658eeb776d2416ecd9579b976795d0719eead66791c9603c703dc50a108d56823a98" + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", + "proof": "0xb0c829a8d2d3405304fecbea193e6c67f7c3912a6adc7c3737ad3f8a3b750425c1531a7426f03033a3994bc82a10609f" }, "output": null, "name": "verify_kzg_proof_case_invalid_commitment_e9d3e9ec16fbc15f" }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6" }, "output": null, @@ -772,9 +1071,9 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", "proof": "0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" }, "output": null, @@ -782,9 +1081,9 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": 
"0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", "proof": "0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb00" }, "output": null, @@ -792,9 +1091,9 @@ }, { "input": { - "commitment": "0xa6696ac7117d8ecc2224a4368e20d367d2d67995a32a53752096a501e6477e3b76ab6969fcfcc93f1f2a0f0ed74d2ca7", + "commitment": "0xa421e229565952cfff4ef3517100a97da1d4fe57956fa50a442f92af03b1bf37adacc8ad4ed209b31287ea5bb94d9d06", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", - "y": "0x443e7af5274b52214ea6c775908c54519fea957eecd98069165a8b771082fd51", + "y": "0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe", "proof": "0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde0" }, "output": null, @@ -802,120 +1101,120 @@ }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_y_35d08d612aad2197" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0xffffffffffffffffffffffffffffffff00000000000000000000000000000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_y_4aa6def8c35c9097" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x00000000000000000000000000000000000000000000000000000000000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_y_4e51cef08a61606f" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000002", - "proof": 
"0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_y_64b9ff2b8f7dddee" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x000000000000000000000000000000000000000000000000000000000000000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_y_b358a2e763727b70" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x0000000000000000000000000000000000000000000000000000000000000001", "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_y_eb0601fec84cc5e9" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_z_35d08d612aad2197" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0xffffffffffffffffffffffffffffffff00000000000000000000000000000000", - "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_z_4aa6def8c35c9097" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": 
"0x00000000000000000000000000000000000000000000000000000000000000", - "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_z_4e51cef08a61606f" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000002", - "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_z_64b9ff2b8f7dddee" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x000000000000000000000000000000000000000000000000000000000000000000", - "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_z_b358a2e763727b70" }, { "input": { - "commitment": "0xb7f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb", + "commitment": "0x8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7", "z": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", - "y": "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", - "proof": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "y": "0x60f840641ec0d0c0d2b77b2d5a393b329442721fad05ab78c7b98f2aa3c20ec9", + "proof": "0xb30b3d1e4faccc380557792c9a0374d58fa286f5f75fea48870585393f890909cd3c53cfe4897e799fb211b4be531e43" }, "output": null, "name": "verify_kzg_proof_case_invalid_z_eb0601fec84cc5e9" diff --git a/tests/cancun/eip4844_blobs/point_evaluation_vectors/requirements.txt b/tests/cancun/eip4844_blobs/point_evaluation_vectors/requirements.txt new file mode 100644 index 0000000000..1f35b28a48 --- /dev/null +++ b/tests/cancun/eip4844_blobs/point_evaluation_vectors/requirements.txt @@ -0,0 +1,2 @@ +# additional requirements for concat_kzg_vectors_to_json.py +PyYAML \ No newline at end of file diff --git a/tests/cancun/eip4844_blobs/test_blob_txs.py b/tests/cancun/eip4844_blobs/test_blob_txs.py index 5ae5db4f33..488550d30b 100644 --- a/tests/cancun/eip4844_blobs/test_blob_txs.py +++ b/tests/cancun/eip4844_blobs/test_blob_txs.py @@ -1,46 +1,49 @@ """ abstract: 
Tests blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). note: Adding a new test - Add a function that is named `test_` and takes at least the following arguments: - - blockchain_test + - blockchain_test or state_test - pre - env - - blocks + - block or txs All other `pytest.fixture` fixtures can be parametrized to generate new combinations and test cases. """ # noqa: E501 import itertools +from dataclasses import replace from typing import Dict, List, Optional, Tuple import pytest from ethereum_test_forks import Fork from ethereum_test_tools import ( + AccessList, Account, + Address, Block, BlockchainTestFiller, + BlockException, EngineAPIError, Environment, + Hash, Header, ) from ethereum_test_tools import Opcodes as Op from ethereum_test_tools import ( Removable, + StateTestFiller, Storage, TestAddress, TestAddress2, Transaction, + TransactionException, add_kzg_version, eip_2028_transaction_data_cost, - to_address, - to_hash_bytes, ) from .spec import Spec, SpecHelpers, ref_spec_4844 @@ -48,11 +51,14 @@ REFERENCE_SPEC_GIT_PATH = ref_spec_4844.git_path REFERENCE_SPEC_VERSION = ref_spec_4844.version +TestPreFundingKey = "0x0b2986cc45bd8a8d028c3fcf6f7a11a52f1df61f3ea5d63f05ca109dd73a3fa0" +TestPreFundingAddress = "0x97a7cb1de3cc7d556d0aa32433b035067709e1fc" + @pytest.fixture -def destination_account() -> str: +def destination_account() -> Address: """Default destination account for the blob transactions.""" - return to_address(0x100) + return Address(0x100) @pytest.fixture @@ -66,9 +72,21 @@ def tx_value() -> int: @pytest.fixture -def tx_gas() -> int: +def tx_gas( + tx_calldata: bytes, + tx_access_list: List[AccessList], +) -> int: """Default gas allocated to transactions sent during test.""" - return 21000 + access_list_gas = 0 + if tx_access_list: + ACCESS_LIST_ADDRESS_COST = 2400 + ACCESS_LIST_STORAGE_KEY_COST = 1900 + + for address in tx_access_list: + access_list_gas += ACCESS_LIST_ADDRESS_COST + access_list_gas += len(address.storage_keys) * ACCESS_LIST_STORAGE_KEY_COST + + return 21000 + eip_2028_transaction_data_cost(tx_calldata) + access_list_gas @pytest.fixture @@ -170,7 +188,7 @@ def blob_hashes_per_tx(blobs_per_tx: List[int]) -> List[List[bytes]]: """ return [ add_kzg_version( - [to_hash_bytes(x) for x in range(blob_count)], + [Hash(x) for x in range(blob_count)], Spec.BLOB_COMMITMENT_VERSION_KZG, ) for blob_count in blobs_per_tx @@ -181,9 +199,7 @@ def blob_hashes_per_tx(blobs_per_tx: List[int]) -> List[List[bytes]]: def total_account_minimum_balance( # noqa: D103 tx_gas: int, tx_value: int, - tx_calldata: bytes, tx_max_fee_per_gas: int, - tx_max_priority_fee_per_gas: int, tx_max_fee_per_blob_gas: int, blob_hashes_per_tx: List[List[bytes]], ) -> int: @@ -191,15 +207,33 @@ def total_account_minimum_balance( # noqa: D103 Calculates the minimum balance required for the account to be able to send the transactions in the block of the test. 
""" + minimum_cost = 0 + for tx_blob_count in [len(x) for x in blob_hashes_per_tx]: + blob_cost = tx_max_fee_per_blob_gas * Spec.GAS_PER_BLOB * tx_blob_count + minimum_cost += (tx_gas * tx_max_fee_per_gas) + tx_value + blob_cost + return minimum_cost + + +@pytest.fixture +def total_account_transactions_fee( # noqa: D103 + tx_gas: int, + tx_value: int, + blob_gasprice: int, + block_fee_per_gas: int, + tx_max_fee_per_gas: int, + tx_max_priority_fee_per_gas: int, + blob_hashes_per_tx: List[List[bytes]], +) -> int: + """ + Calculates the actual fee for the blob transactions in the block of the test. + """ total_cost = 0 for tx_blob_count in [len(x) for x in blob_hashes_per_tx]: - data_cost = tx_max_fee_per_blob_gas * Spec.GAS_PER_BLOB * tx_blob_count - total_cost += ( - (tx_gas * (tx_max_fee_per_gas + tx_max_priority_fee_per_gas)) - + tx_value - + eip_2028_transaction_data_cost(tx_calldata) - + data_cost + blob_cost = blob_gasprice * Spec.GAS_PER_BLOB * tx_blob_count + block_producer_fee = ( + tx_max_fee_per_gas - block_fee_per_gas if tx_max_priority_fee_per_gas else 0 ) + total_cost += (tx_gas * (block_fee_per_gas + block_producer_fee)) + tx_value + blob_cost return total_cost @@ -237,7 +271,17 @@ def tx_max_fee_per_blob_gas( # noqa: D103 @pytest.fixture -def tx_error() -> Optional[str]: +def tx_access_list() -> List[AccessList]: + """ + Default access list for transactions sent during test. + + Can be overloaded by a test case to provide a custom access list. + """ + return [] + + +@pytest.fixture +def tx_error() -> Optional[TransactionException]: """ Default expected error produced by the block transactions (no error). @@ -249,15 +293,16 @@ def tx_error() -> Optional[str]: @pytest.fixture(autouse=True) def txs( # noqa: D103 - destination_account: Optional[str], + destination_account: Optional[Address], tx_gas: int, tx_value: int, tx_calldata: bytes, tx_max_fee_per_gas: int, tx_max_fee_per_blob_gas: int, tx_max_priority_fee_per_gas: int, + tx_access_list: List[AccessList], blob_hashes_per_tx: List[List[bytes]], - tx_error: Optional[str], + tx_error: Optional[TransactionException], ) -> List[Transaction]: """ Prepare the list of transactions that are sent during the test. @@ -273,7 +318,7 @@ def txs( # noqa: D103 max_fee_per_gas=tx_max_fee_per_gas, max_priority_fee_per_gas=tx_max_priority_fee_per_gas, max_fee_per_blob_gas=tx_max_fee_per_blob_gas, - access_list=[], + access_list=tx_access_list, blob_versioned_hashes=blob_hashes, error=tx_error if tx_i == (len(blob_hashes_per_tx) - 1) else None, ) @@ -311,16 +356,43 @@ def pre( # noqa: D103 @pytest.fixture def env( parent_excess_blob_gas: Optional[int], + parent_blobs: int, ) -> Environment: """ - Prepare the environment for all test cases. + Prepare the environment of the genesis block for all blockchain tests. """ + excess_blob_gas = parent_excess_blob_gas if parent_excess_blob_gas else 0 + if parent_blobs: + # We increase the excess blob gas of the genesis because + # we cannot include blobs in the genesis, so the + # test blobs are actually in block 1. + excess_blob_gas += Spec.TARGET_BLOB_GAS_PER_BLOCK return Environment( - excess_blob_gas=parent_excess_blob_gas, + excess_blob_gas=excess_blob_gas, blob_gas_used=0, ) +@pytest.fixture +def state_env( + parent_excess_blob_gas: Optional[int], + parent_blobs: int, +) -> Environment: + """ + Prepare the environment for all state test cases. 
+ + Main difference is that the excess blob gas is not increased by the target, as + there is no genesis block -> block 1 transition, and therefore the excess blob gas + is not decreased by the target. + """ + return Environment( + excess_blob_gas=SpecHelpers.calc_excess_blob_gas_from_blob_count( + parent_excess_blob_gas=parent_excess_blob_gas if parent_excess_blob_gas else 0, + parent_blob_count=parent_blobs, + ), + ) + + @pytest.fixture def engine_api_error_code() -> Optional[EngineAPIError]: """ @@ -331,7 +403,9 @@ def engine_api_error_code() -> Optional[EngineAPIError]: @pytest.fixture -def block_error(tx_error: Optional[str]) -> Optional[str]: +def block_error( + tx_error: Optional[TransactionException], +) -> Optional[TransactionException | BlockException]: """ Default expected error produced by the block transactions (no error). @@ -396,27 +470,54 @@ def expected_excess_blob_gas( @pytest.fixture -def blocks( +def header_verify( + txs: List[Transaction], expected_blob_gas_used: Optional[int | Removable], expected_excess_blob_gas: Optional[int | Removable], - txs: List[Transaction], - block_error: Optional[str], - engine_api_error_code: Optional[EngineAPIError], -) -> List[Block]: +) -> Header: """ - Prepare the list of blocks for all test cases. + Header fields to verify from the transition tool. """ - return [ - Block( - txs=txs, - exception=block_error, - engine_api_error_code=engine_api_error_code, - header_verify=Header( - blob_gas_used=expected_blob_gas_used, - excess_blob_gas=expected_excess_blob_gas, - ), - ) - ] + header_verify = Header() + header_verify.blob_gas_used = expected_blob_gas_used + header_verify.excess_blob_gas = expected_excess_blob_gas + if len([tx for tx in txs if not tx.error]) == 0: + header_verify.gas_used = 0 + return header_verify + + +@pytest.fixture +def rlp_modifier( + expected_blob_gas_used: Optional[int | Removable], +) -> Optional[Header]: + """ + Header fields to modify on the output block in the BlockchainTest. + """ + if expected_blob_gas_used == Header.EMPTY_FIELD: + return None + return Header( + blob_gas_used=expected_blob_gas_used, + ) + + +@pytest.fixture +def block( + txs: List[Transaction], + block_error: Optional[TransactionException | BlockException], + engine_api_error_code: Optional[EngineAPIError], + header_verify: Optional[Header], + rlp_modifier: Optional[Header], +) -> Block: + """ + Test block for all blockchain test cases. 
+ """ + return Block( + txs=txs, + exception=block_error, + engine_api_error_code=engine_api_error_code, + header_verify=header_verify, + rlp_modifier=rlp_modifier, + ) def all_valid_blob_combinations() -> List[Tuple[int, ...]]: @@ -475,7 +576,7 @@ def test_valid_blob_tx_combinations( blockchain_test: BlockchainTestFiller, pre: Dict, env: Environment, - blocks: List[Block], + block: Block, ): """ Test all valid blob combinations in a single block, assuming a given value of @@ -491,7 +592,7 @@ def test_valid_blob_tx_combinations( blockchain_test( pre=pre, post={}, - blocks=blocks, + blocks=[block], genesis_environment=env, ) @@ -500,30 +601,34 @@ def test_valid_blob_tx_combinations( "parent_excess_blobs,parent_blobs,tx_max_fee_per_blob_gas,tx_error", [ # tx max_blob_gas_cost of the transaction is not enough - ( + pytest.param( SpecHelpers.get_min_excess_blobs_for_blob_gas_price(2) - 1, # blob gas price is 1 SpecHelpers.target_blobs_per_block() + 1, # blob gas cost increases to 2 1, # tx max_blob_gas_cost is 1 - "insufficient max fee per blob gas", + TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS, + id="insufficient_max_fee_per_blob_gas", ), # tx max_blob_gas_cost of the transaction is zero, which is invalid - ( + pytest.param( 0, # blob gas price is 1 0, # blob gas cost stays put at 1 0, # tx max_blob_gas_cost is 0 - "invalid max fee per blob gas", + TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS, + id="invalid_max_fee_per_blob_gas", ), ], - ids=["insufficient_max_fee_per_blob_gas", "invalid_max_fee_per_blob_gas"], ) +@pytest.mark.parametrize( + "account_balance_modifier", + [1_000_000_000], +) # Extra balance to cover block blob gas cost @pytest.mark.valid_from("Cancun") def test_invalid_tx_max_fee_per_blob_gas( blockchain_test: BlockchainTestFiller, pre: Dict, env: Environment, - blocks: List[Block], - parent_blobs: int, - non_zero_blob_gas_used_genesis_block: Block, + block: Block, + non_zero_blob_gas_used_genesis_block: Optional[Block], ): """ Reject blocks with invalid blob txs due to: @@ -531,12 +636,10 @@ def test_invalid_tx_max_fee_per_blob_gas( - tx max_fee_per_blob_gas is barely not enough - tx max_fee_per_blob_gas is zero """ - if parent_blobs: + blocks = [block] + if non_zero_blob_gas_used_genesis_block is not None: pre[TestAddress2] = Account(balance=10**9) - blocks.insert(0, non_zero_blob_gas_used_genesis_block) - if env.excess_blob_gas is not None: - assert isinstance(env.excess_blob_gas, int) - env.excess_blob_gas += Spec.TARGET_BLOB_GAS_PER_BLOCK + blocks = [non_zero_blob_gas_used_genesis_block, block] blockchain_test( pre=pre, post={}, @@ -545,34 +648,82 @@ def test_invalid_tx_max_fee_per_blob_gas( ) +@pytest.mark.parametrize( + "parent_excess_blobs,parent_blobs,tx_max_fee_per_blob_gas,tx_error", + [ + # tx max_blob_gas_cost of the transaction is not enough + pytest.param( + SpecHelpers.get_min_excess_blobs_for_blob_gas_price(2) - 1, # blob gas price is 1 + SpecHelpers.target_blobs_per_block() + 1, # blob gas cost increases to 2 + 1, # tx max_blob_gas_cost is 1 + TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS, + id="insufficient_max_fee_per_blob_gas", + ), + # tx max_blob_gas_cost of the transaction is zero, which is invalid + pytest.param( + 0, # blob gas price is 1 + 0, # blob gas cost stays put at 1 + 0, # tx max_blob_gas_cost is 0 + TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS, + id="invalid_max_fee_per_blob_gas", + ), + ], +) +@pytest.mark.valid_from("Cancun") +def test_invalid_tx_max_fee_per_blob_gas_state( + state_test_only: 
StateTestFiller, + state_env: Environment, + pre: Dict, + txs: List[Transaction], +): + """ + Reject an invalid blob transaction due to: + + - tx max_fee_per_blob_gas is barely not enough + - tx max_fee_per_blob_gas is zero + """ + assert len(txs) == 1 + state_test_only( + pre=pre, + post={}, + tx=txs[0], + env=state_env, + ) + + @pytest.mark.parametrize( "tx_max_fee_per_gas,tx_error", [ # max blob gas is ok, but max fee per gas is less than base fee per gas ( 6, - "insufficient max fee per gas", + TransactionException.INSUFFICIENT_MAX_FEE_PER_GAS, ), ], ids=["insufficient_max_fee_per_gas"], ) @pytest.mark.valid_from("Cancun") def test_invalid_normal_gas( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, + state_env: Environment, pre: Dict, - env: Environment, - blocks: List[Block], + txs: List[Transaction], + header_verify: Optional[Header], + rlp_modifier: Optional[Header], ): """ - Reject blocks with invalid blob txs due to: + Reject an invalid blob transaction due to: - Sufficient max fee per blob gas, but insufficient max fee per gas """ - blockchain_test( + assert len(txs) == 1 + state_test( pre=pre, post={}, - blocks=blocks, - genesis_environment=env, + tx=txs[0], + env=state_env, + blockchain_test_header_verify=header_verify, + blockchain_test_rlp_modifier=rlp_modifier, ) @@ -580,13 +731,15 @@ def test_invalid_normal_gas( "blobs_per_tx", invalid_blob_combinations(), ) -@pytest.mark.parametrize("block_error", ["invalid_blob_count"]) +@pytest.mark.parametrize( + "tx_error", [TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED], ids=[""] +) @pytest.mark.valid_from("Cancun") def test_invalid_block_blob_count( blockchain_test: BlockchainTestFiller, pre: Dict, env: Environment, - blocks: List[Block], + block: Block, ): """ Test all invalid blob combinations in a single block, where the sum of all blobs in a block is @@ -599,12 +752,18 @@ def test_invalid_block_blob_count( blockchain_test( pre=pre, post={}, - blocks=blocks, + blocks=[block], genesis_environment=env, ) -@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 8]) +@pytest.mark.parametrize( + "tx_access_list", + [[], [AccessList(address=100, storage_keys=[100, 200])]], + ids=["no_access_list", "access_list"], +) +@pytest.mark.parametrize("tx_max_fee_per_gas", [7, 14]) +@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 7]) @pytest.mark.parametrize("tx_value", [0, 1]) @pytest.mark.parametrize( "tx_calldata", @@ -613,43 +772,211 @@ def test_invalid_block_blob_count( ) @pytest.mark.parametrize("tx_max_fee_per_blob_gas", [1, 100, 10000]) @pytest.mark.parametrize("account_balance_modifier", [-1], ids=["exact_balance_minus_1"]) -@pytest.mark.parametrize("tx_error", ["insufficient_account_balance"], ids=[""]) +@pytest.mark.parametrize("tx_error", [TransactionException.INSUFFICIENT_ACCOUNT_FUNDS], ids=[""]) @pytest.mark.valid_from("Cancun") def test_insufficient_balance_blob_tx( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, + state_env: Environment, pre: Dict, - env: Environment, - blocks: List[Block], + txs: List[Transaction], ): """ Reject blocks where user cannot afford the blob gas specified (but max_fee_per_gas would be enough for current block), including: + - Transactions with max fee equal or higher than current block base fee - Transactions with and without priority fee - Transactions with and without value - Transactions with and without calldata - Transactions with max fee per blob gas lower or higher than the priority fee """ + assert len(txs) == 1 + 
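    # Worked example of the balance arithmetic behind the "exact_balance_minus_1" marker above
    # (illustrative numbers only; they assume the no-calldata / no-access-list case, so
    # tx_gas = 21_000, with max_fee_per_gas = 7, tx_value = 1, a single blob and
    # max_fee_per_blob_gas = 1, where GAS_PER_BLOB = 2**17 per EIP-4844):
    #
    #   minimum_balance = tx_gas * max_fee_per_gas + value + blobs * GAS_PER_BLOB * max_fee_per_blob_gas
    #                   = 21_000 * 7 + 1 + 1 * 131_072 * 1
    #                   = 278_073
    #
    # Funding the sender with one wei less than this makes the transaction invalid with
    # TransactionException.INSUFFICIENT_ACCOUNT_FUNDS before any execution takes place.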
state_test( + pre=pre, + post={}, + tx=txs[0], + env=state_env, + ) + + +@pytest.mark.parametrize( + "tx_access_list", + [[], [AccessList(address=100, storage_keys=[100, 200])]], + ids=["no_access_list", "access_list"], +) +@pytest.mark.parametrize("tx_max_fee_per_gas", [7, 14]) +@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 7]) +@pytest.mark.parametrize("tx_value", [0, 1]) +@pytest.mark.parametrize( + "tx_calldata", + [b"", b"\x00", b"\x01"], + ids=["no_calldata", "single_zero_calldata", "single_one_calldata"], +) +@pytest.mark.parametrize("tx_max_fee_per_blob_gas", [1, 100, 10000]) +@pytest.mark.valid_from("Cancun") +def test_sufficient_balance_blob_tx( + state_test: StateTestFiller, + state_env: Environment, + pre: Dict, + txs: List[Transaction], +): + """ + Check that transaction is accepted when user can exactly afford the blob gas specified (and + max_fee_per_gas would be enough for current block), including: + + - Transactions with max fee equal or higher than current block base fee + - Transactions with and without priority fee + - Transactions with and without value + - Transactions with and without calldata + - Transactions with max fee per blob gas lower or higher than the priority fee + """ + assert len(txs) == 1 + state_test( + pre=pre, + post={}, + tx=txs[0], + env=state_env, + ) + + +@pytest.mark.parametrize( + "tx_access_list", + [[], [AccessList(address=100, storage_keys=[100, 200])]], + ids=["no_access_list", "access_list"], +) +@pytest.mark.parametrize("tx_max_fee_per_gas", [7, 14]) +@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 7]) +@pytest.mark.parametrize("tx_value", [0, 1]) +@pytest.mark.parametrize( + "tx_calldata", + [b"", b"\x00", b"\x01"], + ids=["no_calldata", "single_zero_calldata", "single_one_calldata"], +) +@pytest.mark.parametrize("tx_max_fee_per_blob_gas", [1, 100, 10000]) +@pytest.mark.valid_from("Cancun") +def test_sufficient_balance_blob_tx_pre_fund_tx( + blockchain_test: BlockchainTestFiller, + total_account_minimum_balance: int, + env: Environment, + pre: Dict, + txs: List[Transaction], + header_verify: Optional[Header], +): + """ + Check that transaction is accepted when user can exactly afford the blob gas specified (and + max_fee_per_gas would be enough for current block) because a funding transaction is + prepended in the same block, including: + + - Transactions with max fee equal or higher than current block base fee + - Transactions with and without priority fee + - Transactions with and without value + - Transactions with and without calldata + - Transactions with max fee per blob gas lower or higher than the priority fee + """ + pre = { + TestPreFundingAddress: Account(balance=(21_000 * 100) + total_account_minimum_balance), + } + txs = [ + Transaction( + ty=2, + nonce=0, + to=TestAddress, + value=total_account_minimum_balance, + gas_limit=21_000, + max_fee_per_gas=100, + max_priority_fee_per_gas=0, + access_list=[], + secret_key=TestPreFundingKey, + ) + ] + txs blockchain_test( pre=pre, post={}, - blocks=blocks, + blocks=[ + Block( + txs=txs, + header_verify=header_verify, + ) + ], genesis_environment=env, ) +@pytest.mark.parametrize( + "tx_access_list", + [[], [AccessList(address=100, storage_keys=[100, 200])]], + ids=["no_access_list", "access_list"], +) +@pytest.mark.parametrize("tx_max_fee_per_gas", [7, 14]) +@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 7]) +@pytest.mark.parametrize("tx_value", [0, 1]) +@pytest.mark.parametrize( + "tx_calldata", + [b"", b"\x01"], + ids=["no_calldata", 
"single_non_zero_byte_calldata"], +) +@pytest.mark.parametrize("tx_max_fee_per_blob_gas", [1, 100]) +@pytest.mark.parametrize( + "tx_gas", [500_000], ids=[""] +) # Increase gas to account for contract code +@pytest.mark.parametrize( + "mid_tx_send_amount", [100] +) # Amount sent by the contract to the sender mid execution +@pytest.mark.valid_from("Cancun") +def test_blob_gas_subtraction_tx( + state_test: StateTestFiller, + state_env: Environment, + pre: Dict, + txs: List[Transaction], + destination_account: Address, + mid_tx_send_amount: int, + total_account_transactions_fee: int, +): + """ + Check that the blob gas fee for a transaction is subtracted from the sender balance before the + transaction is executed, including: + + - Transactions with max fee equal or higher than current block base fee + - Transactions with and without value + - Transactions with and without calldata + - Transactions with max fee per blob gas lower or higher than the priority fee + - Transactions where an externally owned account sends funds to the sender mid execution + """ + assert len(txs) == 1 + pre[destination_account] = Account( + balance=mid_tx_send_amount, + code=Op.SSTORE(0, Op.BALANCE(Op.ORIGIN)) + + Op.CALL(Op.GAS, Op.ORIGIN, mid_tx_send_amount, 0, 0, 0, 0) + + Op.SSTORE(1, Op.BALANCE(Op.ORIGIN)), + ) + post = { + destination_account: Account( + storage={ + 0: pre[TestAddress].balance - total_account_transactions_fee, + 1: pre[TestAddress].balance - total_account_transactions_fee + mid_tx_send_amount, + } + ) + } + state_test( + pre=pre, + post=post, + tx=txs[0], + env=state_env, + ) + + @pytest.mark.parametrize( "blobs_per_tx", all_valid_blob_combinations(), ) @pytest.mark.parametrize("account_balance_modifier", [-1], ids=["exact_balance_minus_1"]) -@pytest.mark.parametrize("tx_error", ["insufficient account balance"], ids=[""]) +@pytest.mark.parametrize("tx_error", [TransactionException.INSUFFICIENT_ACCOUNT_FUNDS], ids=[""]) @pytest.mark.valid_from("Cancun") def test_insufficient_balance_blob_tx_combinations( blockchain_test: BlockchainTestFiller, pre: Dict, env: Environment, - blocks: List[Block], + block: Block, ): """ Reject all valid blob transaction combinations in a block, but block is invalid due to: @@ -660,25 +987,30 @@ def test_insufficient_balance_blob_tx_combinations( blockchain_test( pre=pre, post={}, - blocks=blocks, + blocks=[block], genesis_environment=env, ) @pytest.mark.parametrize( - "blobs_per_tx,tx_error,block_error", + "blobs_per_tx,tx_error", [ - ([0], "zero blob tx", "zero blob tx"), - ([SpecHelpers.max_blobs_per_block() + 1], None, "too many blobs"), + ([0], TransactionException.TYPE_3_TX_ZERO_BLOBS), + ( + [SpecHelpers.max_blobs_per_block() + 1], + TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED, + ), ], ids=["too_few_blobs", "too_many_blobs"], ) @pytest.mark.valid_from("Cancun") def test_invalid_tx_blob_count( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, + state_env: Environment, pre: Dict, - env: Environment, - blocks: List[Block], + txs: List[Transaction], + header_verify: Optional[Header], + rlp_modifier: Optional[Header], ): """ Reject blocks that include blob transactions with invalid blob counts: @@ -686,72 +1018,104 @@ def test_invalid_tx_blob_count( - `blob count == 0` in type 3 transaction - `blob count > MAX_BLOBS_PER_BLOCK` in type 3 transaction """ - blockchain_test( + assert len(txs) == 1 + state_test( pre=pre, post={}, - blocks=blocks, - genesis_environment=env, + tx=txs[0], + env=state_env, + 
blockchain_test_header_verify=header_verify, + blockchain_test_rlp_modifier=rlp_modifier, + ) + + +@pytest.mark.parametrize( + "blob_hashes_per_tx", + [ + [[Hash(1)]], + [[Hash(x) for x in range(2)]], + [add_kzg_version([Hash(1)], Spec.BLOB_COMMITMENT_VERSION_KZG) + [Hash(2)]], + [[Hash(1)] + add_kzg_version([Hash(2)], Spec.BLOB_COMMITMENT_VERSION_KZG)], + ], + ids=[ + "single_blob", + "multiple_blobs", + "multiple_blobs_single_bad_hash_1", + "multiple_blobs_single_bad_hash_2", + ], +) +@pytest.mark.parametrize( + "tx_error", [TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH], ids=[""] +) +@pytest.mark.valid_from("Cancun") +def test_invalid_blob_hash_versioning_single_tx( + state_test: StateTestFiller, + state_env: Environment, + pre: Dict, + txs: List[Transaction], + header_verify: Optional[Header], + rlp_modifier: Optional[Header], +): + """ + Reject blob transactions with invalid blob hash version, including: + + - Transaction with single blob with invalid version + - Transaction with multiple blobs all with invalid version + - Transaction with multiple blobs either with invalid version + """ + assert len(txs) == 1 + state_test( + pre=pre, + post={}, + tx=txs[0], + env=state_env, + blockchain_test_header_verify=header_verify, + blockchain_test_rlp_modifier=rlp_modifier, ) @pytest.mark.parametrize( "blob_hashes_per_tx", [ - [[to_hash_bytes(1)]], - [[to_hash_bytes(x) for x in range(2)]], - [ - add_kzg_version([to_hash_bytes(1)], Spec.BLOB_COMMITMENT_VERSION_KZG) - + [to_hash_bytes(2)] - ], - [ - [to_hash_bytes(1)] - + add_kzg_version([to_hash_bytes(2)], Spec.BLOB_COMMITMENT_VERSION_KZG) - ], [ - add_kzg_version([to_hash_bytes(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), - [to_hash_bytes(2)], + add_kzg_version([Hash(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), + [Hash(2)], ], [ - add_kzg_version([to_hash_bytes(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), - [to_hash_bytes(x) for x in range(1, 3)], + add_kzg_version([Hash(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), + [Hash(x) for x in range(1, 3)], ], [ - add_kzg_version([to_hash_bytes(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), - [to_hash_bytes(2)] - + add_kzg_version([to_hash_bytes(3)], Spec.BLOB_COMMITMENT_VERSION_KZG), + add_kzg_version([Hash(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), + [Hash(2)] + add_kzg_version([Hash(3)], Spec.BLOB_COMMITMENT_VERSION_KZG), ], [ - add_kzg_version([to_hash_bytes(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), - add_kzg_version([to_hash_bytes(2)], Spec.BLOB_COMMITMENT_VERSION_KZG), - [to_hash_bytes(3)], + add_kzg_version([Hash(1)], Spec.BLOB_COMMITMENT_VERSION_KZG), + add_kzg_version([Hash(2)], Spec.BLOB_COMMITMENT_VERSION_KZG), + [Hash(3)], ], ], ids=[ - "single_tx_single_blob", - "single_tx_multiple_blobs", - "single_tx_multiple_blobs_single_bad_hash_1", - "single_tx_multiple_blobs_single_bad_hash_2", - "multiple_txs_single_blob", - "multiple_txs_multiple_blobs", - "multiple_txs_multiple_blobs_single_bad_hash_1", - "multiple_txs_multiple_blobs_single_bad_hash_2", + "single_blob", + "multiple_blobs", + "multiple_blobs_single_bad_hash_1", + "multiple_blobs_single_bad_hash_2", ], ) -@pytest.mark.parametrize("tx_error", ["invalid blob versioned hash"], ids=[""]) +@pytest.mark.parametrize( + "tx_error", [TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH], ids=[""] +) @pytest.mark.valid_from("Cancun") -def test_invalid_blob_hash_versioning( +def test_invalid_blob_hash_versioning_multiple_txs( blockchain_test: BlockchainTestFiller, pre: Dict, env: Environment, - blocks: List[Block], + block: Block, ): """ Reject blocks that 
include blob transactions with invalid blob hash version, including: - - Single blob transaction with single blob with invalid version - - Single blob transaction with multiple blobs all with invalid version - - Single blob transaction with multiple blobs either with invalid version - Multiple blob transactions with single blob all with invalid version - Multiple blob transactions with multiple blobs all with invalid version - Multiple blob transactions with multiple blobs only one with invalid version @@ -759,30 +1123,41 @@ def test_invalid_blob_hash_versioning( blockchain_test( pre=pre, post={}, - blocks=blocks, + blocks=[block], genesis_environment=env, ) @pytest.mark.parametrize( - "destination_account,tx_error", [(None, "no_contract_creating_blob_txs")], ids=[""] -) -# TODO: Uncomment after #242 -> https://github.com/ethereum/execution-spec-tests/issues/242 -@pytest.mark.skip(reason="Unable to fill due to invalid field in transaction") + "tx_gas", [500_000], ids=[""] +) # Increase gas to account for contract creation @pytest.mark.valid_from("Cancun") def test_invalid_blob_tx_contract_creation( blockchain_test: BlockchainTestFiller, pre: Dict, env: Environment, - blocks: List[Block], + txs: List[Transaction], + header_verify: Optional[Header], ): """ Reject blocks that include blob transactions that have nil to value (contract creating). """ + assert len(txs) == 1 + assert txs[0].blob_versioned_hashes is not None and len(txs[0].blob_versioned_hashes) == 1 + # Replace the transaction with a contract creating one, only in the RLP version + contract_creating_tx = replace(txs[0], to=None).with_signature_and_sender() + txs[0] = replace(txs[0], rlp=contract_creating_tx.serialized_bytes()) blockchain_test( pre=pre, post={}, - blocks=blocks, + blocks=[ + Block( + txs=txs, + exception=TransactionException.TYPE_3_TX_CONTRACT_CREATION + | BlockException.RLP_STRUCTURES_ENCODING, + header_verify=header_verify, + ) + ], genesis_environment=env, ) @@ -854,12 +1229,12 @@ def opcode( @pytest.mark.parametrize("tx_gas", [500_000]) @pytest.mark.valid_from("Cancun") def test_blob_tx_attribute_opcodes( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, opcode: Tuple[bytes, Storage.StorageDictType], - env: Environment, - blocks: List[Block], - destination_account: str, + state_env: Environment, + txs: List[Transaction], + destination_account: Address, ): """ Test opcodes that read transaction attributes work properly for blob type transactions: @@ -867,6 +1242,7 @@ def test_blob_tx_attribute_opcodes( - ORIGIN - CALLER """ + assert len(txs) == 1 code, storage = opcode pre[destination_account] = Account(code=code) post = { @@ -874,11 +1250,11 @@ def test_blob_tx_attribute_opcodes( storage=storage, ) } - blockchain_test( + state_test( pre=pre, post=post, - blocks=blocks, - genesis_environment=env, + tx=txs[0], + env=state_env, ) @@ -887,17 +1263,18 @@ def test_blob_tx_attribute_opcodes( @pytest.mark.parametrize("tx_gas", [500_000]) @pytest.mark.valid_from("Cancun") def test_blob_tx_attribute_value_opcode( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, opcode: Tuple[bytes, Storage.StorageDictType], - env: Environment, - blocks: List[Block], + state_env: Environment, + txs: List[Transaction], tx_value: int, - destination_account: str, + destination_account: Address, ): """ Test the VALUE opcode with different blob type transaction value amounts. 
""" + assert len(txs) == 1 code, storage = opcode pre[destination_account] = Account(code=code) post = { @@ -906,11 +1283,11 @@ def test_blob_tx_attribute_value_opcode( balance=tx_value, ) } - blockchain_test( + state_test( pre=pre, post=post, - blocks=blocks, - genesis_environment=env, + tx=txs[0], + env=state_env, ) @@ -935,12 +1312,12 @@ def test_blob_tx_attribute_value_opcode( @pytest.mark.parametrize("tx_gas", [500_000]) @pytest.mark.valid_from("Cancun") def test_blob_tx_attribute_calldata_opcodes( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, opcode: Tuple[bytes, Storage.StorageDictType], - env: Environment, - blocks: List[Block], - destination_account: str, + state_env: Environment, + txs: List[Transaction], + destination_account: Address, ): """ Test calldata related opcodes to verify their behavior is not affected by blobs: @@ -949,6 +1326,7 @@ def test_blob_tx_attribute_calldata_opcodes( - CALLDATASIZE - CALLDATACOPY """ + assert len(txs) == 1 code, storage = opcode pre[destination_account] = Account(code=code) post = { @@ -956,11 +1334,11 @@ def test_blob_tx_attribute_calldata_opcodes( storage=storage, ) } - blockchain_test( + state_test( pre=pre, post=post, - blocks=blocks, - genesis_environment=env, + tx=txs[0], + env=state_env, ) @@ -971,12 +1349,12 @@ def test_blob_tx_attribute_calldata_opcodes( @pytest.mark.parametrize("tx_gas", [500_000]) @pytest.mark.valid_from("Cancun") def test_blob_tx_attribute_gasprice_opcode( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, opcode: Tuple[bytes, Storage.StorageDictType], - env: Environment, - blocks: List[Block], - destination_account: str, + state_env: Environment, + txs: List[Transaction], + destination_account: Address, ): """ Test GASPRICE opcode to sanity check that the blob gas fee does not affect @@ -986,6 +1364,7 @@ def test_blob_tx_attribute_gasprice_opcode( - Priority fee below data fee - Priority fee above data fee """ + assert len(txs) == 1 code, storage = opcode pre[destination_account] = Account(code=code) post = { @@ -993,11 +1372,11 @@ def test_blob_tx_attribute_gasprice_opcode( storage=storage, ) } - blockchain_test( + state_test( pre=pre, post=post, - blocks=blocks, - genesis_environment=env, + tx=txs[0], + env=state_env, ) @@ -1009,16 +1388,21 @@ def test_blob_tx_attribute_gasprice_opcode( "tx_error", ], [ - ([0], None, 1, "tx type 3 not allowed pre-Cancun"), - ([1], None, 1, "tx type 3 not allowed pre-Cancun"), + ( + [0], + None, + 1, + TransactionException.TYPE_3_TX_PRE_FORK | TransactionException.TYPE_3_TX_ZERO_BLOBS, + ), + ([1], None, 1, TransactionException.TYPE_3_TX_PRE_FORK), ], ids=["no_blob_tx", "one_blob_tx"], ) @pytest.mark.valid_at_transition_to("Cancun") def test_blob_type_tx_pre_fork( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, - blocks: List[Block], + txs: List[Transaction], ): """ Reject blocks with blob type transactions before Cancun fork. @@ -1026,9 +1410,10 @@ def test_blob_type_tx_pre_fork( Blocks sent by NewPayloadV2 (Shanghai) that contain blob type transactions, furthermore blobs field within NewPayloadV2 method must be computed as INVALID, due to an invalid block hash. 
""" - blockchain_test( + assert len(txs) == 1 + state_test( pre=pre, post={}, - blocks=blocks, - genesis_environment=Environment(), # `env` fixture has blob fields + tx=txs[0], + env=Environment(), # `env` fixture has blob fields ) diff --git a/tests/cancun/eip4844_blobs/test_blob_txs_full.py b/tests/cancun/eip4844_blobs/test_blob_txs_full.py index 230774f39c..faae360da9 100644 --- a/tests/cancun/eip4844_blobs/test_blob_txs_full.py +++ b/tests/cancun/eip4844_blobs/test_blob_txs_full.py @@ -1,6 +1,5 @@ """ abstract: Tests full blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test full blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). """ # noqa: E501 @@ -10,13 +9,15 @@ from ethereum_test_tools import ( Account, + Address, Block, BlockchainTestFiller, + BlockException, Environment, Header, TestAddress, Transaction, - to_address, + TransactionException, ) from .common import INF_POINT, Blob @@ -27,9 +28,9 @@ @pytest.fixture -def destination_account() -> str: +def destination_account() -> Address: """Default destination account for the blob transactions.""" - return to_address(0x100) + return Address(0x100) @pytest.fixture @@ -160,7 +161,7 @@ def tx_max_fee_per_blob_gas( # noqa: D103 @pytest.fixture -def tx_error() -> Optional[str]: +def tx_error() -> Optional[TransactionException]: """ Even though the final block we are producing in each of these tests is invalid, and some of the transactions will be invalid due to the format in the final block, none of the transactions @@ -172,7 +173,7 @@ def tx_error() -> Optional[str]: @pytest.fixture(autouse=True) def txs( # noqa: D103 - destination_account: Optional[str], + destination_account: Optional[Address], tx_gas: int, tx_value: int, tx_calldata: bytes, @@ -180,7 +181,7 @@ def txs( # noqa: D103 tx_max_fee_per_blob_gas: int, tx_max_priority_fee_per_gas: int, txs_versioned_hashes: List[List[bytes]], - tx_error: Optional[str], + tx_error: Optional[TransactionException], txs_blobs: List[List[Blob]], txs_wrapped_blobs: List[bool], ) -> List[Transaction]: @@ -254,7 +255,11 @@ def blocks( header_blob_gas_used = 0 block_error = None if any(txs_wrapped_blobs): - block_error = "invalid transaction" + # This is a block exception because the invalid block is only created in the RLP version, + # not in the transition tool. + block_error = ( + TransactionException.TYPE_3_TX_WITH_FULL_BLOBS | BlockException.RLP_STRUCTURES_ENCODING + ) if len(txs) > 0: header_blob_gas_used = ( sum( diff --git a/tests/cancun/eip4844_blobs/test_blobhash_opcode.py b/tests/cancun/eip4844_blobs/test_blobhash_opcode.py index de88b857d7..ba2dde3b47 100644 --- a/tests/cancun/eip4844_blobs/test_blobhash_opcode.py +++ b/tests/cancun/eip4844_blobs/test_blobhash_opcode.py @@ -1,11 +1,9 @@ """ abstract: Tests `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test cases for the `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). 
note: Adding a new test - Add a function that is named `test_` and takes at least the following arguments: - blockchain_test @@ -23,13 +21,13 @@ from ethereum_test_tools import ( Account, + Address, Block, BlockchainTestFiller, CodeGasMeasure, + Hash, TestAddress, Transaction, - to_address, - to_hash_bytes, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -62,7 +60,7 @@ def blocks(): # noqa: D103 @pytest.fixture def template_tx(): # noqa: D103 return Transaction( - data=to_hash_bytes(0), + data=Hash(0), gas_limit=3000000, max_fee_per_gas=10, ) @@ -120,7 +118,7 @@ def test_blobhash_gas_cost( for i in blobhash_index_values ] for i, gas_code in enumerate(gas_measures_code): - address = to_address(0x100 + i * 0x100) + address = Address(0x100 + i * 0x100) pre[address] = Account(code=gas_code) blocks.append( Block( @@ -179,7 +177,7 @@ def test_blobhash_scenarios( b_hashes_list = BlobhashScenario.create_blob_hashes_list(length=TOTAL_BLOCKS) blobhash_calls = BlobhashScenario.generate_blobhash_bytecode(scenario) for i in range(TOTAL_BLOCKS): - address = to_address(0x100 + i * 0x100) + address = Address(0x100 + i * 0x100) pre[address] = Account(code=blobhash_calls) blocks.append( Block( @@ -236,7 +234,7 @@ def test_blobhash_invalid_blob_index( TOTAL_BLOCKS = 5 blobhash_calls = BlobhashScenario.generate_blobhash_bytecode(scenario) for i in range(TOTAL_BLOCKS): - address = to_address(0x100 + i * 0x100) + address = Address(0x100 + i * 0x100) pre[address] = Account(code=blobhash_calls) blob_per_block = (i % SpecHelpers.max_blobs_per_block()) + 1 blobs = [random_blob_hashes[blob] for blob in range(blob_per_block)] @@ -288,32 +286,32 @@ def test_blobhash_multiple_txs_in_block( pre = { **pre, **{ - to_address(address): Account(code=blobhash_bytecode) + Address(address): Account(code=blobhash_bytecode) for address in range(0x100, 0x500, 0x100) }, } blocks = [ Block( txs=[ - blob_tx(address=to_address(0x100), type=3, nonce=0), - blob_tx(address=to_address(0x100), type=2, nonce=1), + blob_tx(address=Address(0x100), type=3, nonce=0), + blob_tx(address=Address(0x100), type=2, nonce=1), ] ), Block( txs=[ - blob_tx(address=to_address(0x200), type=2, nonce=2), - blob_tx(address=to_address(0x200), type=3, nonce=3), + blob_tx(address=Address(0x200), type=2, nonce=2), + blob_tx(address=Address(0x200), type=3, nonce=3), ] ), Block( txs=[ - blob_tx(address=to_address(0x300), type=2, nonce=4), - blob_tx(address=to_address(0x400), type=3, nonce=5), + blob_tx(address=Address(0x300), type=2, nonce=4), + blob_tx(address=Address(0x400), type=3, nonce=5), ], ), ] post = { - to_address(address): Account( + Address(address): Account( storage={i: random_blob_hashes[i] for i in range(SpecHelpers.max_blobs_per_block())} ) if address in (0x200, 0x400) diff --git a/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py b/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py index d7d8ec87d2..749c9a8030 100644 --- a/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py +++ b/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py @@ -1,6 +1,5 @@ """ abstract: Tests `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test case for `BLOBHASH` opcode calls across different contexts in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). 
@@ -12,10 +11,10 @@ Account, Block, BlockchainTestFiller, + Hash, TestAddress, Transaction, YulCompiler, - to_hash_bytes, ) from .common import BlobhashContext, simple_blob_hashes @@ -30,7 +29,7 @@ # Blob transaction template tx_type_3 = Transaction( ty=Spec.BLOB_TX_TYPE, - data=to_hash_bytes(0), + data=Hash(0), gas_limit=3000000, max_fee_per_gas=10, max_priority_fee_per_gas=10, @@ -101,7 +100,7 @@ def opcode_context(yul: YulCompiler, request): ), }, tx_type_3.with_fields( - data=to_hash_bytes(2**256 - 1) + to_hash_bytes(2**256 - 1), + data=Hash(2**256 - 1) + Hash(2**256 - 1), to=BlobhashContext.address("blobhash_sstore"), ), { @@ -117,7 +116,7 @@ def opcode_context(yul: YulCompiler, request): ), }, tx_type_3.with_fields( - data=to_hash_bytes(1) + to_hash_bytes(1), + data=Hash(1) + Hash(1), to=BlobhashContext.address("call"), blob_versioned_hashes=simple_blob_hashes[:2], ), @@ -138,7 +137,7 @@ def opcode_context(yul: YulCompiler, request): ), }, tx_type_3.with_fields( - data=to_hash_bytes(0) + to_hash_bytes(SpecHelpers.max_blobs_per_block() - 1), + data=Hash(0) + Hash(SpecHelpers.max_blobs_per_block() - 1), to=BlobhashContext.address("delegatecall"), ), { @@ -160,7 +159,7 @@ def opcode_context(yul: YulCompiler, request): ), }, tx_type_3.with_fields( - data=to_hash_bytes(0) + to_hash_bytes(SpecHelpers.max_blobs_per_block() - 1), + data=Hash(0) + Hash(SpecHelpers.max_blobs_per_block() - 1), to=BlobhashContext.address("staticcall"), ), { @@ -182,7 +181,7 @@ def opcode_context(yul: YulCompiler, request): ), }, tx_type_3.with_fields( - data=to_hash_bytes(0) + to_hash_bytes(SpecHelpers.max_blobs_per_block() - 1), + data=Hash(0) + Hash(SpecHelpers.max_blobs_per_block() - 1), to=BlobhashContext.address("callcode"), ), { @@ -236,7 +235,7 @@ def opcode_context(yul: YulCompiler, request): }, Transaction( ty=2, - data=to_hash_bytes(0), + data=Hash(0), to=BlobhashContext.address("blobhash_sstore"), gas_limit=3000000, max_fee_per_gas=10, @@ -256,7 +255,7 @@ def opcode_context(yul: YulCompiler, request): }, Transaction( ty=1, - data=to_hash_bytes(0), + data=Hash(0), to=BlobhashContext.address("blobhash_sstore"), gas_limit=3000000, gas_price=10, @@ -275,7 +274,7 @@ def opcode_context(yul: YulCompiler, request): }, Transaction( ty=0, - data=to_hash_bytes(0), + data=Hash(0), to=BlobhashContext.address("blobhash_sstore"), gas_limit=3000000, gas_price=10, diff --git a/tests/cancun/eip4844_blobs/test_excess_blob_gas.py b/tests/cancun/eip4844_blobs/test_excess_blob_gas.py index df7d3bb32b..f841e79026 100644 --- a/tests/cancun/eip4844_blobs/test_excess_blob_gas.py +++ b/tests/cancun/eip4844_blobs/test_excess_blob_gas.py @@ -1,10 +1,8 @@ """ abstract: Tests `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). 
note: Adding a new test - Add a function that is named `test_` and takes at least the following arguments: - blockchain_test @@ -27,16 +25,19 @@ import pytest -from ethereum_test_tools import Account, Block, BlockchainTestFiller, Environment, Header -from ethereum_test_tools import Opcodes as Op from ethereum_test_tools import ( - TestAddress, - TestAddress2, - Transaction, - add_kzg_version, - to_address, - to_hash_bytes, + Account, + Address, + Block, + BlockchainTestFiller, + BlockException, + Environment, + ExceptionType, + Hash, + Header, ) +from ethereum_test_tools import Opcodes as Op +from ethereum_test_tools import TestAddress, TestAddress2, Transaction, add_kzg_version from .spec import Spec, SpecHelpers, ref_spec_4844 @@ -171,14 +172,14 @@ def destination_account_bytecode() -> bytes: # noqa: D103 @pytest.fixture -def destination_account() -> str: # noqa: D103 - return to_address(0x100) +def destination_account() -> Address: # noqa: D103 + return Address(0x100) @pytest.fixture def pre( # noqa: D103 - destination_account: str, destination_account_bytecode: bytes, tx_exact_cost: int -) -> Mapping[str, Account]: + destination_account: Address, destination_account_bytecode: bytes, tx_exact_cost: int +) -> Mapping[Address, Account]: return { TestAddress: Account(balance=tx_exact_cost), TestAddress2: Account(balance=10**40), @@ -188,8 +189,8 @@ def pre( # noqa: D103 @pytest.fixture def post( # noqa: D103 - destination_account: str, tx_value: int, block_fee_per_blob_gas: int -) -> Mapping[str, Account]: + destination_account: Address, tx_value: int, block_fee_per_blob_gas: int +) -> Mapping[Address, Account]: return { destination_account: Account( storage={0: block_fee_per_blob_gas}, @@ -204,7 +205,7 @@ def tx( # noqa: D103 tx_max_fee_per_gas: int, tx_max_fee_per_blob_gas: int, tx_gas_limit: int, - destination_account: str, + destination_account: Address, ): if new_blobs == 0: # Send a normal type two tx instead @@ -230,7 +231,7 @@ def tx( # noqa: D103 max_fee_per_blob_gas=tx_max_fee_per_blob_gas, access_list=[], blob_versioned_hashes=add_kzg_version( - [to_hash_bytes(x) for x in range(new_blobs)], + [Hash(x) for x in range(new_blobs)], Spec.BLOB_COMMITMENT_VERSION_KZG, ), ) @@ -263,7 +264,9 @@ def blocks( # noqa: D103 else [non_zero_blob_gas_used_genesis_block] ) - def add_block(header_modifier: Optional[Dict] = None, exception_message: Optional[str] = None): + def add_block( + header_modifier: Optional[Dict] = None, exception_message: Optional[ExceptionType] = None + ): """ Utility function to add a block to the blocks list. 
""" @@ -282,13 +285,20 @@ def add_block(header_modifier: Optional[Dict] = None, exception_message: Optiona if header_excess_blob_gas is not None: add_block( header_modifier={"excess_blob_gas": header_excess_blob_gas}, - exception_message="invalid excess blob gas", + exception_message=BlockException.INCORRECT_EXCESS_BLOB_GAS, ) elif header_blob_gas_used is not None: - add_block( - header_modifier={"blob_gas_used": header_blob_gas_used}, - exception_message="invalid blob gas used", - ) + if header_blob_gas_used > Spec.MAX_BLOB_GAS_PER_BLOCK: + add_block( + header_modifier={"blob_gas_used": header_blob_gas_used}, + exception_message=BlockException.BLOB_GAS_USED_ABOVE_LIMIT + | BlockException.INCORRECT_BLOB_GAS_USED, + ) + else: + add_block( + header_modifier={"blob_gas_used": header_blob_gas_used}, + exception_message=BlockException.INCORRECT_BLOB_GAS_USED, + ) else: add_block() @@ -301,9 +311,9 @@ def add_block(header_modifier: Optional[Dict] = None, exception_message: Optiona def test_correct_excess_blob_gas_calculation( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], - post: Mapping[str, Account], + post: Mapping[Address, Account], correct_excess_blob_gas: int, ): """ @@ -346,9 +356,9 @@ def test_correct_excess_blob_gas_calculation( def test_correct_increasing_blob_gas_costs( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], - post: Mapping[str, Account], + post: Mapping[Address, Account], correct_excess_blob_gas: int, ): """ @@ -380,9 +390,9 @@ def test_correct_increasing_blob_gas_costs( def test_correct_decreasing_blob_gas_costs( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], - post: Mapping[str, Account], + post: Mapping[Address, Account], correct_excess_blob_gas: int, ): """ @@ -406,7 +416,7 @@ def test_correct_decreasing_blob_gas_costs( def test_invalid_zero_excess_blob_gas_in_header( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], @@ -455,7 +465,7 @@ def all_invalid_blob_gas_used_combinations() -> Iterator[Tuple[int, int]]: def test_invalid_blob_gas_used_in_header( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], new_blobs: int, header_blob_gas_used: Optional[int], @@ -494,7 +504,7 @@ def test_invalid_blob_gas_used_in_header( def test_invalid_excess_blob_gas_above_target_change( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], @@ -538,7 +548,7 @@ def test_invalid_excess_blob_gas_above_target_change( def test_invalid_static_excess_blob_gas( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, parent_excess_blob_gas: int, @@ -551,7 +561,7 @@ def test_invalid_static_excess_blob_gas( """ blocks[-1].rlp_modifier = Header(excess_blob_gas=parent_excess_blob_gas) blocks[-1].header_verify = None - blocks[-1].exception = "invalid excess blob gas" + blocks[-1].exception = 
BlockException.INCORRECT_EXCESS_BLOB_GAS blockchain_test( pre=pre, post={}, @@ -573,7 +583,7 @@ def test_invalid_static_excess_blob_gas( def test_invalid_excess_blob_gas_target_blobs_increase_from_zero( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], @@ -614,7 +624,7 @@ def test_invalid_excess_blob_gas_target_blobs_increase_from_zero( def test_invalid_static_excess_blob_gas_from_zero_on_blobs_above_target( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], @@ -664,7 +674,7 @@ def test_invalid_static_excess_blob_gas_from_zero_on_blobs_above_target( def test_invalid_excess_blob_gas_change( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], @@ -707,7 +717,7 @@ def test_invalid_excess_blob_gas_change( def test_invalid_negative_excess_blob_gas( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], @@ -753,7 +763,7 @@ def test_invalid_negative_excess_blob_gas( def test_invalid_non_multiple_excess_blob_gas( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], blocks: List[Block], correct_excess_blob_gas: int, header_excess_blob_gas: Optional[int], diff --git a/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py b/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py index 267f558a59..136078342a 100644 --- a/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py +++ b/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py @@ -1,6 +1,5 @@ """ abstract: Tests `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) at fork transition. - Test `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) at fork transition. 
""" # noqa: E501 @@ -10,16 +9,17 @@ from ethereum_test_tools import ( Account, + Address, Block, BlockchainTestFiller, + BlockException, EngineAPIError, Environment, + Hash, Header, TestAddress, Transaction, add_kzg_version, - to_address, - to_hash_bytes, ) from .spec import Spec, SpecHelpers, ref_spec_4844 @@ -41,7 +41,7 @@ def env() -> Environment: # noqa: D103 @pytest.fixture -def pre() -> Mapping[str, Account]: # noqa: D103 +def pre() -> Mapping[Address, Account]: # noqa: D103 return { TestAddress: Account(balance=10**40), } @@ -74,13 +74,13 @@ def blob_count_per_block() -> int: @pytest.fixture -def destination_account() -> str: # noqa: D103 - return to_address(0x100) +def destination_account() -> Address: # noqa: D103 + return Address(0x100) @pytest.fixture def post_fork_blocks( - destination_account: str, + destination_account: Address, post_fork_block_count: int, blob_count_per_block: int, ): @@ -101,7 +101,7 @@ def post_fork_blocks( max_fee_per_blob_gas=100, access_list=[], blob_versioned_hashes=add_kzg_version( - [to_hash_bytes(x) for x in range(blob_count_per_block)], + [Hash(x) for x in range(blob_count_per_block)], Spec.BLOB_COMMITMENT_VERSION_KZG, ), ) @@ -125,8 +125,8 @@ def post_fork_blocks( @pytest.fixture def post( # noqa: D103 post_fork_block_count: int, - destination_account: str, -) -> Mapping[str, Account]: + destination_account: Address, +) -> Mapping[Address, Account]: return { destination_account: Account(balance=post_fork_block_count), } @@ -143,7 +143,7 @@ def post( # noqa: D103 def test_invalid_pre_fork_block_with_blob_fields( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], pre_fork_blocks: List[Block], excess_blob_gas_present: bool, blob_gas_used_present: bool, @@ -153,7 +153,7 @@ def test_invalid_pre_fork_block_with_blob_fields( block. Blocks sent by NewPayloadV2 (Shanghai) that contain `excessBlobGas` and `blobGasUsed` fields - must be rejected with the `-32602: Invalid params` error. + must be rejected with the appropriate `EngineAPIError.InvalidParams` error error. """ header_modifier = Header() if excess_blob_gas_present: @@ -168,7 +168,7 @@ def test_invalid_pre_fork_block_with_blob_fields( Block( timestamp=(FORK_TIMESTAMP - 1), rlp_modifier=header_modifier, - exception="invalid pre fork blob fields", + exception=BlockException.INCORRECT_BLOCK_FORMAT, engine_api_error_code=EngineAPIError.InvalidParams, ) ], @@ -188,7 +188,7 @@ def test_invalid_pre_fork_block_with_blob_fields( def test_invalid_post_fork_block_without_blob_fields( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], pre_fork_blocks: List[Block], excess_blob_gas_missing: bool, blob_gas_used_missing: bool, @@ -198,7 +198,7 @@ def test_invalid_post_fork_block_without_blob_fields( post-fork block. Blocks sent by NewPayloadV3 (Cancun) without `excessBlobGas` and `blobGasUsed` fields must be - rejected with the `-32602: Invalid params` error. + rejected with the appropriate `EngineAPIError.InvalidParams` error. 
""" header_modifier = Header() if excess_blob_gas_missing: @@ -213,7 +213,7 @@ def test_invalid_post_fork_block_without_blob_fields( Block( timestamp=FORK_TIMESTAMP, rlp_modifier=header_modifier, - exception="blob fields missing post fork", + exception=BlockException.INCORRECT_BLOCK_FORMAT, engine_api_error_code=EngineAPIError.InvalidParams, ) ], @@ -239,10 +239,10 @@ def test_invalid_post_fork_block_without_blob_fields( def test_fork_transition_excess_blob_gas( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Mapping[str, Account], + pre: Mapping[Address, Account], pre_fork_blocks: List[Block], post_fork_blocks: List[Block], - post: Mapping[str, Account], + post: Mapping[Address, Account], ): """ Test `excessBlobGas` calculation in the header when the fork is activated. diff --git a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py index 4da73cc9e9..c2443f71d3 100644 --- a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py +++ b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py @@ -1,13 +1,11 @@ """ abstract: Tests point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). note: Adding a new test - Add a function that is named `test_` and takes at least the following arguments: - - blockchain_test + - blockchain_test | state_test - pre - tx - post @@ -31,20 +29,21 @@ import glob import json import os -from typing import Dict, Iterator, List +from typing import Dict, Iterator, List, Optional import pytest from ethereum_test_tools import ( Account, - Auto, + Address, Block, BlockchainTestFiller, + Environment, + StateTestFiller, Storage, TestAddress, Transaction, eip_2028_transaction_data_cost, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -54,12 +53,10 @@ REFERENCE_SPEC_GIT_PATH = ref_spec_4844.git_path REFERENCE_SPEC_VERSION = ref_spec_4844.version -auto = Auto() - @pytest.fixture def precompile_input( - versioned_hash: bytes | int | Auto, + versioned_hash: Optional[bytes | int], kzg_commitment: bytes | int, z: bytes | int, y: bytes | int, @@ -76,7 +73,7 @@ def precompile_input( kzg_commitment = kzg_commitment.to_bytes(48, "big") if isinstance(kzg_proof, int): kzg_proof = kzg_proof.to_bytes(48, "big") - if isinstance(versioned_hash, Auto): + if versioned_hash is None: versioned_hash = Spec.kzg_to_versioned_hash(kzg_commitment) elif isinstance(versioned_hash, int): versioned_hash = versioned_hash.to_bytes(32, "big") @@ -156,17 +153,17 @@ def precompile_caller_account(call_type: Op, call_gas: int) -> Account: @pytest.fixture -def precompile_caller_address() -> str: +def precompile_caller_address() -> Address: """ Address of the precompile caller account. 
""" - return to_address(0x100) + return Address(0x100) @pytest.fixture def pre( precompile_caller_account: Account, - precompile_caller_address: str, + precompile_caller_address: Address, ) -> Dict: """ Prepares the pre state of all test cases, by setting the balance of the @@ -183,7 +180,7 @@ def pre( @pytest.fixture def tx( - precompile_caller_address: str, + precompile_caller_address: Address, precompile_input: bytes, ) -> Transaction: """ @@ -204,7 +201,7 @@ def tx( @pytest.fixture def post( success: bool, - precompile_caller_address: str, + precompile_caller_address: Address, precompile_input: bytes, ) -> Dict: """ @@ -245,13 +242,13 @@ def post( @pytest.mark.parametrize( "z,y,kzg_commitment,kzg_proof,versioned_hash", [ - pytest.param(Spec.BLS_MODULUS - 1, 0, INF_POINT, INF_POINT, auto, id="in_bounds_z"), + pytest.param(Spec.BLS_MODULUS - 1, 0, INF_POINT, INF_POINT, None, id="in_bounds_z"), ], ) @pytest.mark.parametrize("success", [True]) @pytest.mark.valid_from("Cancun") def test_valid_precompile_calls( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, tx: Transaction, post: Dict, @@ -262,25 +259,26 @@ def test_valid_precompile_calls( - `kzg_commitment` and `kzg_proof` are set to values such that `p(z)==0` for all values of `z`, hence `y` is tested to be zero, and call to be successful. """ - blockchain_test( + state_test( + env=Environment(), pre=pre, post=post, - blocks=[Block(txs=[tx])], + tx=tx, ) @pytest.mark.parametrize( "z,y,kzg_commitment,kzg_proof,versioned_hash", [ - (Spec.BLS_MODULUS, 0, INF_POINT, INF_POINT, auto), - (0, Spec.BLS_MODULUS, INF_POINT, INF_POINT, auto), - (Z, 0, INF_POINT, INF_POINT[:-1], auto), - (Z, 0, INF_POINT, INF_POINT[0:1], auto), - (Z, 0, INF_POINT, INF_POINT + bytes([0]), auto), - (Z, 0, INF_POINT, INF_POINT + bytes([0] * 1023), auto), + (Spec.BLS_MODULUS, 0, INF_POINT, INF_POINT, None), + (0, Spec.BLS_MODULUS, INF_POINT, INF_POINT, None), + (Z, 0, INF_POINT, INF_POINT[:-1], None), + (Z, 0, INF_POINT, INF_POINT[0:1], None), + (Z, 0, INF_POINT, INF_POINT + bytes([0]), None), + (Z, 0, INF_POINT, INF_POINT + bytes([0] * 1023), None), (bytes(), bytes(), bytes(), bytes(), bytes()), (0, 0, 0, 0, 0), - (0, 0, 0, 0, auto), + (0, 0, 0, 0, None), (Z, 0, INF_POINT, INF_POINT, Spec.kzg_to_versioned_hash(0xC0 << 376, 0x00)), (Z, 0, INF_POINT, INF_POINT, Spec.kzg_to_versioned_hash(0xC0 << 376, 0x02)), (Z, 0, INF_POINT, INF_POINT, Spec.kzg_to_versioned_hash(0xC0 << 376, 0xFF)), @@ -303,7 +301,7 @@ def test_valid_precompile_calls( @pytest.mark.parametrize("success", [False]) @pytest.mark.valid_from("Cancun") def test_invalid_precompile_calls( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, tx: Transaction, post: Dict, @@ -317,10 +315,11 @@ def test_invalid_precompile_calls( - Zero inputs - Correct proof, commitment, z and y, but incorrect version versioned hash """ - blockchain_test( + state_test( + env=Environment(), pre=pre, post=post, - blocks=[Block(txs=[tx])], + tx=tx, ) @@ -417,10 +416,10 @@ def all_external_vectors() -> List: "z,y,kzg_commitment,kzg_proof,success", all_external_vectors(), ) -@pytest.mark.parametrize("versioned_hash", [auto]) +@pytest.mark.parametrize("versioned_hash", [None]) @pytest.mark.valid_from("Cancun") def test_point_evaluation_precompile_external_vectors( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, tx: Transaction, post: Dict, @@ -431,10 +430,11 @@ def test_point_evaluation_precompile_external_vectors( - 
`go_kzg_4844_verify_kzg_proof.json`: test vectors from the [go-kzg-4844](https://github.com/crate-crypto/go-kzg-4844) repository. """ - blockchain_test( + state_test( + env=Environment(), pre=pre, post=post, - blocks=[Block(txs=[tx])], + tx=tx, ) @@ -458,12 +458,12 @@ def test_point_evaluation_precompile_external_vectors( ) @pytest.mark.parametrize( "z,kzg_commitment,kzg_proof,versioned_hash", - [[Z, INF_POINT, INF_POINT, auto]], + [[Z, INF_POINT, INF_POINT, None]], ids=[""], ) @pytest.mark.valid_from("Cancun") def test_point_evaluation_precompile_calls( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, tx: Transaction, post: Dict, @@ -476,10 +476,11 @@ def test_point_evaluation_precompile_calls( - Using correct and incorrect proofs - Using barely insufficient gas """ - blockchain_test( + state_test( + env=Environment(), pre=pre, post=post, - blocks=[Block(txs=[tx])], + tx=tx, ) @@ -495,14 +496,14 @@ def test_point_evaluation_precompile_calls( @pytest.mark.parametrize( "z,y,kzg_commitment,kzg_proof,versioned_hash,proof_correct", [ - [Z, 0, INF_POINT, INF_POINT, auto, True], - [Z, 1, INF_POINT, INF_POINT, auto, False], + [Z, 0, INF_POINT, INF_POINT, None, True], + [Z, 1, INF_POINT, INF_POINT, None, False], ], ids=["correct_proof", "incorrect_proof"], ) @pytest.mark.valid_from("Cancun") def test_point_evaluation_precompile_gas_tx_to( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, precompile_input: bytes, call_gas: int, proof_correct: bool, @@ -540,7 +541,7 @@ def test_point_evaluation_precompile_gas_tx_to( ty=2, nonce=0, data=precompile_input, - to=to_address(Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS), + to=Address(Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS), value=0, gas_limit=call_gas + intrinsic_gas_cost, max_fee_per_gas=7, @@ -554,20 +555,80 @@ def test_point_evaluation_precompile_gas_tx_to( ) } - blockchain_test( + state_test( + env=Environment(), pre=pre, post=post, - blocks=[Block(txs=[tx])], + tx=tx, ) @pytest.mark.parametrize( "z,y,kzg_commitment,kzg_proof,versioned_hash", - [[Z, 0, INF_POINT, INF_POINT, auto]], + [[Z, 0, INF_POINT, INF_POINT, None]], ids=["correct_proof"], ) @pytest.mark.valid_at_transition_to("Cancun") def test_point_evaluation_precompile_before_fork( + state_test: StateTestFiller, + pre: Dict, + tx: Transaction, +): + """ + Test calling the Point Evaluation Precompile before the appropriate fork. 
+ """ + precompile_caller_code = Op.SSTORE( + Op.NUMBER, + Op.CALL( + Op.GAS, + Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS, + 1, # Value + 0, # Zero-length calldata + 0, + 0, # Zero-length return + 0, + ), + ) + precompile_caller_address = Address(0x100) + + pre = { + TestAddress: Account( + nonce=0, + balance=0x10**18, + ), + precompile_caller_address: Account( + nonce=0, + code=precompile_caller_code, + balance=0x10**18, + ), + } + + post = { + precompile_caller_address: Account( + storage={1: 1}, + # The call succeeds because precompile is not there yet + ), + Address(Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS): Account( + balance=1, + ), + } + + state_test( + tag="point_evaluation_precompile_before_fork", + pre=pre, + env=Environment(timestamp=7_500), + post=post, + tx=tx, + ) + + +@pytest.mark.parametrize( + "z,y,kzg_commitment,kzg_proof,versioned_hash", + [[Z, 0, INF_POINT, INF_POINT, None]], + ids=["correct_proof"], +) +@pytest.mark.valid_at_transition_to("Cancun") +def test_point_evaluation_precompile_during_fork( blockchain_test: BlockchainTestFiller, pre: Dict, tx: Transaction, @@ -587,7 +648,7 @@ def test_point_evaluation_precompile_before_fork( 0, ), ) - precompile_caller_address = to_address(0x100) + precompile_caller_address = Address(0x100) pre = { TestAddress: Account( @@ -620,9 +681,9 @@ def tx_generator() -> Iterator[Transaction]: post = { precompile_caller_address: Account( storage={b: 1 for b in range(1, len(PRE_FORK_BLOCK_RANGE) + 1)}, - # The tx in last block succeeds; storage 0 by default. + # Only the call in the last block's tx fails; storage 0 by default. ), - to_address(Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS): Account( + Address(Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS): Account( balance=len(PRE_FORK_BLOCK_RANGE), ), } diff --git a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py index efd324b3ff..8d8ccf5475 100644 --- a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py +++ b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py @@ -1,6 +1,5 @@ """ abstract: Tests gas usage on point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) - Test gas usage on point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844). """ # noqa: E501 @@ -10,13 +9,13 @@ from ethereum_test_tools import ( Account, - Block, - BlockchainTestFiller, + Address, CodeGasMeasure, + Environment, + StateTestFiller, TestAddress, Transaction, copy_opcode_cost, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -125,17 +124,17 @@ def precompile_caller_account( @pytest.fixture -def precompile_caller_address() -> str: +def precompile_caller_address() -> Address: """ Address of the precompile caller account. 
""" - return to_address(0x100) + return Address(0x100) @pytest.fixture def pre( precompile_caller_account: Account, - precompile_caller_address: str, + precompile_caller_address: Address, ) -> Dict: """ Prepares the pre state of all test cases, by setting the balance of the @@ -152,7 +151,7 @@ def pre( @pytest.fixture def tx( - precompile_caller_address: str, + precompile_caller_address: Address, precompile_input: bytes, ) -> Transaction: """ @@ -172,7 +171,7 @@ def tx( @pytest.fixture def post( - precompile_caller_address: str, + precompile_caller_address: Address, proof: Literal["correct", "incorrect"], call_gas: int, ) -> Dict: @@ -213,7 +212,7 @@ def post( @pytest.mark.parametrize("proof", ["correct", "incorrect"]) @pytest.mark.valid_from("Cancun") def test_point_evaluation_precompile_gas_usage( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, tx: Transaction, post: Dict, @@ -225,8 +224,9 @@ def test_point_evaluation_precompile_gas_usage( - Test using different gas limits (exact gas, insufficient gas, extra gas) - Test using correct and incorrect proofs """ - blockchain_test( + state_test( + env=Environment(), pre=pre, post=post, - blocks=[Block(txs=[tx])], + tx=tx, ) diff --git a/tests/cancun/eip5656_mcopy/test_mcopy.py b/tests/cancun/eip5656_mcopy/test_mcopy.py index 52187ccb5d..1a88ce438b 100644 --- a/tests/cancun/eip5656_mcopy/test_mcopy.py +++ b/tests/cancun/eip5656_mcopy/test_mcopy.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656) - Test copy operations of [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656) """ # noqa: E501 @@ -9,7 +8,7 @@ import pytest from ethereum.crypto.hash import keccak256 -from ethereum_test_tools import Account, Environment +from ethereum_test_tools import Account, Environment, Hash from ethereum_test_tools import Opcodes as Op from ethereum_test_tools import ( StateTestFiller, @@ -17,7 +16,6 @@ TestAddress, Transaction, ceiling_division, - to_hash_bytes, ) from .common import REFERENCE_SPEC_GIT_PATH, REFERENCE_SPEC_VERSION, mcopy @@ -117,7 +115,7 @@ def pre(bytecode_storage: Tuple[bytes, Storage]) -> Mapping: # noqa: D103 def tx(dest: int, src: int, length: int) -> Transaction: # noqa: D103 return Transaction( to=code_address, - data=to_hash_bytes(dest) + to_hash_bytes(src) + to_hash_bytes(length), + data=Hash(dest) + Hash(src) + Hash(length), gas_limit=1_000_000, ) @@ -197,7 +195,7 @@ def test_valid_mcopy_operations( env=Environment(), pre=pre, post=post, - txs=[tx], + tx=tx, ) @@ -219,5 +217,5 @@ def test_mcopy_on_empty_memory( env=Environment(), pre=pre, post=post, - txs=[tx], + tx=tx, ) diff --git a/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py b/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py index ac69389986..19eba65954 100644 --- a/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py +++ b/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656) - Test memory copy under different call contexts [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656) """ # noqa: E501 @@ -214,5 +213,5 @@ def test_no_memory_corruption_on_upper_call_stack_levels( env=Environment(), pre=pre, post=post, - txs=[tx], + tx=tx, ) diff --git a/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py b/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py index 
8de58d9f5d..4eb1b7b000 100644 --- a/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py +++ b/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656) - Test copy operations of [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656) that produce a memory expansion, and potentially an out-of-gas error. @@ -215,7 +214,7 @@ def test_mcopy_memory_expansion( env=Environment(), pre=pre, post=post, - txs=[tx], + tx=tx, ) @@ -276,5 +275,5 @@ def test_mcopy_huge_memory_expansion( env=Environment(), pre=pre, post=post, - txs=[tx], + tx=tx, ) diff --git a/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py b/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py new file mode 100644 index 0000000000..1303e859ea --- /dev/null +++ b/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py @@ -0,0 +1,705 @@ +""" +Suicide scenario requested test +https://github.com/ethereum/execution-spec-tests/issues/381 +""" + +from itertools import count +from typing import Dict, Union + +import pytest + +from ethereum_test_forks import Cancun, Fork +from ethereum_test_tools import ( + Account, + Address, + Block, + BlockchainTestFiller, + Conditional, + Environment, + Initcode, + StateTestFiller, + TestAddress, + Transaction, + compute_create2_address, +) +from ethereum_test_tools.vm.opcode import Opcodes as Op + +REFERENCE_SPEC_GIT_PATH = "EIPS/eip-6780.md" +REFERENCE_SPEC_VERSION = "2f8299df31bb8173618901a03a8366a3183479b0" + + +@pytest.fixture +def env(): # noqa: D103 + return Environment( + coinbase="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + difficulty=0x020000, + gas_limit=71794957647893862, + number=1, + timestamp=1000, + ) + + +@pytest.mark.valid_from("Paris") +@pytest.mark.parametrize( + "create2_dest_already_in_state", + (True, False), +) +@pytest.mark.parametrize( + "call_create2_contract_in_between,call_create2_contract_at_the_end", + [ + (True, True), + (True, False), + (False, True), + ], +) +def test_dynamic_create2_selfdestruct_collision( + env: Environment, + fork: Fork, + create2_dest_already_in_state: bool, + call_create2_contract_in_between: bool, + call_create2_contract_at_the_end: bool, + state_test: StateTestFiller, +): + """Dynamic Create2->Suicide->Create2 collision scenario: + + Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys, + then on a different call, in the same tx, perform a self-destruct. + Then: + a) on the same tx, attempt to recreate the contract <=== Covered in this test + 1) and create2 contract already in the state + 2) and create2 contract is not in the state + b) on a different tx, attempt to recreate the contract + Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys, + then in a different tx, perform a self-destruct. 
+ Then: + a) on the same tx, attempt to recreate the contract + b) on a different tx, attempt to recreate the contract + Verify that the test case described + in https://wiki.hyperledger.org/pages/viewpage.action?pageId=117440824 is covered + """ + assert call_create2_contract_in_between or call_create2_contract_at_the_end, "invalid test" + + # Storage locations + create2_constructor_worked = 1 + first_create2_result = 2 + second_create2_result = 3 + code_worked = 4 + + # Pre-Existing Addresses + address_zero = Address(0x00) + address_to = Address(0x0600) + address_code = Address(0x0601) + address_create2_storage = Address(0x0512) + sendall_destination = Address(0x03E8) + + # CREATE2 Initcode + create2_salt = 1 + deploy_code = Op.SELFDESTRUCT(sendall_destination) + initcode = Initcode( + deploy_code=deploy_code, + initcode_prefix=Op.SSTORE(create2_constructor_worked, 1) + + Op.CALL(Op.GAS(), address_create2_storage, 0, 0, 0, 0, 0), + ) + + # Created addresses + create2_address = compute_create2_address(address_code, create2_salt, initcode) + call_address_in_between = create2_address if call_create2_contract_in_between else address_zero + call_address_in_the_end = create2_address if call_create2_contract_at_the_end else address_zero + + # Values + pre_existing_create2_balance = 1 + first_create2_value = 10 + first_call_value = 100 + second_create2_value = 1000 + second_call_value = 10000 + + pre = { + address_to: Account( + balance=100000000, + nonce=0, + code=Op.JUMPDEST() + # Make a subcall that do CREATE2 and returns its the result + + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, first_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + first_create2_result, + Op.MLOAD(0), + ) + # In case the create2 didn't work, flush account balance + + Op.CALL(100000, address_code, 0, 0, 0, 0, 0) + # Call to the created account to trigger selfdestruct + + Op.CALL(100000, call_address_in_between, first_call_value, 0, 0, 0, 0) + # Make a subcall that do CREATE2 collision and returns its address as the result + + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + second_create2_result, + Op.MLOAD(0), + ) + # Call to the created account to trigger selfdestruct + + Op.CALL(100000, call_address_in_the_end, second_call_value, 0, 0, 0, 0) + + Op.SSTORE(code_worked, 1), + storage={first_create2_result: 0xFF, second_create2_result: 0xFF}, + ), + address_code: Account( + balance=0, + nonce=0, + code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.MSTORE( + 0, + Op.CREATE2(Op.SELFBALANCE(), 0, Op.CALLDATASIZE(), create2_salt), + ) + + Op.RETURN(0, 32), + storage={}, + ), + address_create2_storage: Account( + balance=7000000000000000000, + nonce=0, + code=Op.SSTORE(1, 1), + storage={}, + ), + TestAddress: Account( + balance=7000000000000000000, + nonce=0, + code="0x", + storage={}, + ), + } + + if create2_dest_already_in_state: + # Create2 address already in the state, e.g. 
deployed in a previous block + pre[create2_address] = Account( + balance=pre_existing_create2_balance, + nonce=1, + code=deploy_code, + storage={}, + ) + + post: Dict[Address, Union[Account, object]] = {} + + # Create2 address only exists if it was pre-existing and after cancun + post[create2_address] = ( + Account(balance=0, nonce=1, code=deploy_code, storage={create2_constructor_worked: 0x00}) + if create2_dest_already_in_state and fork >= Cancun + else Account.NONEXISTENT + ) + + # Create2 initcode is only executed if the contract did not already exist + post[address_create2_storage] = Account( + storage={create2_constructor_worked: int(not create2_dest_already_in_state)} + ) + + # Entry code that makes the calls to the create2 contract creator + post[address_to] = Account( + storage={ + code_worked: 0x01, + # First create2 only works if the contract was not preexisting + first_create2_result: 0x00 if create2_dest_already_in_state else create2_address, + # Second create2 must never work + second_create2_result: 0x00, + } + ) + + # Calculate the destination account expected balance for the selfdestruct/sendall calls + sendall_destination_balance = ( + pre_existing_create2_balance if create2_dest_already_in_state else first_create2_value + ) + + if call_create2_contract_in_between: + sendall_destination_balance += first_call_value + + if call_create2_contract_at_the_end: + sendall_destination_balance += second_call_value + + post[sendall_destination] = Account(balance=sendall_destination_balance) + + tx = Transaction( + ty=0x0, + chain_id=0x0, + nonce=0, + to=address_to, + gas_price=10, + protected=False, + data=initcode.bytecode if initcode.bytecode is not None else bytes(), + gas_limit=5000000, + value=0, + ) + + state_test(env=env, pre=pre, post=post, tx=tx) + + +@pytest.mark.valid_from("Paris") +@pytest.mark.parametrize( + "create2_dest_already_in_state", + (True, False), +) +@pytest.mark.parametrize( + "call_create2_contract_at_the_end", + [ + (True, False), + ], +) +def test_dynamic_create2_selfdestruct_collision_two_different_transactions( + env: Environment, + fork: Fork, + create2_dest_already_in_state: bool, + call_create2_contract_at_the_end: bool, + blockchain_test: BlockchainTestFiller, +): + """Dynamic Create2->Suicide->Create2 collision scenario: + + Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys, + then on a different call, in the same tx, perform a self-destruct. + Then: + a) on the same tx, attempt to recreate the contract + 1) and create2 contract already in the state + 2) and create2 contract is not in the state + b) on a different tx, attempt to recreate the contract <=== Covered in this test + Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys, + then in a different tx, perform a self-destruct. 
+ Then: + a) on the same tx, attempt to recreate the contract + b) on a different tx, attempt to recreate the contract + Verify that the test case described + in https://wiki.hyperledger.org/pages/viewpage.action?pageId=117440824 is covered + """ + # assert call_create2_contract_at_the_end, "invalid test" + + # Storage locations + create2_constructor_worked = 1 + first_create2_result = 2 + second_create2_result = 3 + code_worked = 4 + + # Pre-Existing Addresses + address_zero = Address(0x00) + address_to = Address(0x0600) + address_to_second = Address(0x0700) + address_code = Address(0x0601) + address_create2_storage = Address(0x0512) + sendall_destination = Address(0x03E8) + + # CREATE2 Initcode + create2_salt = 1 + deploy_code = Op.SELFDESTRUCT(sendall_destination) + initcode = Initcode( + deploy_code=deploy_code, + initcode_prefix=Op.SSTORE(create2_constructor_worked, 1) + + Op.CALL(Op.GAS(), address_create2_storage, 0, 0, 0, 0, 0), + ) + + # Created addresses + create2_address = compute_create2_address(address_code, create2_salt, initcode) + call_address_in_the_end = create2_address if call_create2_contract_at_the_end else address_zero + + # Values + pre_existing_create2_balance = 1 + first_create2_value = 10 + first_call_value = 100 + second_create2_value = 1000 + second_call_value = 10000 + + pre = { + address_to: Account( + balance=100000000, + nonce=0, + code=Op.JUMPDEST() + # Make a subcall that do CREATE2 and returns its the result + + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, first_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + first_create2_result, + Op.MLOAD(0), + ) + # In case the create2 didn't work, flush account balance + + Op.CALL(100000, address_code, 0, 0, 0, 0, 0) + # Call to the created account to trigger selfdestruct + + Op.CALL(100000, create2_address, first_call_value, 0, 0, 0, 0) + + Op.SSTORE(code_worked, 1), + storage={first_create2_result: 0xFF}, + ), + address_to_second: Account( + balance=100000000, + nonce=0, + code=Op.JUMPDEST() + # Make a subcall that do CREATE2 collision and returns its address as the result + + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + second_create2_result, + Op.MLOAD(0), + ) + # Call to the created account to trigger selfdestruct + + Op.CALL(200000, call_address_in_the_end, second_call_value, 0, 0, 0, 0) + + Op.SSTORE(code_worked, 1), + storage={second_create2_result: 0xFF}, + ), + address_code: Account( + balance=0, + nonce=0, + code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.MSTORE( + 0, + Op.CREATE2(Op.SELFBALANCE(), 0, Op.CALLDATASIZE(), create2_salt), + ) + + Op.RETURN(0, 32), + storage={}, + ), + address_create2_storage: Account( + balance=7000000000000000000, + nonce=0, + code=Op.SSTORE(1, 1), + storage={}, + ), + TestAddress: Account( + balance=7000000000000000000, + nonce=0, + code="0x", + storage={}, + ), + } + + if create2_dest_already_in_state: + # Create2 address already in the state, e.g. 
deployed in a previous block + pre[create2_address] = Account( + balance=pre_existing_create2_balance, + nonce=1, + code=deploy_code, + storage={}, + ) + + post: Dict[Address, Union[Account, object]] = {} + + # Create2 address only exists if it was pre-existing and after cancun + post[create2_address] = ( + Account(balance=0, nonce=1, code=deploy_code, storage={create2_constructor_worked: 0x00}) + if create2_dest_already_in_state and fork >= Cancun + else Account.NONEXISTENT + ) + + # after Cancun Create2 initcode is only executed if the contract did not already exist + # and before it will always be executed as the first tx deletes the account + post[address_create2_storage] = Account( + storage={ + create2_constructor_worked: int(fork < Cancun or not create2_dest_already_in_state) + } + ) + + # Entry code that makes the calls to the create2 contract creator + post[address_to] = Account( + storage={ + code_worked: 0x01, + # First create2 only works if the contract was not preexisting + first_create2_result: 0x00 if create2_dest_already_in_state else create2_address, + } + ) + post[address_to_second] = Account( + storage={ + code_worked: 0x01, + # Second create2 will not collide before Cancun as the first tx calls selfdestruct + # After cancun it will collide only if create2_dest_already_in_state otherwise the + # first tx creates and deletes it + second_create2_result: ( + (0x00 if create2_dest_already_in_state else create2_address) + if fork >= Cancun + else create2_address + ), + } + ) + + # Calculate the destination account expected balance for the selfdestruct/sendall calls + sendall_destination_balance = 0 + + if create2_dest_already_in_state: + sendall_destination_balance += pre_existing_create2_balance + if fork >= Cancun: + # first create2 fails, but first calls ok. 
the account is not removed on cancun + # therefor with the second create2 it is not successful + sendall_destination_balance += first_call_value + else: + # first create2 fails, first calls totally removes the account + # in the second transaction second create2 is successful + sendall_destination_balance += first_call_value + second_create2_value + else: + # if no account in the state, first create2 successful, first call successful and removes + # because it is removed in the next transaction second create2 successful + sendall_destination_balance = first_create2_value + first_call_value + second_create2_value + + if call_create2_contract_at_the_end: + sendall_destination_balance += second_call_value + + post[sendall_destination] = Account(balance=sendall_destination_balance) + + nonce = count() + + blockchain_test( + genesis_environment=Environment(), + pre=pre, + post=post, + blocks=[ + Block( + txs=[ + Transaction( + ty=0x0, + chain_id=0x0, + nonce=next(nonce), + to=address_to, + gas_price=10, + protected=False, + data=initcode.bytecode if initcode.bytecode is not None else bytes(), + gas_limit=5000000, + value=0, + ), + Transaction( + ty=0x0, + chain_id=0x0, + nonce=next(nonce), + to=address_to_second, + gas_price=10, + protected=False, + data=initcode.bytecode if initcode.bytecode is not None else bytes(), + gas_limit=5000000, + value=0, + ), + ] + ) + ], + ) + + +@pytest.mark.valid_from("Paris") +@pytest.mark.parametrize( + "selfdestruct_on_first_tx,recreate_on_first_tx", + [ + (False, False), + (True, False), + (True, True), + ], +) +def test_dynamic_create2_selfdestruct_collision_multi_tx( + fork: Fork, + selfdestruct_on_first_tx: bool, + recreate_on_first_tx: bool, + blockchain_test: BlockchainTestFiller, +): + """Dynamic Create2->Suicide->Create2 collision scenario over multiple transactions: + + Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys, + then on a different call, in the same or different tx but same block, perform a self-destruct. + Then: + a) on the same tx, attempt to recreate the contract + b) on a different tx, attempt to recreate the contract + Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys, + then in a different tx, perform a self-destruct. 
+ Then: + a) on the same tx, attempt to recreate the contract <=== Covered in this test + b) on a different tx, attempt to recreate the contract <=== Covered in this test + Verify that the test case described + in https://wiki.hyperledger.org/pages/viewpage.action?pageId=117440824 is covered + """ + if recreate_on_first_tx: + assert selfdestruct_on_first_tx, "invalid test" + + # Storage locations + create2_constructor_worked = 1 + first_create2_result = 2 + second_create2_result = 3 + part_1_worked = 4 + part_2_worked = 5 + + # Pre-Existing Addresses + address_to = Address(0x0600) + address_code = Address(0x0601) + address_create2_storage = Address(0x0512) + sendall_destination = Address(0x03E8) + + # CREATE2 Initcode + create2_salt = 1 + deploy_code = Op.SELFDESTRUCT(sendall_destination) + initcode = Initcode( + deploy_code=deploy_code, + initcode_prefix=Op.SSTORE(create2_constructor_worked, 1) + + Op.CALL(Op.GAS(), address_create2_storage, 0, 0, 0, 0, 0), + ) + + # Created addresses + create2_address = compute_create2_address(address_code, create2_salt, initcode) + + # Values + first_create2_value = 3 + first_call_value = 5 + second_create2_value = 7 + second_call_value = 11 + + # Code is divided in two transactions part of the same block + first_tx_code = bytes() + second_tx_code = bytes() + + first_tx_code += ( + Op.JUMPDEST() + # Make a subcall that do CREATE2 and returns its the result + + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, first_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + first_create2_result, + Op.MLOAD(0), + ) + ) + + if selfdestruct_on_first_tx: + first_tx_code += ( + # Call to the created account to trigger selfdestruct + Op.CALL(100000, create2_address, first_call_value, 0, 0, 0, 0) + ) + else: + second_tx_code += ( + # Call to the created account to trigger selfdestruct + Op.CALL(100000, create2_address, first_call_value, 0, 0, 0, 0) + ) + + if recreate_on_first_tx: + first_tx_code += ( + # Make a subcall that do CREATE2 collision and returns its address as the result + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + second_create2_result, + Op.MLOAD(0), + ) + ) + + else: + second_tx_code += ( + # Make a subcall that do CREATE2 collision and returns its address as the result + Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32) + + Op.SSTORE( + second_create2_result, + Op.MLOAD(0), + ) + ) + + # Second tx code always calls the create2 contract at the end + second_tx_code += Op.CALL(100000, create2_address, second_call_value, 0, 0, 0, 0) + + first_tx_code += Op.SSTORE(part_1_worked, 1) + second_tx_code += Op.SSTORE(part_2_worked, 1) + + pre = { + address_to: Account( + balance=100000000, + nonce=0, + code=Conditional( + # Depending on the tx, execute the first or second tx code + condition=Op.EQ(Op.SLOAD(part_1_worked), 0), + if_true=first_tx_code, + if_false=second_tx_code, + ), + storage={first_create2_result: 0xFF, second_create2_result: 0xFF}, + ), + address_code: Account( + balance=0, + nonce=0, + code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + + Op.MSTORE( + 0, + Op.CREATE2(Op.SELFBALANCE(), 0, Op.CALLDATASIZE(), create2_salt), + ) + + Op.RETURN(0, 32), + storage={}, + ), + address_create2_storage: Account( + balance=7000000000000000000, + nonce=0, + code=Op.SSTORE(1, 1), + storage={}, + ), + TestAddress: Account( + balance=7000000000000000000, + 
nonce=0, + code="0x", + storage={}, + ), + } + + post: Dict[Address, Union[Account, object]] = {} + + # Create2 address only exists if it was pre-existing and after cancun + account_will_exist_with_code = not selfdestruct_on_first_tx and fork >= Cancun + # If the contract is self-destructed and we also attempt to recreate it on the first tx, + # the second call on the second tx will only place balance in the account + account_will_exist_with_balance = selfdestruct_on_first_tx and recreate_on_first_tx + + post[create2_address] = ( + Account(balance=0, nonce=1, code=deploy_code, storage={create2_constructor_worked: 0x01}) + if account_will_exist_with_code + else ( + Account(balance=second_call_value, nonce=0) + if account_will_exist_with_balance + else Account.NONEXISTENT + ) + ) + + # Create2 initcode saves storage unconditionally + post[address_create2_storage] = Account(storage={create2_constructor_worked: 0x01}) + + # Entry code that makes the calls to the create2 contract creator + post[address_to] = Account( + storage={ + part_1_worked: 0x01, + part_2_worked: 0x01, + # First create2 always works + first_create2_result: create2_address, + # Second create2 only works if we successfully self-destructed on the first tx + second_create2_result: ( + create2_address if selfdestruct_on_first_tx and not recreate_on_first_tx else 0x00 + ), + } + ) + + # Calculate the destination account expected balance for the selfdestruct/sendall calls + sendall_destination_balance = first_create2_value + first_call_value + + if not account_will_exist_with_balance: + sendall_destination_balance += second_call_value + + if selfdestruct_on_first_tx and not recreate_on_first_tx: + sendall_destination_balance += second_create2_value + + post[sendall_destination] = Account(balance=sendall_destination_balance) + + nonce = count() + + blockchain_test( + genesis_environment=Environment(), + pre=pre, + post=post, + blocks=[ + Block( + txs=[ + Transaction( + ty=0x0, + chain_id=0x0, + nonce=next(nonce), + to=address_to, + gas_price=10, + protected=False, + data=initcode.bytecode if initcode.bytecode is not None else bytes(), + gas_limit=5000000, + value=0, + ), + Transaction( + ty=0x0, + chain_id=0x0, + nonce=next(nonce), + to=address_to, + gas_price=10, + protected=False, + data=initcode.bytecode if initcode.bytecode is not None else bytes(), + gas_limit=5000000, + value=0, + ), + ] + ) + ], + ) diff --git a/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py b/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py new file mode 100644 index 0000000000..d5661cfe90 --- /dev/null +++ b/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py @@ -0,0 +1,157 @@ +""" +Suicide scenario requested test +https://github.com/ethereum/tests/issues/1325 +""" + +import pytest + +from ethereum_test_forks import Cancun, Fork +from ethereum_test_tools import ( + Account, + Address, + Environment, + StateTestFiller, + TestAddress, + TestAddress2, + Transaction, +) +from ethereum_test_tools.vm.opcode import Opcodes as Op + +REFERENCE_SPEC_GIT_PATH = "EIPS/eip-6780.md" +REFERENCE_SPEC_VERSION = "2f8299df31bb8173618901a03a8366a3183479b0" + + +@pytest.fixture +def env(): # noqa: D103 + return Environment( + coinbase="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + difficulty=0x020000, + gas_limit=71794957647893862, + number=1, + timestamp=1000, + ) + + +@pytest.mark.valid_from("Paris") +@pytest.mark.parametrize("first_suicide", [Op.CALL, Op.CALLCODE, Op.DELEGATECALL]) 
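+# Both the initial entry into S (first_suicide) and R's reentrant call into S
+# (second_suicide) are parametrized over CALL, CALLCODE and DELEGATECALL,
+# giving nine opcode combinations.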
+@pytest.mark.parametrize("second_suicide", [Op.CALL, Op.CALLCODE, Op.DELEGATECALL]) +def test_reentrancy_selfdestruct_revert( + env: Environment, + fork: Fork, + first_suicide: Op, + second_suicide: Op, + state_test: StateTestFiller, +): + """ + Suicide reentrancy scenario: + + Call|Callcode|Delegatecall the contract S. + S self destructs. + Call the revert proxy contract R. + R Calls|Callcode|Delegatecall S. + S self destructs (for the second time). + R reverts (including the effects of the second selfdestruct). + It is expected the S is self destructed after the transaction. + """ + address_to = TestAddress2 + address_s = Address(0x1000000000000000000000000000000000000001) + address_r = Address(0x1000000000000000000000000000000000000002) + suicide_d = Address(0x03E8) + + def construct_call_s(call_type: Op, money: int): + if call_type in [Op.CALLCODE, Op.CALL]: + return call_type(Op.GAS, address_s, money, 0, 0, 0, 0) + else: + return call_type(Op.GAS, address_s, money, 0, 0, 0) + + pre = { + address_to: Account( + balance=1000000000000000000, + nonce=0, + code=Op.SSTORE(1, construct_call_s(first_suicide, 0)) + + Op.SSTORE(2, Op.CALL(Op.GAS, address_r, 0, 0, 0, 0, 0)) + + Op.RETURNDATACOPY(0, 0, Op.RETURNDATASIZE()) + + Op.SSTORE(3, Op.MLOAD(0)), + storage={0x01: 0x0100, 0x02: 0x0100, 0x03: 0x0100}, + ), + address_s: Account( + balance=3000000000000000000, + nonce=0, + code=Op.SELFDESTRUCT(1000), + storage={}, + ), + address_r: Account( + balance=5000000000000000000, + nonce=0, + # Send money when calling it suicide second time to make sure the funds not transferred + code=Op.MSTORE(0, Op.ADD(15, construct_call_s(second_suicide, 100))) + + Op.REVERT(0, 32), + storage={}, + ), + TestAddress: Account( + balance=7000000000000000000, + nonce=0, + code="0x", + storage={}, + ), + } + + post = { + # Second caller unchanged as call gets reverted + address_r: Account(balance=5000000000000000000, storage={}), + } + + if first_suicide in [Op.CALLCODE, Op.DELEGATECALL]: + if fork >= Cancun: + # On Cancun even callcode/delegatecall does not remove the account, so the value remain + post[address_to] = Account( + storage={ + 0x01: 0x01, # First call to contract S->suicide success + 0x02: 0x00, # Second call to contract S->suicide reverted + 0x03: 16, # Reverted value to check that revert really worked + }, + ) + else: + # Callcode executed first suicide from sender. 
sender is deleted + post[address_to] = Account.NONEXISTENT # type: ignore + + # Original suicide account remains in state + post[address_s] = Account(balance=3000000000000000000, storage={}) + # Suicide destination + post[suicide_d] = Account( + balance=1000000000000000000, + ) + + # On Cancun suicide no longer destroys the account from state, just cleans the balance + if first_suicide in [Op.CALL]: + post[address_to] = Account( + storage={ + 0x01: 0x01, # First call to contract S->suicide success + 0x02: 0x00, # Second call to contract S->suicide reverted + 0x03: 16, # Reverted value to check that revert really worked + }, + ) + if fork >= Cancun: + # On Cancun suicide does not remove the account, just sends the balance + post[address_s] = Account(balance=0, code="0x6103e8ff", storage={}) + else: + post[address_s] = Account.NONEXISTENT # type: ignore + + # Suicide destination + post[suicide_d] = Account( + balance=3000000000000000000, + ) + + tx = Transaction( + ty=0x0, + chain_id=0x0, + nonce=0, + to=address_to, + gas_price=10, + protected=False, + data="", + gas_limit=500000, + value=0, + ) + + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py b/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py index bcfa98a505..9e323f19a9 100644 --- a/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py +++ b/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-6780: SELFDESTRUCT only in same transaction](https://eips.ethereum.org/EIPS/eip-6780) - Tests for [EIP-6780: SELFDESTRUCT only in same transaction](https://eips.ethereum.org/EIPS/eip-6780). """ # noqa: E501 @@ -11,12 +10,14 @@ import pytest from ethereum.crypto.hash import keccak256 -from ethereum_test_forks import Cancun, Fork, is_fork +from ethereum_test_forks import Cancun, Fork from ethereum_test_tools import ( Account, + Address, Block, BlockchainTestFiller, Environment, + Hash, Initcode, StateTestFiller, Storage, @@ -25,8 +26,6 @@ YulCompiler, compute_create2_address, compute_create_address, - to_address, - to_hash_bytes, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -35,22 +34,22 @@ SELFDESTRUCT_ENABLE_FORK = Cancun -PRE_EXISTING_SELFDESTRUCT_ADDRESS = "0x1111111111111111111111111111111111111111" +PRE_EXISTING_SELFDESTRUCT_ADDRESS = Address("0x1111111111111111111111111111111111111111") """ Address of a pre-existing contract that self-destructs. """ # Sentinel value to indicate that the self-destructing contract address should be used, only for # use in `pytest.mark.parametrize`, not for use within the test method itself. -SELF_ADDRESS = "0x1" +SELF_ADDRESS = Address(0x01) # Sentinel value to indicate that the contract should not self-destruct. 
-NO_SELFDESTRUCT = "0x0" +NO_SELFDESTRUCT = Address(0x00) @pytest.fixture def eip_enabled(fork: Fork) -> bool: """Whether the EIP is enabled or not.""" - return is_fork(fork, SELFDESTRUCT_ENABLE_FORK) + return fork >= SELFDESTRUCT_ENABLE_FORK @pytest.fixture @@ -62,14 +61,14 @@ def env() -> Environment: @pytest.fixture -def sendall_recipient_addresses() -> List[str]: +def sendall_recipient_addresses() -> List[Address]: """List of possible addresses that can receive a SENDALL operation.""" - return [to_address(0x1234)] + return [Address(0x1234)] def selfdestruct_code_preset( *, - sendall_recipient_addresses: List[str], + sendall_recipient_addresses: List[Address], yul: YulCompiler, ) -> SupportsBytes: """Return a bytecode that self-destructs.""" @@ -99,7 +98,9 @@ def selfdestruct_code_preset( assert sendall_recipient != NO_SELFDESTRUCT, "test error" if sendall_recipient == SELF_ADDRESS: # Use the self address of the contract we are creating - sendall_recipient = "address()" + # sendall_recipient = "address()" + # TODO: Fix this + pass return yul( f""" {{ @@ -113,7 +114,7 @@ def selfdestruct_code_preset( @pytest.fixture def selfdestruct_code( - sendall_recipient_addresses: List[str], + sendall_recipient_addresses: List[Address], yul: YulCompiler, ) -> SupportsBytes: """ @@ -147,13 +148,13 @@ def selfdestruct_contract_initcode( @pytest.fixture -def initcode_copy_from_address() -> str: +def initcode_copy_from_address() -> Address: """Address of a pre-existing contract we use to simply copy initcode from.""" - return to_address(0xABCD) + return Address(0xABCD) @pytest.fixture -def entry_code_address() -> str: +def entry_code_address() -> Address: """Address where the entry code will run.""" return compute_create_address(TestAddress, 0) @@ -161,9 +162,9 @@ def entry_code_address() -> str: @pytest.fixture def selfdestruct_contract_address( create_opcode: Op, - entry_code_address: str, + entry_code_address: Address, selfdestruct_contract_initcode: SupportsBytes, -) -> str: +) -> Address: """Returns the address of the self-destructing contract.""" if create_opcode == Op.CREATE: return compute_create_address(entry_code_address, 1) @@ -176,13 +177,13 @@ def selfdestruct_contract_address( @pytest.fixture def pre( - initcode_copy_from_address: str, + initcode_copy_from_address: Address, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, + selfdestruct_contract_address: Address, selfdestruct_contract_initial_balance: int, - sendall_recipient_addresses: List[str], + sendall_recipient_addresses: List[Address], yul: YulCompiler, -) -> Dict[str, Account]: +) -> Dict[Address, Account]: """Pre-state of all tests""" pre = { TestAddress: Account(balance=100_000_000_000_000_000_000), @@ -228,7 +229,7 @@ def pre( [ pytest.param( 1, - [to_address(0x1000)], + [Address(0x1000)], id="single_call", ), pytest.param( @@ -237,30 +238,40 @@ def pre( id="single_call_self", ), pytest.param( - 10, - [to_address(0x1000)], + 2, + [Address(0x1000)], id="multiple_calls_single_sendall_recipient", ), pytest.param( - 10, - [to_address(0x1000), to_address(0x2000), to_address(0x3000)], - id="multiple_calls_multiple_sendall_recipients", + 2, + [SELF_ADDRESS], + id="multiple_calls_single_self_recipient", ), pytest.param( - 10, - [SELF_ADDRESS, to_address(0x2000), to_address(0x3000)], - id="multiple_calls_multiple_sendall_recipients_including_self", + 3, + [Address(0x1000), Address(0x2000), Address(0x3000)], + id="multiple_calls_multiple_sendall_recipients", ), pytest.param( - 10, - [to_address(0x1000), 
to_address(0x2000), SELF_ADDRESS], - id="multiple_calls_multiple_sendall_recipients_including_self_different_order", + 3, + [SELF_ADDRESS, Address(0x2000), Address(0x3000)], + id="multiple_calls_multiple_sendall_recipients_including_self", ), pytest.param( 3, - [to_address(0x1000), to_address(0x2000), SELF_ADDRESS], + [Address(0x1000), Address(0x2000), SELF_ADDRESS], id="multiple_calls_multiple_sendall_recipients_including_self_last", ), + pytest.param( + 6, + [SELF_ADDRESS, Address(0x2000), Address(0x3000)], + id="multiple_calls_multiple_repeating_sendall_recipients_including_self", + ), + pytest.param( + 6, + [Address(0x1000), Address(0x2000), SELF_ADDRESS], + id="multiple_calls_multiple_repeating_sendall_recipients_including_self_last", + ), ], ) @pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000]) @@ -268,13 +279,13 @@ def pre( def test_create_selfdestruct_same_tx( state_test: StateTestFiller, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, selfdestruct_code: SupportsBytes, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, - sendall_recipient_addresses: List[str], - initcode_copy_from_address: str, + selfdestruct_contract_address: Address, + sendall_recipient_addresses: List[Address], + initcode_copy_from_address: Address, create_opcode: Op, call_times: int, selfdestruct_contract_initial_balance: int, @@ -314,7 +325,7 @@ def test_create_selfdestruct_same_tx( entry_code = ( # Initcode is already deployed at `initcode_copy_from_address`, so just copy it Op.EXTCODECOPY( - Op.PUSH20(initcode_copy_from_address), + initcode_copy_from_address, 0, 0, len(bytes(selfdestruct_contract_initcode)), @@ -329,23 +340,23 @@ def test_create_selfdestruct_same_tx( # Store the EXTCODE* properties of the created address entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(selfdestruct_code))), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(selfdestruct_code))), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Call the self-destructing contract multiple times as required, increasing the wei sent each # time for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)): - entry_code += Op.MSTORE(0, Op.PUSH20(sendall_recipient)) + entry_code += Op.MSTORE(0, sendall_recipient) entry_code += Op.SSTORE( entry_code_storage.store_next(1), Op.CALL( Op.GASLIMIT, # Gas - Op.PUSH20(selfdestruct_contract_address), # Address + selfdestruct_contract_address, # Address i, # Value 0, 32, @@ -365,25 +376,25 @@ def test_create_selfdestruct_same_tx( entry_code += Op.SSTORE( entry_code_storage.store_next(0), - Op.BALANCE(Op.PUSH20(selfdestruct_contract_address)), + Op.BALANCE(selfdestruct_contract_address), ) # Check the EXTCODE* properties of the self-destructing contract again entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(selfdestruct_code))), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(selfdestruct_code))), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Lastly return zero so the entry point contract is created and we can retain 
the stored # values for verification. entry_code += Op.RETURN(max(len(bytes(selfdestruct_contract_initcode)), 32), 1) - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x00", storage=entry_code_storage, @@ -412,7 +423,7 @@ def test_create_selfdestruct_same_tx( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) @pytest.mark.parametrize("create_opcode", [Op.CREATE, Op.CREATE2]) @@ -423,12 +434,12 @@ def test_create_selfdestruct_same_tx( def test_self_destructing_initcode( state_test: StateTestFiller, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, - sendall_recipient_addresses: List[str], - initcode_copy_from_address: str, + selfdestruct_contract_address: Address, + sendall_recipient_addresses: List[Address], + initcode_copy_from_address: Address, create_opcode: Op, call_times: int, # Number of times to call the self-destructing contract in the same tx selfdestruct_contract_initial_balance: int, @@ -462,7 +473,7 @@ def test_self_destructing_initcode( entry_code = ( # Initcode is already deployed at `initcode_copy_from_address`, so just copy it Op.EXTCODECOPY( - Op.PUSH20(initcode_copy_from_address), + initcode_copy_from_address, 0, 0, len(bytes(selfdestruct_contract_initcode)), @@ -477,12 +488,12 @@ def test_self_destructing_initcode( # Store the EXTCODE* properties of the created address entry_code += Op.SSTORE( entry_code_storage.store_next(0), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes())), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Call the self-destructing contract multiple times as required, increasing the wei sent each @@ -492,7 +503,7 @@ def test_self_destructing_initcode( entry_code_storage.store_next(1), Op.CALL( Op.GASLIMIT, # Gas - Op.PUSH20(selfdestruct_contract_address), # Address + selfdestruct_contract_address, # Address i, # Value 0, 0, @@ -503,7 +514,7 @@ def test_self_destructing_initcode( entry_code += Op.SSTORE( entry_code_storage.store_next(0), - Op.BALANCE(Op.PUSH20(selfdestruct_contract_address)), + Op.BALANCE(selfdestruct_contract_address), ) # Lastly return zero so the entry point contract is created and we can retain the stored @@ -515,7 +526,7 @@ def test_self_destructing_initcode( # which must be included in the send-all operation sendall_amount += selfdestruct_contract_initial_balance - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x00", storage=entry_code_storage, @@ -540,7 +551,7 @@ def test_self_destructing_initcode( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) @pytest.mark.parametrize("tx_value", [0, 100_000]) @@ -551,13 +562,13 @@ def test_self_destructing_initcode( def test_self_destructing_initcode_create_tx( state_test: StateTestFiller, env: Environment, - pre: Dict[str, Account], + pre: Dict[Address, Account], tx_value: int, - entry_code_address: str, + entry_code_address: Address, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, - sendall_recipient_addresses: List[str], - initcode_copy_from_address: 
str, + selfdestruct_contract_address: Address, + sendall_recipient_addresses: List[Address], + initcode_copy_from_address: Address, selfdestruct_contract_initial_balance: int, ): """ @@ -574,7 +585,7 @@ def test_self_destructing_initcode_create_tx( # Our entry point is an initcode that in turn creates a self-destructing contract sendall_amount = selfdestruct_contract_initial_balance + tx_value - post: Dict[str, Account] = { + post: Dict[Address, Account] = { selfdestruct_contract_address: Account.NONEXISTENT, # type: ignore initcode_copy_from_address: Account( code=selfdestruct_contract_initcode, @@ -595,7 +606,7 @@ def test_self_destructing_initcode_create_tx( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) @pytest.mark.parametrize("create_opcode", [Op.CREATE2]) # Can only recreate using CREATE2 @@ -603,7 +614,7 @@ def test_self_destructing_initcode_create_tx( "sendall_recipient_addresses", [ pytest.param( - [to_address(0x1000)], + [Address(0x1000)], id="selfdestruct_other_address", ), pytest.param( @@ -619,13 +630,13 @@ def test_self_destructing_initcode_create_tx( def test_recreate_self_destructed_contract_different_txs( blockchain_test: BlockchainTestFiller, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, + selfdestruct_contract_address: Address, selfdestruct_contract_initial_balance: int, - sendall_recipient_addresses: List[str], - initcode_copy_from_address: str, + sendall_recipient_addresses: List[Address], + initcode_copy_from_address: Address, create_opcode: Op, recreate_times: int, # Number of times to recreate the contract in different transactions call_times: int, # Number of times to call the self-destructing contract in the same tx @@ -651,7 +662,7 @@ def test_recreate_self_destructed_contract_different_txs( entry_code = ( # Initcode is already deployed at initcode_copy_from_address, so just copy it Op.EXTCODECOPY( - Op.PUSH20(initcode_copy_from_address), + initcode_copy_from_address, 0, 0, len(bytes(selfdestruct_contract_initcode)), @@ -665,7 +676,7 @@ def test_recreate_self_destructed_contract_different_txs( for i in range(call_times): entry_code += Op.CALL( Op.GASLIMIT, - Op.PUSH20(selfdestruct_contract_address), + selfdestruct_contract_address, i, 0, 0, @@ -682,7 +693,7 @@ def test_recreate_self_destructed_contract_different_txs( txs.append( Transaction( ty=0x0, - data=to_hash_bytes(i), + data=Hash(i), chain_id=0x0, nonce=next(nonce), to=entry_code_address, @@ -694,7 +705,7 @@ def test_recreate_self_destructed_contract_different_txs( entry_code_storage[i] = selfdestruct_contract_address pre[entry_code_address] = Account(code=entry_code) - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code=entry_code, storage=entry_code_storage, @@ -715,39 +726,49 @@ def test_recreate_self_destructed_contract_different_txs( [ pytest.param( 1, - [to_address(0x1000)], + [Address(0x1000)], id="single_call", ), pytest.param( 1, [PRE_EXISTING_SELFDESTRUCT_ADDRESS], - id="single_call_self_sendall_recipient", + id="single_call_self", ), pytest.param( - 10, - [to_address(0x1000)], + 2, + [Address(0x1000)], id="multiple_calls_single_sendall_recipient", ), pytest.param( - 10, - [to_address(0x1000), to_address(0x2000), to_address(0x3000)], - id="multiple_calls_multiple_sendall_recipients", + 2, + 
[PRE_EXISTING_SELFDESTRUCT_ADDRESS], + id="multiple_calls_single_self_recipient", ), pytest.param( - 10, - [PRE_EXISTING_SELFDESTRUCT_ADDRESS, to_address(0x2000), to_address(0x3000)], - id="multiple_calls_multiple_sendall_recipients_including_self", + 3, + [Address(0x1000), Address(0x2000), Address(0x3000)], + id="multiple_calls_multiple_sendall_recipients", ), pytest.param( - 10, - [to_address(0x1000), to_address(0x2000), PRE_EXISTING_SELFDESTRUCT_ADDRESS], - id="multiple_calls_multiple_sendall_recipients_including_self_different_order", + 3, + [PRE_EXISTING_SELFDESTRUCT_ADDRESS, Address(0x2000), Address(0x3000)], + id="multiple_calls_multiple_sendall_recipients_including_self", ), pytest.param( 3, - [to_address(0x1000), to_address(0x2000), PRE_EXISTING_SELFDESTRUCT_ADDRESS], + [Address(0x1000), Address(0x2000), PRE_EXISTING_SELFDESTRUCT_ADDRESS], id="multiple_calls_multiple_sendall_recipients_including_self_last", ), + pytest.param( + 6, + [PRE_EXISTING_SELFDESTRUCT_ADDRESS, Address(0x2000), Address(0x3000)], + id="multiple_calls_multiple_repeating_sendall_recipients_including_self", + ), + pytest.param( + 6, + [Address(0x1000), Address(0x2000), PRE_EXISTING_SELFDESTRUCT_ADDRESS], + id="multiple_calls_multiple_repeating_sendall_recipients_including_self_last", + ), ], ) @pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000]) @@ -759,12 +780,12 @@ def test_selfdestruct_pre_existing( state_test: StateTestFiller, eip_enabled: bool, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, - selfdestruct_contract_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, + selfdestruct_contract_address: Address, selfdestruct_code: SupportsBytes, selfdestruct_contract_initial_balance: int, - sendall_recipient_addresses: List[str], + sendall_recipient_addresses: List[Address], call_times: int, ): """ @@ -793,12 +814,12 @@ def test_selfdestruct_pre_existing( # Call the self-destructing contract multiple times as required, increasing the wei sent each # time for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)): - entry_code += Op.MSTORE(0, Op.PUSH20(sendall_recipient)) + entry_code += Op.MSTORE(0, sendall_recipient) entry_code += Op.SSTORE( entry_code_storage.store_next(1), Op.CALL( Op.GASLIMIT, # Gas - Op.PUSH20(selfdestruct_contract_address), # Address + selfdestruct_contract_address, # Address i, # Value 0, 32, @@ -819,25 +840,25 @@ def test_selfdestruct_pre_existing( entry_code += Op.SSTORE( entry_code_storage.store_next(selfdestruct_contract_current_balance), - Op.BALANCE(Op.PUSH20(selfdestruct_contract_address)), + Op.BALANCE(selfdestruct_contract_address), ) # Check the EXTCODE* properties of the self-destructing contract entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(selfdestruct_code))), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(selfdestruct_code))), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Lastly return zero so the entry point contract is created and we can retain the stored # values for verification. 
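+    # Offset 32 is untouched memory, so the one returned byte is 0x00 and becomes the deployed code.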
entry_code += Op.RETURN(32, 1) - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x00", storage=entry_code_storage, @@ -872,7 +893,7 @@ def test_selfdestruct_pre_existing( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) @pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 1]) @@ -886,13 +907,13 @@ def test_selfdestruct_created_same_block_different_tx( blockchain_test: BlockchainTestFiller, eip_enabled: bool, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, - selfdestruct_contract_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, + selfdestruct_contract_address: Address, selfdestruct_code: SupportsBytes, selfdestruct_contract_initcode: SupportsBytes, selfdestruct_contract_initial_balance: int, - sendall_recipient_addresses: List[str], + sendall_recipient_addresses: List[Address], call_times: int, ): """ @@ -913,7 +934,7 @@ def test_selfdestruct_created_same_block_different_tx( entry_code_storage.store_next(1), Op.CALL( Op.GASLIMIT, # Gas - Op.PUSH20(selfdestruct_contract_address), # Address + selfdestruct_contract_address, # Address i, # Value 0, 0, @@ -926,25 +947,25 @@ def test_selfdestruct_created_same_block_different_tx( entry_code += Op.SSTORE( entry_code_storage.store_next(0), - Op.BALANCE(Op.PUSH20(selfdestruct_contract_address)), + Op.BALANCE(selfdestruct_contract_address), ) # Check the EXTCODE* properties of the self-destructing contract entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(selfdestruct_code))), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(selfdestruct_code))), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Lastly return zero so the entry point contract is created and we can retain the stored # values for verification. 
entry_code += Op.RETURN(32, 1) - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x00", storage=entry_code_storage, @@ -994,7 +1015,7 @@ def test_selfdestruct_created_same_block_different_tx( pytest.param( Op.DELEGATECALL( Op.GAS, - Op.PUSH20(PRE_EXISTING_SELFDESTRUCT_ADDRESS), + PRE_EXISTING_SELFDESTRUCT_ADDRESS, 0, 0, 0, @@ -1005,7 +1026,7 @@ def test_selfdestruct_created_same_block_different_tx( pytest.param( Op.CALLCODE( Op.GAS, - Op.PUSH20(PRE_EXISTING_SELFDESTRUCT_ADDRESS), + PRE_EXISTING_SELFDESTRUCT_ADDRESS, 0, 0, 0, @@ -1023,13 +1044,13 @@ def test_selfdestruct_created_same_block_different_tx( def test_delegatecall_from_new_contract_to_pre_existing_contract( state_test: StateTestFiller, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, selfdestruct_code: SupportsBytes, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, - sendall_recipient_addresses: List[str], - initcode_copy_from_address: str, + selfdestruct_contract_address: Address, + sendall_recipient_addresses: List[Address], + initcode_copy_from_address: Address, create_opcode: Op, call_times: int, selfdestruct_contract_initial_balance: int, @@ -1057,7 +1078,7 @@ def test_delegatecall_from_new_contract_to_pre_existing_contract( entry_code = ( # Initcode is already deployed at `initcode_copy_from_address`, so just copy it Op.EXTCODECOPY( - Op.PUSH20(initcode_copy_from_address), + initcode_copy_from_address, 0, 0, len(bytes(selfdestruct_contract_initcode)), @@ -1072,12 +1093,12 @@ def test_delegatecall_from_new_contract_to_pre_existing_contract( # Store the EXTCODE* properties of the created address entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(selfdestruct_code))), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(selfdestruct_code))), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Call the self-destructing contract multiple times as required, increasing the wei sent each @@ -1087,7 +1108,7 @@ def test_delegatecall_from_new_contract_to_pre_existing_contract( entry_code_storage.store_next(1), Op.CALL( Op.GASLIMIT, # Gas - Op.PUSH20(selfdestruct_contract_address), # Address + selfdestruct_contract_address, # Address i, # Value 0, 0, @@ -1100,18 +1121,18 @@ def test_delegatecall_from_new_contract_to_pre_existing_contract( entry_code += Op.SSTORE( entry_code_storage.store_next(0), - Op.BALANCE(Op.PUSH20(selfdestruct_contract_address)), + Op.BALANCE(selfdestruct_contract_address), ) # Check the EXTCODE* properties of the self-destructing contract again entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(selfdestruct_code))), - Op.EXTCODESIZE(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODESIZE(selfdestruct_contract_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(selfdestruct_code))), - Op.EXTCODEHASH(Op.PUSH20(selfdestruct_contract_address)), + Op.EXTCODEHASH(selfdestruct_contract_address), ) # Lastly return zero so the entry point contract is created and we can retain the stored @@ -1123,7 +1144,7 @@ def test_delegatecall_from_new_contract_to_pre_existing_contract( # which must be included in the send-all operation sendall_amount += selfdestruct_contract_initial_balance - post: Dict[str, Account] 
= { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x00", storage=entry_code_storage, @@ -1148,7 +1169,7 @@ def test_delegatecall_from_new_contract_to_pre_existing_contract( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) @pytest.mark.parametrize("create_opcode", [Op.CREATE, Op.CREATE2]) @@ -1160,13 +1181,13 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( state_test: StateTestFiller, eip_enabled: bool, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, selfdestruct_code: SupportsBytes, selfdestruct_contract_initcode: SupportsBytes, - selfdestruct_contract_address: str, - sendall_recipient_addresses: List[str], - initcode_copy_from_address: str, + selfdestruct_contract_address: Address, + sendall_recipient_addresses: List[Address], + initcode_copy_from_address: Address, call_opcode: Op, create_opcode: Op, call_times: int, @@ -1178,10 +1199,10 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( is not deleted. """ # Add the contract that delegate calls to the newly created contract - delegate_caller_address = "0x2222222222222222222222222222222222222222" + delegate_caller_address = Address("0x2222222222222222222222222222222222222222") call_args: List[int | bytes] = [ Op.GAS(), - Op.PUSH20(selfdestruct_contract_address), + selfdestruct_contract_address, 0, 0, 0, @@ -1212,7 +1233,7 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( entry_code = ( # Initcode is already deployed at `initcode_copy_from_address`, so just copy it Op.EXTCODECOPY( - Op.PUSH20(initcode_copy_from_address), + initcode_copy_from_address, 0, 0, len(bytes(selfdestruct_contract_initcode)), @@ -1227,12 +1248,12 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( # Store the EXTCODE* properties of the pre-existing address entry_code += Op.SSTORE( entry_code_storage.store_next(len(delegate_caller_code)), - Op.EXTCODESIZE(Op.PUSH20(delegate_caller_address)), + Op.EXTCODESIZE(delegate_caller_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(delegate_caller_code)), - Op.EXTCODEHASH(Op.PUSH20(delegate_caller_address)), + Op.EXTCODEHASH(delegate_caller_address), ) # Now instead of calling the newly created contract directly, we delegate call to it @@ -1242,7 +1263,7 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( entry_code_storage.store_next(1), Op.CALL( Op.GASLIMIT, # Gas - Op.PUSH20(delegate_caller_address), # Address + delegate_caller_address, # Address i, # Value 0, 0, @@ -1255,25 +1276,25 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( entry_code += Op.SSTORE( entry_code_storage.store_next(0), - Op.BALANCE(Op.PUSH20(delegate_caller_address)), + Op.BALANCE(delegate_caller_address), ) # Check the EXTCODE* properties of the pre-existing address again entry_code += Op.SSTORE( entry_code_storage.store_next(len(bytes(delegate_caller_code))), - Op.EXTCODESIZE(Op.PUSH20(delegate_caller_address)), + Op.EXTCODESIZE(delegate_caller_address), ) entry_code += Op.SSTORE( entry_code_storage.store_next(keccak256(bytes(delegate_caller_code))), - Op.EXTCODEHASH(Op.PUSH20(delegate_caller_address)), + Op.EXTCODEHASH(delegate_caller_address), ) # Lastly return zero so the entry point contract is created and we can retain the stored # values for verification. 
entry_code += Op.RETURN(max(len(bytes(selfdestruct_contract_initcode)), 32), 1) - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x00", storage=entry_code_storage, @@ -1305,4 +1326,4 @@ def test_delegatecall_from_pre_existing_contract_to_new_contract( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py b/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py index 9ffc218941..e7f05a7bfc 100644 --- a/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py +++ b/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py @@ -10,6 +10,7 @@ from ethereum_test_forks import Cancun from ethereum_test_tools import ( Account, + Address, Environment, Initcode, StateTestFiller, @@ -18,7 +19,6 @@ Transaction, YulCompiler, compute_create_address, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -29,15 +29,15 @@ @pytest.fixture -def entry_code_address() -> str: +def entry_code_address() -> Address: """Address where the entry code will run.""" return compute_create_address(TestAddress, 0) @pytest.fixture -def recursive_revert_contract_address(): +def recursive_revert_contract_address() -> Address: """Address where the recursive revert contract address exists""" - return to_address(0xDEADBEEF) + return Address(0xDEADBEEF) @pytest.fixture @@ -49,9 +49,9 @@ def env() -> Environment: @pytest.fixture -def selfdestruct_recipient_address() -> str: +def selfdestruct_recipient_address() -> Address: """List of possible addresses that can receive a SELFDESTRUCT operation.""" - return to_address(0x1234) + return Address(0x1234) @pytest.fixture @@ -65,7 +65,7 @@ def recursive_revert_contract_code( yul: YulCompiler, selfdestruct_on_outer_call: int, selfdestruct_with_transfer_contract_code: SupportsBytes, - selfdestruct_with_transfer_contract_address: str, + selfdestruct_with_transfer_contract_address: Address, ) -> SupportsBytes: """ Contract code that: @@ -129,7 +129,7 @@ def recursive_revert_contract_code( @pytest.fixture -def selfdestruct_with_transfer_contract_address(entry_code_address: str) -> str: +def selfdestruct_with_transfer_contract_address(entry_code_address: Address) -> Address: """Contract address for contract that can selfdestruct and receive value""" res = compute_create_address(entry_code_address, 1) return res @@ -137,7 +137,7 @@ def selfdestruct_with_transfer_contract_address(entry_code_address: str) -> str: @pytest.fixture def selfdestruct_with_transfer_contract_code( - yul: YulCompiler, selfdestruct_recipient_address: str + yul: YulCompiler, selfdestruct_recipient_address: Address ) -> SupportsBytes: """Contract that can selfdestruct and receive value""" return yul( @@ -146,7 +146,7 @@ def selfdestruct_with_transfer_contract_code( let operation := calldataload(0) switch operation - case 0 /* no-op used for transfering value to this contract */ {{ + case 0 /* no-op used for transferring value to this contract */ {{ let times_called := sload(0) times_called := add(times_called, 1) sstore(0, times_called) @@ -175,20 +175,20 @@ def selfdestruct_with_transfer_contract_initcode( @pytest.fixture -def selfdestruct_with_transfer_initcode_copy_from_address() -> str: +def selfdestruct_with_transfer_initcode_copy_from_address() -> Address: """Address of a pre-existing contract we use to simply copy initcode from.""" - return to_address(0xABCD) + return Address(0xABCD) 
@pytest.fixture def pre( - recursive_revert_contract_address: str, + recursive_revert_contract_address: Address, recursive_revert_contract_code: SupportsBytes, - selfdestruct_with_transfer_initcode_copy_from_address: str, + selfdestruct_with_transfer_initcode_copy_from_address: Address, selfdestruct_with_transfer_contract_initcode: SupportsBytes, - selfdestruct_with_transfer_contract_address: str, + selfdestruct_with_transfer_contract_address: Address, yul: YulCompiler, -) -> Dict[str, Account]: +) -> Dict[Address, Account]: """Prestate for test_selfdestruct_not_created_in_same_tx_with_revert""" return { TestAddress: Account(balance=100_000_000_000_000_000_000), @@ -212,15 +212,15 @@ def pre( def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200 state_test: StateTestFiller, env: Environment, - pre: Dict[str, Account], - entry_code_address: str, + pre: Dict[Address, Account], + entry_code_address: Address, selfdestruct_on_outer_call: int, selfdestruct_with_transfer_contract_code: SupportsBytes, selfdestruct_with_transfer_contract_initcode: SupportsBytes, - selfdestruct_with_transfer_contract_address: str, - selfdestruct_recipient_address: str, - selfdestruct_with_transfer_initcode_copy_from_address: str, - recursive_revert_contract_address: str, + selfdestruct_with_transfer_contract_address: Address, + selfdestruct_recipient_address: Address, + selfdestruct_with_transfer_initcode_copy_from_address: Address, + recursive_revert_contract_address: Address, recursive_revert_contract_code: SupportsBytes, ): """ @@ -231,7 +231,7 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200 Recurse into a new call from transfers value to A, calls A.selfdestruct, and reverts. """ # noqa: E501 entry_code = Op.EXTCODECOPY( - Op.PUSH20(selfdestruct_with_transfer_initcode_copy_from_address), + selfdestruct_with_transfer_initcode_copy_from_address, 0, 0, len(bytes(selfdestruct_with_transfer_contract_initcode)), @@ -246,7 +246,7 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200 entry_code += Op.CALL( Op.GASLIMIT(), - Op.PUSH20(recursive_revert_contract_address), + recursive_revert_contract_address, 0, # value 0, # arg offset 0, # arg length @@ -254,7 +254,7 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200 0, # ret length ) - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account( code="0x", storage=Storage({0: selfdestruct_with_transfer_contract_address}) ), @@ -300,18 +300,18 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200 protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) @pytest.fixture def pre_with_selfdestructable( # noqa: SC200 - recursive_revert_contract_address: str, + recursive_revert_contract_address: Address, recursive_revert_contract_code: SupportsBytes, - selfdestruct_with_transfer_initcode_copy_from_address: str, + selfdestruct_with_transfer_initcode_copy_from_address: Address, selfdestruct_with_transfer_contract_initcode: SupportsBytes, - selfdestruct_with_transfer_contract_address: str, + selfdestruct_with_transfer_contract_address: Address, yul: YulCompiler, -) -> Dict[str, Account]: +) -> Dict[Address, Account]: """Preset for selfdestruct_not_created_in_same_tx_with_revert""" return { TestAddress: Account(balance=100_000_000_000_000_000_000), @@ -335,12 +335,12 @@ def pre_with_selfdestructable( # noqa: SC200 def test_selfdestruct_not_created_in_same_tx_with_revert( state_test: StateTestFiller, env: 
Environment, - entry_code_address: str, + entry_code_address: Address, selfdestruct_on_outer_call: int, selfdestruct_with_transfer_contract_code: SupportsBytes, - selfdestruct_with_transfer_contract_address: str, - selfdestruct_recipient_address: str, - recursive_revert_contract_address: str, + selfdestruct_with_transfer_contract_address: Address, + selfdestruct_recipient_address: Address, + recursive_revert_contract_address: Address, recursive_revert_contract_code: SupportsBytes, ): """ @@ -349,7 +349,7 @@ def test_selfdestruct_not_created_in_same_tx_with_revert( """ entry_code = Op.CALL( Op.GASLIMIT(), - Op.PUSH20(recursive_revert_contract_address), + recursive_revert_contract_address, 0, # value 0, # arg offset 0, # arg length @@ -357,7 +357,7 @@ def test_selfdestruct_not_created_in_same_tx_with_revert( 0, # ret length ) - pre: Dict[str, Account] = { + pre: Dict[Address, Account] = { TestAddress: Account(balance=100_000_000_000_000_000_000), selfdestruct_with_transfer_contract_address: Account( code=selfdestruct_with_transfer_contract_code @@ -367,7 +367,7 @@ def test_selfdestruct_not_created_in_same_tx_with_revert( ), } - post: Dict[str, Account] = { + post: Dict[Address, Account] = { entry_code_address: Account(code="0x"), } @@ -415,4 +415,4 @@ def test_selfdestruct_not_created_in_same_tx_with_revert( protected=False, ) - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py b/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py index ab29b0ca64..ca3f9f04fb 100644 --- a/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py +++ b/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py @@ -1,15 +1,16 @@ """ abstract: Tests [EIP-7516: BLOBBASEFEE opcode](https://eips.ethereum.org/EIPS/eip-7516) - Test BLOBGASFEE opcode [EIP-7516: BLOBBASEFEE opcode](https://eips.ethereum.org/EIPS/eip-7516) """ # noqa: E501 + +from dataclasses import replace from itertools import count from typing import Dict import pytest -from ethereum_test_tools import Account, Block, BlockchainTestFiller, Environment +from ethereum_test_tools import Account, Address, Block, BlockchainTestFiller, Environment from ethereum_test_tools import Opcodes as Op from ethereum_test_tools import StateTestFiller, Storage, TestAddress, Transaction @@ -17,8 +18,8 @@ REFERENCE_SPEC_VERSION = "2ade0452efe8124378f35284676ddfd16dd56ecd" # Code address used to call the test bytecode on every test case. -code_caller_address = 0x100 -code_callee_address = 0x200 +code_caller_address = Address(0x100) +code_callee_address = Address(0x200) BLOBBASEFEE_GAS = 2 @@ -38,7 +39,7 @@ def caller_code( """ Bytecode used to call the bytecode containing the BLOBBASEFEE opcode. """ - return Op.SSTORE(Op.NUMBER, Op.CALL(call_gas, Op.PUSH20(code_callee_address), 0, 0, 0, 0, 0)) + return Op.SSTORE(Op.NUMBER, Op.CALL(call_gas, code_callee_address, 0, 0, 0, 0, 0)) @pytest.fixture @@ -114,7 +115,7 @@ def test_blobbasefee_stack_overflow( state_test( env=Environment(), pre=pre, - txs=[tx], + tx=tx, post=post, ) @@ -147,21 +148,54 @@ def test_blobbasefee_out_of_gas( state_test( env=Environment(), pre=pre, - txs=[tx], + tx=tx, post=post, ) @pytest.mark.valid_at_transition_to("Cancun") def test_blobbasefee_before_fork( - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, pre: Dict, tx: Transaction, ): """ Tests that the BLOBBASEFEE opcode results on exception when called before the fork. 
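+    Before the fork activation timestamp, BLOBBASEFEE is an undefined opcode, so the sub-call fails and the caller stores 0.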
""" - code_caller_storage = Storage() + # Fork happens at timestamp 15_000 + timestamp = 7_500 + code_caller_pre_storage = Storage({1: 1}) + pre[code_caller_address] = replace(pre[code_caller_address], storage=code_caller_pre_storage) + post = { + code_caller_address: Account( + storage={1: 0}, + ), + code_callee_address: Account( + balance=0, + ), + } + state_test( + env=Environment( + timestamp=timestamp, + ), + pre=pre, + tx=tx, + post=post, + ) + + +@pytest.mark.valid_at_transition_to("Cancun") +def test_blobbasefee_during_fork( + blockchain_test: BlockchainTestFiller, + pre: Dict, + tx: Transaction, +): + """ + Tests that the BLOBBASEFEE opcode results on exception when called before the fork and + succeeds when called after the fork. + """ + code_caller_pre_storage = Storage() + code_caller_post_storage = Storage() nonce = count(0) @@ -169,18 +203,21 @@ def test_blobbasefee_before_fork( blocks = [] - for number, timestamp in enumerate(timestamps): + for block_number, timestamp in enumerate(timestamps, start=1): blocks.append( Block( txs=[tx.with_nonce(next(nonce))], timestamp=timestamp, ), ) - code_caller_storage[number + 1] = 0 if timestamp < 15_000 else 1 + # pre-set storage just to make sure we detect the change + code_caller_pre_storage[block_number] = 0xFF + code_caller_post_storage[block_number] = 0 if timestamp < 15_000 else 1 + pre[code_caller_address] = replace(pre[code_caller_address], storage=code_caller_pre_storage) post = { code_caller_address: Account( - storage=code_caller_storage, + storage=code_caller_post_storage, ), code_callee_address: Account( balance=0, diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index a405e109fa..0000000000 --- a/tests/conftest.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Pytest definitions applied to all tests. -""" -import warnings - -import pytest - - -def pytest_collection_modifyitems(items, config): - """ - Modify tests post collection. - - Here we override the default behavior of the `yul` fixture so that - solc compiles with shanghai instead of cancun (which is unavailable - in solc 0.8.20/0.8.21). - """ - for item in items: - if "Cancun" in item.name and "yul" in item.fixturenames: - if config.getoption("verbose") >= 2: - warnings.warn(f"Compiling Yul source for f{item.name} with Shanghai, not Cancun.") - else: - warnings.warn("Compiling Yul source with Shanghai, not Cancun.") - item.add_marker(pytest.mark.compile_yul_with("Shanghai")) diff --git a/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py b/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py new file mode 100644 index 0000000000..cffb53a624 --- /dev/null +++ b/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py @@ -0,0 +1,159 @@ +""" +abstract: Tests the nested CALL/CALLCODE opcode gas consumption with a positive value transfer. + This test is designed to investigate an issue identified in EthereumJS, as reported in: + https://github.com/ethereumjs/ethereumjs-monorepo/issues/3194. + + The issue pertains to the incorrect gas calculation for CALL/CALLCODE operations with a + positive value transfer, due to the pre-addition of the gas stipend (2300) to the currently + available gas instead of adding it to the new call frame. This bug was specific to the case + where insufficient gas was provided for the CALL/CALLCODE operation. Due to the pre-addition + of the stipend to the currently available gas, the case for insufficient gas was not properly + failing with an out-of-gas error. 
+ + Test setup: Given two smart contract accounts, 0x0A (caller) and 0x0B (callee): + 1) An arbitrary transaction calls into the contract 0x0A. + 2) Contract 0x0A executes a CALL to contract 0x0B with a specific gas limit (X). + 3) Contract 0x0B then attempts a CALL/CALLCODE to a non-existent contract 0x0C, + with a positive value transfer (activating the gas stipend). + 4) If the gas X provided by contract 0x0A to 0x0B is sufficient, contract 0x0B + will push 0x01 onto the stack after returning to the call frame in 0x0A. Otherwise, it + should push 0x00, indicating the insufficiency of gas X (for the bug in EthereumJS, the + CALL/CALLCODE operation would return 0x01 due to the pre-addition of the gas stipend). + 5) The resulting stack value is saved into contract 0x0A's storage, allowing us to + verify whether the provided gas was sufficient or insufficient. +""" + +from dataclasses import dataclass +from typing import Dict + +import pytest + +from ethereum_test_tools import ( + Account, + Address, + Environment, + StateTestFiller, + TestAddress, + Transaction, +) +from ethereum_test_tools.vm.opcode import Opcodes as Op + +""" +PUSH opcode cost is 3, GAS opcode cost is 2. +We need 6 PUSH's and one GAS to fill the stack for both CALL & CALLCODE, in the callee contract. +""" +CALLEE_INIT_STACK_GAS = 6 * 3 + 2 + +""" +CALL gas breakdowns: (https://www.evm.codes/#f1) +memory_exp_cost + code_exec_cost + address_access_cost + positive_value_cost + empty_account_cost += 0 + 0 + 2600 + 9000 + 25000 = 36600 +""" +CALL_GAS = 36600 +CALL_SUFFICIENT_GAS = CALL_GAS + CALLEE_INIT_STACK_GAS + +""" +CALLCODE gas breakdowns: (https://www.evm.codes/#f2) +memory_exp_cost + code_exec_cost + address_access_cost + positive_value_cost += 0 + 0 + 2600 + 9000 = 11600 +""" +CALLCODE_GAS = 11600 +CALLCODE_SUFFICIENT_GAS = CALLCODE_GAS + CALLEE_INIT_STACK_GAS + + +@dataclass(frozen=True) +class Contract: + """Contract accounts used in the test.""" + + caller: int = 0x0A + callee: int = 0x0B + nonexistent: int = 0x0C + + +@pytest.fixture +def caller_code(caller_gas_limit: int) -> bytes: + """ + Code to CALL the callee contract: + PUSH1 0x00 * 5 + PUSH2 Contract.callee + PUSH2 caller_gas <- gas limit set for CALL to callee contract + CALL + PUSH1 0x00 + SSTORE + """ + return Op.SSTORE(0, Op.CALL(caller_gas_limit, Contract.callee, 0, 0, 0, 0, 0)) + + +@pytest.fixture +def callee_code(callee_opcode: Op) -> bytes: + """ + Code called by the caller contract: + PUSH1 0x00 * 4 + PUSH1 0x01 <- for positive value transfer + PUSH2 Contract.nonexistent + GAS <- value doesn't matter + CALL/CALLCODE + """ + return callee_opcode(Op.GAS(), Contract.nonexistent, 1, 0, 0, 0, 0) + + +@pytest.fixture +def caller_tx() -> Transaction: + """Transaction that performs the call to the caller contract.""" + return Transaction( + chain_id=0x01, + nonce=0, + to=Address(Contract.caller), + value=1, + gas_limit=500000, + gas_price=7, + ) + + +@pytest.fixture +def pre(caller_code: bytes, callee_code: bytes) -> Dict[Address, Account]: # noqa: D103 + return { + Address(Contract.caller): Account( + balance=0x03, + code=caller_code, + nonce=1, + ), + Address(Contract.callee): Account( + balance=0x03, + code=callee_code, + nonce=1, + ), + TestAddress: Account( + balance=0x0BA1A9CE, + ), + } + + +@pytest.fixture +def post(is_sufficient_gas: bool) -> Dict[Address, Account]: # noqa: D103 + return { + Address(Contract.caller): Account(storage={0x00: 0x01 if is_sufficient_gas else 0x00}), + } + + +@pytest.mark.parametrize( + "callee_opcode, 
caller_gas_limit, is_sufficient_gas", + [ + (Op.CALL, CALL_SUFFICIENT_GAS, True), + (Op.CALL, CALL_SUFFICIENT_GAS - 1, False), + (Op.CALLCODE, CALLCODE_SUFFICIENT_GAS, True), + (Op.CALLCODE, CALLCODE_SUFFICIENT_GAS - 1, False), + ], +) +@pytest.mark.valid_from("London") +@pytest.mark.valid_until("Shanghai") +def test_value_transfer_gas_calculation( + state_test: StateTestFiller, + pre: Dict[str, Account], + caller_tx: Transaction, + post: Dict[str, Account], +): + """ + Tests the nested CALL/CALLCODE opcode gas consumption with a positive value transfer. + """ + state_test(env=Environment(), pre=pre, post=post, tx=caller_tx) diff --git a/tests/frontier/opcodes/test_dup.py b/tests/frontier/opcodes/test_dup.py index e6bd27ef50..00894dd284 100644 --- a/tests/frontier/opcodes/test_dup.py +++ b/tests/frontier/opcodes/test_dup.py @@ -1,21 +1,43 @@ """ abstract: Test DUP - Test the DUP opcodes. """ +import pytest + from ethereum_test_forks import Frontier, Homestead -from ethereum_test_tools import ( - Account, - Environment, - StateTestFiller, - Storage, - Transaction, - to_address, -) +from ethereum_test_tools import Account, Address, Environment +from ethereum_test_tools import Opcodes as Op +from ethereum_test_tools import StateTestFiller, Storage, TestAddress, Transaction -def test_dup(state_test: StateTestFiller, fork: str): +@pytest.mark.parametrize( + "dup_opcode", + [ + Op.DUP1, + Op.DUP2, + Op.DUP3, + Op.DUP4, + Op.DUP5, + Op.DUP6, + Op.DUP7, + Op.DUP8, + Op.DUP9, + Op.DUP10, + Op.DUP11, + Op.DUP12, + Op.DUP13, + Op.DUP14, + Op.DUP15, + Op.DUP16, + ], + ids=lambda op: str(op), +) +def test_dup( + state_test: StateTestFiller, + fork: str, + dup_opcode: Op, +): """ Test the DUP1-DUP16 opcodes. @@ -25,84 +47,59 @@ def test_dup(state_test: StateTestFiller, fork: str): by Ori Pomerantz. """ # noqa: E501 env = Environment() - pre = {"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=1000000000000000000000)} - txs = [] + pre = {TestAddress: Account(balance=1000000000000000000000)} post = {} + account = Address(0x100) + + # Push 0x00 - 0x10 onto the stack + account_code = b"".join([Op.PUSH1(i) for i in range(0x11)]) + + # Use the DUP opcode + account_code += dup_opcode + + # Save each stack value into different keys in storage + account_code += b"".join([Op.PUSH1(i) + Op.SSTORE for i in range(0x11)]) + + pre[account] = Account(code=account_code) + + tx = Transaction( + ty=0x0, + nonce=0, + to=account, + gas_limit=500000, + gas_price=10, + protected=False if fork in [Frontier, Homestead] else True, + data="", + ) + """ - We are setting up 16 accounts, ranging from 0x100 to 0x10f. - They push values into the stack from 0-16, but each contract uses a - different DUP opcode, and depending on the opcode used, the item copied - into the storage changes. + Storage will be structured as follows: + + 0x00: 0x10-0x01 (Depending on DUP opcode) + 0x01: 0x10 + 0x02: 0x0F + 0x03: 0x0E + 0x04: 0x0D + 0x05: 0x0C + 0x06: 0x0B + 0x07: 0x0A + 0x08: 0x09 + 0x09: 0x08 + 0x0A: 0x07 + 0x0B: 0x06 + 0x0C: 0x05 + 0x0D: 0x04 + 0x0E: 0x03 + 0x0F: 0x02 + 0x10: 0x01 + + DUP1 copies the first element of the stack (0x10). + DUP16 copies the 16th element of the stack (0x01). """ - for i in range(0, 16): - """ - Account 0x100 uses DUP1, - Account 0x10f uses DUP16. 
- """ - account = to_address(0x100 + i) - dup_opcode = 0x80 + i - - pre[account] = Account( - code=( - # Push 0 - 16 onto the stack - """0x6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 - 600A 600B 600C 600D 600E 600F 6010""" - + - # Use the DUP opcode for this account - hex(dup_opcode)[2:] - + - # Save each stack value into different keys in storage - """6000 55 6001 55 6002 55 6003 55 6004 55 6005 55 - 6006 55 6007 55 6008 55 6009 55 600A 55 600B 55 - 600C 55 600D 55 600E 55 600F 55 6010 55""" - ) - ) - - """ - Also we are sending one transaction to each account. - The storage of each will only change by one item: storage[0] - The value depends on the DUP opcode used. - """ - - tx = Transaction( - ty=0x0, - nonce=i, - to=account, - gas_limit=500000, - gas_price=10, - protected=False if fork in [Frontier, Homestead] else True, - data="", - ) - txs.append(tx) - - """ - Storage will be structured as follows: - - 0x00: 0x10-0x01 (Depending on DUP opcode) - 0x01: 0x10 - 0x02: 0x0F - 0x03: 0x0E - 0x04: 0x0D - 0x05: 0x0C - 0x06: 0x0B - 0x07: 0x0A - 0x08: 0x09 - 0x09: 0x08 - 0x0A: 0x07 - 0x0B: 0x06 - 0x0C: 0x05 - 0x0D: 0x04 - 0x0E: 0x03 - 0x0F: 0x02 - 0x10: 0x01 - - DUP1 copies the first element of the stack (0x10). - DUP16 copies the 16th element of the stack (0x01). - """ - s: Storage.StorageDictType = dict(zip(range(1, 17), range(16, 0, -1))) - s[0] = 16 - i - - post[account] = Account(storage=s) - - state_test(env=env, pre=pre, post=post, txs=txs) + s: Storage.StorageDictType = dict(zip(range(1, 17), range(16, 0, -1))) + s[0] = 16 - (dup_opcode.int() - 0x80) + + post[account] = Account(storage=s) + + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/homestead/yul/test_yul_example.py b/tests/homestead/yul/test_yul_example.py index b499613944..1e6fad07a2 100644 --- a/tests/homestead/yul/test_yul_example.py +++ b/tests/homestead/yul/test_yul_example.py @@ -59,4 +59,4 @@ def test_yul(state_test: StateTestFiller, yul: YulCompiler, fork: Fork): ), } - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/istanbul/eip1344_chainid/__init__.py b/tests/istanbul/eip1344_chainid/__init__.py index a1c2e63acb..bb41fcd2e7 100644 --- a/tests/istanbul/eip1344_chainid/__init__.py +++ b/tests/istanbul/eip1344_chainid/__init__.py @@ -1,5 +1,4 @@ """ abstract: Tests [EIP-1344: ChainID Opcode](https://eips.ethereum.org/EIPS/eip-1344) - Test cases for [EIP-1344: ChainID Opcode](https://eips.ethereum.org/EIPS/eip-1344). """ diff --git a/tests/istanbul/eip1344_chainid/test_chainid.py b/tests/istanbul/eip1344_chainid/test_chainid.py index 32eaffc327..e5a8d127ce 100644 --- a/tests/istanbul/eip1344_chainid/test_chainid.py +++ b/tests/istanbul/eip1344_chainid/test_chainid.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-1344: CHAINID opcode](https://eips.ethereum.org/EIPS/eip-1344) - Test cases for [EIP-1344: CHAINID opcode](https://eips.ethereum.org/EIPS/eip-1344). 
""" @@ -8,11 +7,11 @@ from ethereum_test_tools import ( Account, + Address, Environment, StateTestFiller, TestAddress, Transaction, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -34,7 +33,7 @@ def test_chainid(state_test: StateTestFiller): ) pre = { - to_address(0x100): Account(code=Op.SSTORE(1, Op.CHAINID) + Op.STOP), + Address(0x100): Account(code=Op.SSTORE(1, Op.CHAINID) + Op.STOP), TestAddress: Account(balance=1000000000000000000000), } @@ -42,13 +41,13 @@ def test_chainid(state_test: StateTestFiller): ty=0x0, chain_id=0x01, nonce=0, - to=to_address(0x100), + to=Address(0x100), gas_limit=100000000, gas_price=10, ) post = { - to_address(0x100): Account(code="0x4660015500", storage={"0x01": "0x01"}), + Address(0x100): Account(code="0x4660015500", storage={"0x01": "0x01"}), } - state_test(env=env, pre=pre, post=post, txs=[tx]) + state_test(env=env, pre=pre, post=post, tx=tx) diff --git a/tests/merge/__init__.py b/tests/merge/__init__.py deleted file mode 100644 index 57393da90f..0000000000 --- a/tests/merge/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -Test cases for EVM functionality introduced in the Merge. -""" diff --git a/tests/paris/__init__.py b/tests/paris/__init__.py new file mode 100644 index 0000000000..2fd68e2b89 --- /dev/null +++ b/tests/paris/__init__.py @@ -0,0 +1,3 @@ +""" +Test cases for EVM functionality introduced in Paris (Merge). +""" diff --git a/tests/merge/security/__init__.py b/tests/paris/security/__init__.py similarity index 100% rename from tests/merge/security/__init__.py rename to tests/paris/security/__init__.py diff --git a/tests/merge/security/test_selfdestruct_balance_bug.py b/tests/paris/security/test_selfdestruct_balance_bug.py similarity index 83% rename from tests/merge/security/test_selfdestruct_balance_bug.py rename to tests/paris/security/test_selfdestruct_balance_bug.py index 349c55d62a..2cb766c135 100644 --- a/tests/merge/security/test_selfdestruct_balance_bug.py +++ b/tests/paris/security/test_selfdestruct_balance_bug.py @@ -14,6 +14,7 @@ from ethereum_test_tools import ( Account, + Address, Block, BlockchainTestFiller, Initcode, @@ -21,12 +22,11 @@ Transaction, YulCompiler, compute_create_address, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op -@pytest.mark.compile_yul_with("Merge") # Shanghai refuses to compile SELFDESTRUCT +@pytest.mark.compile_yul_with("Paris") # Shanghai refuses to compile SELFDESTRUCT @pytest.mark.valid_from("Constantinople") def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, yul: YulCompiler): """ @@ -81,23 +81,23 @@ def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, yul: 0, Op.EXTCODESIZE(0xAA), ) - + Op.SSTORE(0xCA1101, Op.CALL(100000, Op.PUSH20(aa_location), 0, 0, 0, 0, 0)) - + Op.CALL(100000, Op.PUSH20(aa_location), 1, 0, 0, 0, 0) + + Op.SSTORE(0xCA1101, Op.CALL(100000, aa_location, 0, 0, 0, 0, 0)) + + Op.CALL(100000, aa_location, 1, 0, 0, 0, 0) ) - balance_code = Op.SSTORE(0xBA1AA, Op.BALANCE(Op.PUSH20(aa_location))) + balance_code = Op.SSTORE(0xBA1AA, Op.BALANCE(aa_location)) pre = { # sender TestAddress: Account(balance=1000000000), # caller - to_address(0xCC): Account(balance=1000000000, code=cc_code, nonce=1), + Address(0xCC): Account(balance=1000000000, code=cc_code, nonce=1), # stores balance of 0xaa after each tx 1 - to_address(0xBA11): Account(code=balance_code), + Address(0xBA11): Account(code=balance_code), # stores balance of 0xaa after each tx 2 - to_address(0xBA12): Account(code=balance_code), + 
Address(0xBA12): Account(code=balance_code), # Initcode of the self-destruct contract - to_address(0xAA): Account(code=aa_code), + Address(0xAA): Account(code=aa_code), } blocks = [ @@ -107,14 +107,14 @@ def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, yul: # calling with 1 wei call Transaction( nonce=0, - to=to_address(0xCC), + to=Address(0xCC), gas_limit=1000000, gas_price=10, ), # Dummy tx to store balance of 0xaa after first TX. Transaction( nonce=1, - to=to_address(0xBA11), + to=Address(0xBA11), gas_limit=100000, gas_price=10, ), @@ -129,7 +129,7 @@ def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, yul: # Dummy tx to store balance of 0xaa after second TX. Transaction( nonce=3, - to=to_address(0xBA12), + to=Address(0xBA12), gas_limit=100000, gas_price=10, ), @@ -139,13 +139,13 @@ def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, yul: post = { # Check call from caller has succeeded. - to_address(0xCC): Account(nonce=2, storage={0xCA1101: 1}), + Address(0xCC): Account(nonce=2, storage={0xCA1101: 1}), # Check balance of 0xaa after tx 1 is 0 wei, i.e self-destructed. # Vulnerable versions should return 1 wei. - to_address(0xBA11): Account(storage={0xBA1AA: 0}), + Address(0xBA11): Account(storage={0xBA1AA: 0}), # Check that 0xaa exists and balance after tx 2 is 5 wei. # Vulnerable versions should return 6 wei. - to_address(0xBA12): Account(storage={0xBA1AA: 5}), + Address(0xBA12): Account(storage={0xBA1AA: 5}), aa_location: Account(storage={0: 0}), } diff --git a/tests/prague/__init__.py b/tests/prague/__init__.py new file mode 100644 index 0000000000..8dfd9d4de8 --- /dev/null +++ b/tests/prague/__init__.py @@ -0,0 +1,3 @@ +""" +Test cases for EVM functionality introduced in Prague. +""" diff --git a/tests/shanghai/eip3651_warm_coinbase/__init__.py b/tests/shanghai/eip3651_warm_coinbase/__init__.py index 9db9ed8309..f24c6b66a5 100644 --- a/tests/shanghai/eip3651_warm_coinbase/__init__.py +++ b/tests/shanghai/eip3651_warm_coinbase/__init__.py @@ -1,5 +1,4 @@ """ abstract: Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651) - Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651). """ diff --git a/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py b/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py index 32324eff19..78d7b0dabb 100644 --- a/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py +++ b/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py @@ -1,23 +1,21 @@ """ abstract: Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651) - Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651). note: Tests ported from: - - [ethereum/tests/pull/1082](https://github.com/ethereum/tests/pull/1082). 
""" import pytest -from ethereum_test_forks import Shanghai, is_fork +from ethereum_test_forks import Shanghai from ethereum_test_tools import ( Account, + Address, CodeGasMeasure, Environment, TestAddress, Transaction, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -103,7 +101,7 @@ def test_warm_coinbase_call_out_of_gas( pre = { TestAddress: Account(balance=1000000000000000000000), caller_address: Account(code=caller_code), - to_address(contract_under_test_address): Account(code=contract_under_test_code), + Address(contract_under_test_address): Account(code=contract_under_test_code), } tx = Transaction( @@ -117,7 +115,7 @@ def test_warm_coinbase_call_out_of_gas( post = {} - if use_sufficient_gas and is_fork(fork=fork, which=Shanghai): + if use_sufficient_gas and fork >= Shanghai: post[caller_address] = Account( storage={ # On shanghai and beyond, calls with only 100 gas to @@ -138,7 +136,7 @@ def test_warm_coinbase_call_out_of_gas( env=env, pre=pre, post=post, - txs=[tx], + tx=tx, tag="opcode_" + opcode, ) @@ -211,7 +209,7 @@ def test_warm_coinbase_call_out_of_gas( ] -@pytest.mark.valid_from("Merge") # these tests fill for fork >= Berlin +@pytest.mark.valid_from("Paris") # these tests fill for fork >= Berlin @pytest.mark.parametrize( "opcode,code_gas_measure", gas_measured_opcodes, @@ -238,13 +236,13 @@ def test_warm_coinbase_gas_usage(state_test, fork, opcode, code_gas_measure): timestamp=1000, ) - measure_address = to_address(0x100) + measure_address = Address(0x100) pre = { TestAddress: Account(balance=1000000000000000000000), measure_address: Account(code=code_gas_measure, balance=1000000000000000000000), } - if is_fork(fork, Shanghai): + if fork >= Shanghai: expected_gas = GAS_REQUIRED_CALL_WARM_ACCOUNT # Warm account access cost after EIP-3651 else: expected_gas = 2600 # Cold account access cost before EIP-3651 @@ -269,6 +267,6 @@ def test_warm_coinbase_gas_usage(state_test, fork, opcode, code_gas_measure): env=env, pre=pre, post=post, - txs=[tx], + tx=tx, tag="opcode_" + opcode.lower(), ) diff --git a/tests/shanghai/eip3855_push0/__init__.py b/tests/shanghai/eip3855_push0/__init__.py index 70c81df320..a8372e6ccf 100644 --- a/tests/shanghai/eip3855_push0/__init__.py +++ b/tests/shanghai/eip3855_push0/__init__.py @@ -1,5 +1,4 @@ """ abstract: Tests [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855) - Tests for [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855). """ diff --git a/tests/shanghai/eip3855_push0/test_push0.py b/tests/shanghai/eip3855_push0/test_push0.py index 582bc9e83a..456a0fa8d4 100644 --- a/tests/shanghai/eip3855_push0/test_push0.py +++ b/tests/shanghai/eip3855_push0/test_push0.py @@ -1,10 +1,8 @@ """ abstract: Tests [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855) - Tests for [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855). note: Tests ported from: - - [ethereum/tests/pull/1033](https://github.com/ethereum/tests/pull/1033). 
""" @@ -12,12 +10,12 @@ from ethereum_test_tools import ( Account, + Address, CodeGasMeasure, Environment, StateTestFiller, TestAddress, Transaction, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -44,7 +42,7 @@ def post(): # noqa: D103 @pytest.fixture def addr_1(): # noqa: D103 - return to_address(0x100) + return Address(0x100) @pytest.fixture @@ -71,7 +69,7 @@ def test_push0_key_sstore( pre[addr_1] = Account(code=code) post[addr_1] = Account(storage={0x00: 0x01}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="key_sstore") + state_test(env=env, pre=pre, post=post, tx=tx, tag="key_sstore") def test_push0_fill_stack( @@ -92,7 +90,7 @@ def test_push0_fill_stack( pre[addr_1] = Account(code=code) post[addr_1] = Account(storage={0x00: 0x01}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="fill_stack") + state_test(env=env, pre=pre, post=post, tx=tx, tag="fill_stack") def test_push0_stack_overflow( @@ -112,7 +110,7 @@ def test_push0_stack_overflow( pre[addr_1] = Account(code=code) post[addr_1] = Account(storage={0x00: 0x00}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="stack_overflow") + state_test(env=env, pre=pre, post=post, tx=tx, tag="stack_overflow") def test_push0_storage_overwrite( @@ -131,7 +129,7 @@ def test_push0_storage_overwrite( pre[addr_1] = Account(code=code, storage={0x00: 0x0A, 0x01: 0x0A}) post[addr_1] = Account(storage={0x00: 0x02, 0x01: 0x00}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="storage_overwrite") + state_test(env=env, pre=pre, post=post, tx=tx, tag="storage_overwrite") def test_push0_during_staticcall( @@ -145,7 +143,7 @@ def test_push0_during_staticcall( """ Test PUSH0 during STATICCALL. """ - addr_2 = to_address(0x200) + addr_2 = Address(0x200) code_1 = ( Op.SSTORE(0, Op.STATICCALL(100000, 0x200, 0, 0, 0, 0)) @@ -159,7 +157,7 @@ def test_push0_during_staticcall( pre[addr_2] = Account(code=code_2) post[addr_1] = Account(storage={0x00: 0x01, 0x01: 0xFF}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="during_staticcall") + state_test(env=env, pre=pre, post=post, tx=tx, tag="during_staticcall") def test_push0_before_jumpdest( @@ -178,7 +176,7 @@ def test_push0_before_jumpdest( pre[addr_1] = Account(code=code) post[addr_1] = Account(storage={0x00: 0x01}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="before_jumpdest") + state_test(env=env, pre=pre, post=post, tx=tx, tag="before_jumpdest") def test_push0_gas_cost( @@ -200,4 +198,4 @@ def test_push0_gas_cost( pre[addr_1] = Account(code=code) post[addr_1] = Account(storage={0x00: 0x02}) - state_test(env=env, pre=pre, post=post, txs=[tx], tag="gas_cost") + state_test(env=env, pre=pre, post=post, tx=tx, tag="gas_cost") diff --git a/tests/shanghai/eip3860_initcode/__init__.py b/tests/shanghai/eip3860_initcode/__init__.py index 164c5c2093..62ee051a08 100644 --- a/tests/shanghai/eip3860_initcode/__init__.py +++ b/tests/shanghai/eip3860_initcode/__init__.py @@ -1,5 +1,4 @@ """ abstract: Test [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860) - Tests for [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860). 
""" diff --git a/tests/shanghai/eip3860_initcode/test_initcode.py b/tests/shanghai/eip3860_initcode/test_initcode.py index 9ac605b032..d9dae80879 100644 --- a/tests/shanghai/eip3860_initcode/test_initcode.py +++ b/tests/shanghai/eip3860_initcode/test_initcode.py @@ -1,10 +1,8 @@ """ abstract: Test [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860) - Tests for [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860). note: Tests ported from: - - [ethereum/tests/pull/990](https://github.com/ethereum/tests/pull/990) - [ethereum/tests/pull/1012](https://github.com/ethereum/tests/pull/990) """ @@ -15,19 +13,18 @@ from ethereum_test_tools import ( Account, - Block, - BlockchainTestFiller, + Address, Environment, Initcode, StateTestFiller, TestAddress, Transaction, + TransactionException, Yul, ceiling_division, compute_create2_address, compute_create_address, eip_2028_transaction_data_cost, - to_address, ) from ethereum_test_tools.vm.opcode import Opcodes as Op @@ -200,7 +197,7 @@ def get_initcode_name(val): ], ids=get_initcode_name, ) -def test_contract_creating_tx(blockchain_test: BlockchainTestFiller, initcode: Initcode): +def test_contract_creating_tx(state_test: StateTestFiller, initcode: Initcode): """ Test cases using a contract creating transaction @@ -232,24 +229,21 @@ def test_contract_creating_tx(blockchain_test: BlockchainTestFiller, initcode: I gas_price=10, ) - block = Block(txs=[tx]) - if len(initcode) > MAX_INITCODE_SIZE and eip_3860_active: # Initcode is above the max size, tx inclusion in the block makes # it invalid. post[created_contract_address] = Account.NONEXISTENT - tx.error = "max initcode size exceeded" - block.exception = "max initcode size exceeded" + tx.error = TransactionException.INITCODE_SIZE_EXCEEDED else: # Initcode is at or below the max size, tx inclusion in the block # is ok and the contract is successfully created. post[created_contract_address] = Account(code=Op.STOP) - blockchain_test( + state_test( pre=pre, post=post, - blocks=[block], - genesis_environment=env, + tx=tx, + env=env, tag=f"{initcode.name}", ) @@ -339,13 +333,13 @@ def pre(self) -> Dict[Any, Any]: # noqa: D102 } @pytest.fixture - def tx_error(self, gas_test_case) -> str | None: + def tx_error(self, gas_test_case) -> TransactionException | None: """ Test that the transaction is invalid if too little intrinsic gas is specified, otherwise the tx succeeds. """ if gas_test_case == "too_little_intrinsic_gas": - return "intrinsic gas too low" + return TransactionException.INTRINSIC_GAS_TOO_LOW return None @pytest.fixture @@ -382,14 +376,6 @@ def tx( error=tx_error, ) - @pytest.fixture - def block(self, tx, tx_error) -> Block: - """ - Test that the tx_error is also propagated on the Block for the case of - too little intrinsic gas. - """ - return Block(txs=[tx], exception=tx_error) - @pytest.fixture def post( self, @@ -413,14 +399,14 @@ def post( def test_gas_usage( self, - blockchain_test: BlockchainTestFiller, + state_test: StateTestFiller, gas_test_case: str, initcode: Initcode, exact_intrinsic_gas, exact_execution_gas, env, pre, - block, + tx, post, ): """ @@ -436,11 +422,11 @@ def test_gas_usage( "equivalent to that of 'test_exact_intrinsic_gas'." 
) - blockchain_test( + state_test( pre=pre, post=post, - blocks=[block], - genesis_environment=env, + tx=tx, + env=env, tag=f"{initcode.name}_{gas_test_case}", ) @@ -544,11 +530,11 @@ def test_create_opcode_initcode( pre = { TestAddress: Account(balance=1000000000000000000000), - to_address(0x100): Account( + Address(0x100): Account( code=create_code, nonce=1, ), - to_address(0x200): Account( + Address(0x200): Account( code=call_code, nonce=1, ), @@ -558,7 +544,7 @@ def test_create_opcode_initcode( tx = Transaction( nonce=0, - to=to_address(0x200), + to=Address(0x200), data=initcode, gas_limit=10000000, gas_price=10, @@ -577,7 +563,7 @@ def test_create_opcode_initcode( if len(initcode) > MAX_INITCODE_SIZE and eip_3860_active: # Call returns 0 as out of gas s[0]==1 - post[to_address(0x200)] = Account( + post[Address(0x200)] = Account( nonce=1, storage={ 0: 1, @@ -586,7 +572,7 @@ def test_create_opcode_initcode( ) post[created_contract_address] = Account.NONEXISTENT - post[to_address(0x100)] = Account( + post[Address(0x100)] = Account( nonce=1, storage={ 0: 0, @@ -611,7 +597,7 @@ def test_create_opcode_initcode( expected_gas_usage += calculate_initcode_word_cost(len(initcode)) # Call returns 1 as valid initcode length s[0]==1 && s[1]==1 - post[to_address(0x200)] = Account( + post[Address(0x200)] = Account( nonce=1, storage={ 0: 0, @@ -620,7 +606,7 @@ def test_create_opcode_initcode( ) post[created_contract_address] = Account(code=initcode.deploy_code) - post[to_address(0x100)] = Account( + post[Address(0x100)] = Account( nonce=2, storage={ 0: created_contract_address, @@ -632,6 +618,6 @@ def test_create_opcode_initcode( env=env, pre=pre, post=post, - txs=[tx], + tx=tx, tag=f"{initcode.name}_{opcode}", ) diff --git a/tests/shanghai/eip4895_withdrawals/__init__.py b/tests/shanghai/eip4895_withdrawals/__init__.py index c4490269ca..bb4a893dcb 100644 --- a/tests/shanghai/eip4895_withdrawals/__init__.py +++ b/tests/shanghai/eip4895_withdrawals/__init__.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-4895: Beacon chain withdrawals](https://eips.ethereum.org/EIPS/eip-4895) - Test cases for [EIP-4895: Beacon chain push withdrawals as operations](https://eips.ethereum.org/EIPS/eip-4895). """ diff --git a/tests/shanghai/eip4895_withdrawals/test_withdrawals.py b/tests/shanghai/eip4895_withdrawals/test_withdrawals.py index 50f479fdb1..9dda997c0e 100644 --- a/tests/shanghai/eip4895_withdrawals/test_withdrawals.py +++ b/tests/shanghai/eip4895_withdrawals/test_withdrawals.py @@ -1,6 +1,5 @@ """ abstract: Tests [EIP-4895: Beacon chain withdrawals](https://eips.ethereum.org/EIPS/eip-4895) - Test cases for [EIP-4895: Beacon chain push withdrawals as operations](https://eips.ethereum.org/EIPS/eip-4895). 
""" @@ -12,14 +11,15 @@ from ethereum_test_tools import ( Account, + Address, Block, BlockchainTestFiller, + Hash, TestAddress, Transaction, + TransactionException, Withdrawal, compute_create_address, - to_address, - to_hash, ) from ethereum_test_tools.vm.opcode import Opcodes as Op from evm_transition_tool import TransitionTool @@ -64,7 +64,7 @@ def tx(self): # noqa: D102 nonce=0, gas_price=ONE_GWEI, gas_limit=21000, - to=to_address(0x100), + to=Address(0x100), data="0x", ) @@ -82,11 +82,11 @@ def blocks(self, tx: Transaction, withdrawal: Withdrawal, test_case): # noqa: D if test_case == "tx_in_withdrawals_block": return [ Block( - txs=[tx.with_error("intrinsic gas too low: have 0, want 21000")], + txs=[tx.with_error(TransactionException.INSUFFICIENT_ACCOUNT_FUNDS)], withdrawals=[ withdrawal, ], - exception="Transaction without funds", + exception=TransactionException.INSUFFICIENT_ACCOUNT_FUNDS, ) ] if test_case == "tx_after_withdrawals_block": @@ -136,8 +136,8 @@ def test_use_value_in_contract(blockchain_test: BlockchainTestFiller): pre = { TestAddress: Account(balance=1000000000000000000000, nonce=0), - to_address(0x100): Account(balance=0, code=SEND_ONE_GWEI), - to_address(0x200): Account(balance=1), + Address(0x100): Account(balance=0, code=SEND_ONE_GWEI), + Address(0x200): Account(balance=1), } tx = Transaction( # Transaction sent from the `TestAddress`, which has 0 balance at start @@ -145,13 +145,13 @@ def test_use_value_in_contract(blockchain_test: BlockchainTestFiller): value=0, gas_price=10, gas_limit=100000, - to=to_address(0x100), + to=Address(0x100), data="0x", ) withdrawal = Withdrawal( index=0, validator=0, - address=to_address(0x100), + address=Address(0x100), amount=1, ) @@ -165,13 +165,13 @@ def test_use_value_in_contract(blockchain_test: BlockchainTestFiller): ), ] post = { - to_address(0x100): Account( + Address(0x100): Account( storage={ 0x1: 0x0, # Call fails on the first attempt 0x2: 0x1, # Succeeds on the second attempt } ), - to_address(0x200): Account( + Address(0x200): Account( balance=ONE_GWEI + 1, ), } @@ -190,10 +190,10 @@ def test_balance_within_block(blockchain_test: BlockchainTestFiller): ) pre = { TestAddress: Account(balance=1000000000000000000000, nonce=0), - to_address(0x100): Account( + Address(0x100): Account( code=SAVE_BALANCE_ON_BLOCK_NUMBER, ), - to_address(0x200): Account( + Address(0x200): Account( balance=ONE_GWEI, ), } @@ -203,15 +203,15 @@ def test_balance_within_block(blockchain_test: BlockchainTestFiller): Transaction( nonce=0, gas_limit=100000, - to=to_address(0x100), - data=to_hash(0x200), + to=Address(0x100), + data=Hash(0x200), ) ], withdrawals=[ Withdrawal( index=0, validator=0, - address=to_address(0x200), + address=Address(0x200), amount=1, ) ], @@ -221,15 +221,15 @@ def test_balance_within_block(blockchain_test: BlockchainTestFiller): Transaction( nonce=1, gas_limit=100000, - to=to_address(0x100), - data=to_hash(0x200), + to=Address(0x100), + data=Hash(0x200), ) ] ), ] post = { - to_address(0x100): Account( + Address(0x100): Account( storage={ 1: ONE_GWEI, 2: 2 * ONE_GWEI, @@ -251,17 +251,17 @@ class TestMultipleWithdrawalsSameAddress: """ ADDRESSES = [ - to_address(0x0), # Zero address - to_address(0x1), # Pre-compiles - to_address(0x2), - to_address(0x3), - to_address(0x4), - to_address(0x5), - to_address(0x6), - to_address(0x7), - to_address(0x8), - to_address(0x9), - to_address(2**160 - 1), + Address(0x0), # Zero address + Address(0x1), # Pre-compiles + Address(0x2), + Address(0x3), + Address(0x4), + Address(0x5), + 
Address(0x6), + Address(0x7), + Address(0x8), + Address(0x9), + Address(2**160 - 1), ] @pytest.fixture @@ -343,7 +343,7 @@ def test_many_withdrawals(blockchain_test: BlockchainTestFiller): withdrawals = [] post = {} for i in range(N): - addr = to_address(0x100 * i) + addr = Address(0x100 * i) amount = i * 1 pre[addr] = Account( code=Op.SSTORE(Op.NUMBER, 1), @@ -379,11 +379,11 @@ def test_self_destructing_account(blockchain_test: BlockchainTestFiller): """ pre = { TestAddress: Account(balance=1000000000000000000000, nonce=0), - to_address(0x100): Account( + Address(0x100): Account( code=Op.SELFDESTRUCT(Op.CALLDATALOAD(0)), balance=(100 * ONE_GWEI), ), - to_address(0x200): Account( + Address(0x200): Account( balance=1, ), } @@ -394,14 +394,14 @@ def test_self_destructing_account(blockchain_test: BlockchainTestFiller): nonce=0, gas_price=10, gas_limit=100000, - to=to_address(0x100), - data=to_hash(0x200), + to=Address(0x100), + data=Hash(0x200), ) withdrawal = Withdrawal( index=0, validator=0, - address=to_address(0x100), + address=Address(0x100), amount=(99), ) @@ -411,11 +411,11 @@ def test_self_destructing_account(blockchain_test: BlockchainTestFiller): ) post = { - to_address(0x100): Account( + Address(0x100): Account( code=None, balance=(99 * ONE_GWEI), ), - to_address(0x200): Account( + Address(0x200): Account( code=None, balance=(100 * ONE_GWEI) + 1, ), @@ -487,16 +487,16 @@ def test_no_evm_execution(blockchain_test: BlockchainTestFiller): """ pre = { TestAddress: Account(balance=1000000000000000000000, nonce=0), - to_address(0x100): Account( + Address(0x100): Account( code=Op.SSTORE(Op.NUMBER, 1), ), - to_address(0x200): Account( + Address(0x200): Account( code=Op.SSTORE(Op.NUMBER, 1), ), - to_address(0x300): Account( + Address(0x300): Account( code=Op.SSTORE(Op.NUMBER, 1), ), - to_address(0x400): Account( + Address(0x400): Account( code=Op.SSTORE(Op.NUMBER, 1), ), } @@ -506,25 +506,25 @@ def test_no_evm_execution(blockchain_test: BlockchainTestFiller): Transaction( nonce=0, gas_limit=100000, - to=to_address(0x300), + to=Address(0x300), ), Transaction( nonce=1, gas_limit=100000, - to=to_address(0x400), + to=Address(0x400), ), ], withdrawals=[ Withdrawal( index=0, validator=0, - address=to_address(0x100), + address=Address(0x100), amount=1, ), Withdrawal( index=1, validator=1, - address=to_address(0x200), + address=Address(0x200), amount=1, ), ], @@ -534,25 +534,25 @@ def test_no_evm_execution(blockchain_test: BlockchainTestFiller): Transaction( nonce=2, gas_limit=100000, - to=to_address(0x100), + to=Address(0x100), ), Transaction( nonce=3, gas_limit=100000, - to=to_address(0x200), + to=Address(0x200), ), ], withdrawals=[ Withdrawal( index=0, validator=0, - address=to_address(0x300), + address=Address(0x300), amount=1, ), Withdrawal( index=1, validator=1, - address=to_address(0x400), + address=Address(0x400), amount=1, ), ], @@ -560,10 +560,10 @@ def test_no_evm_execution(blockchain_test: BlockchainTestFiller): ] post = { - to_address(0x100): Account(storage={2: 1}), - to_address(0x200): Account(storage={2: 1}), - to_address(0x300): Account(storage={1: 1}), - to_address(0x400): Account(storage={1: 1}), + Address(0x100): Account(storage={2: 1}), + Address(0x200): Account(storage={2: 1}), + Address(0x300): Account(storage={1: 1}), + Address(0x400): Account(storage={1: 1}), } blockchain_test(pre=pre, post=post, blocks=blocks) @@ -600,7 +600,7 @@ def test_zero_amount( """ pre = { TestAddress: Account(balance=1000000000000000000000, nonce=0), - to_address(0x200): Account( + Address(0x200): 
Account( code="0x00", balance=0, ), @@ -611,46 +611,46 @@ def test_zero_amount( Withdrawal( index=0, validator=0, - address=to_address(0x100), + address=Address(0x100), amount=0, ), # No value, touched account Withdrawal( index=0, validator=0, - address=to_address(0x200), + address=Address(0x200), amount=0, ), # Withdrawal with value Withdrawal( index=1, validator=0, - address=to_address(0x300), + address=Address(0x300), amount=1, ), # Withdrawal with maximum amount Withdrawal( index=2, validator=0, - address=to_address(0x400), + address=Address(0x400), amount=2**64 - 1, ), ] all_post = { - to_address(0x100): Account.NONEXISTENT, - to_address(0x200): Account(code="0x00", balance=0), - to_address(0x300): Account(balance=ONE_GWEI), - to_address(0x400): Account(balance=(2**64 - 1) * ONE_GWEI), + Address(0x100): Account.NONEXISTENT, + Address(0x200): Account(code="0x00", balance=0), + Address(0x300): Account(balance=ONE_GWEI), + Address(0x400): Account(balance=(2**64 - 1) * ONE_GWEI), } withdrawals: List[Withdrawal] = [] - post: Mapping[str, Account | object] = {} + post: Mapping[Address, Account | object] = {} if test_case == ZeroAmountTestCases.TWO_ZERO: withdrawals = all_withdrawals[0:2] post = { account: all_post[account] for account in post - if account in [to_address(0x100), to_address(0x200)] + if account in [Address(0x100), Address(0x200)] } elif test_case == ZeroAmountTestCases.THREE_ONE_WITH_VALUE: withdrawals = all_withdrawals[0:3] @@ -659,9 +659,9 @@ def test_zero_amount( for account in post if account in [ - to_address(0x100), - to_address(0x200), - to_address(0x300), + Address(0x100), + Address(0x200), + Address(0x300), ] } elif test_case == ZeroAmountTestCases.FOUR_ONE_WITH_MAX: @@ -706,7 +706,7 @@ def test_large_amount(blockchain_test: BlockchainTestFiller): post = {} for i, amount in enumerate(amounts): - addr = to_address(0x100 * (i + 1)) + addr = Address(0x100 * (i + 1)) withdrawals.append( Withdrawal( index=i, @@ -747,7 +747,7 @@ def test_withdrawing_to_precompiles( Withdrawal( index=0, validator=0, - address=to_address(precompile), + address=Address(precompile), amount=amount, ) ] @@ -758,7 +758,7 @@ def test_withdrawing_to_precompiles( Transaction( nonce=0, gas_limit=100000, - to=to_address(precompile), + to=Address(precompile), ), ], ), diff --git a/tox.ini b/tox.ini index 04f8e382bf..a3a8090f76 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,10 @@ env_list = [main] development_fork = Cancun +[testenv] +package = wheel +wheel_build_env = .pkg + [testenv:framework] description = Run checks on helper libraries and test framework diff --git a/whitelist.txt b/whitelist.txt index 1b4a5fdc83..5707f3e8bf 100644 --- a/whitelist.txt +++ b/whitelist.txt @@ -5,17 +5,22 @@ address address2 alloc api +apis at5 balance base64 basefee +basename bb besu big0 big1 +blobgasfee blockchain BlockchainTest BlockchainTestFiller +BlockchainTests +BlockException blockhash blocknum blocktest @@ -39,6 +44,7 @@ cancun cd chainid changelog +chfast classdict cli2 codeAddr @@ -58,26 +64,32 @@ customizations Customizations danceratopz dao +dataclasses datastructures delitem +Dencun dev devnet difficulty dir dirname discordapp +docstring docstrings dup eip eips EIPs endianness +EngineAPI +enum env eof esbenp eth ethash ethereum +EthereumJS ethereum's evaluatable evm @@ -87,6 +99,7 @@ executables extcodecopy extcodehash extcodesize +filesystem fn fname forkchoice @@ -108,11 +121,17 @@ git's github Github glightbox +globals go-ethereum's +Golang gwei +hardfork hash32 +Hashable hasher +HeaderNonce hexary 
+HexNumber hexsha homebrew html @@ -185,7 +204,7 @@ pre Pre precompile prepend -prevrandao +PrevRandao programmatically px py @@ -198,6 +217,7 @@ qGpsxSA quickstart radd randao +readme readthedocs reentrancy reentrant @@ -207,6 +227,7 @@ repos returndatacopy returndatasize rlp +rpc runtime sandboxed secp256k1 @@ -225,12 +246,14 @@ squidfunk src stackoverflow StateTest +StateTests StateTestFiller staticcalled stExample str streetsidesoftware subcall +subclasscheck subdirectories subdirectory subgraph @@ -238,14 +261,19 @@ substring sudo t8n tamasfe +testability TestAddress +TestContractCreationGasUsage TestMultipleWithdrawalsSameAddress textwrap +ThreeHrSleep time15k timestamp +todo toml tox Tox +TransactionException trie tstorage tx @@ -291,6 +319,7 @@ autouse basedir callspec collectonly +copyfile copytree dedent dest @@ -311,6 +340,7 @@ makepyfile metafunc modifyitems nodeid +oog optparser originalname parametrized @@ -320,12 +350,14 @@ parametrize parametrizer parametrizers popen +prevrandao pytester pytestmark readline regexes reportinfo ret +rglob ripemd rjust runpytest @@ -334,11 +366,14 @@ subclasses subcommand substring substrings +testdir +tmpdir tryfirst trylast usefixtures writelines xfail +ZeroPaddedHexNumber stop add mul @@ -496,3 +531,10 @@ precompile precompiles deployer 0x +modexp +0x00 +0x10 + +fi +url +gz