diff --git a/.gitignore b/.gitignore
index 14b5085c1d..0c6c63da7c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -61,6 +61,8 @@ site
 venv-docs/
 .pyspelling_en.dict
+# cached fixture downloads (consume)
+cached_downloads/
 # pytest report assets
 *.html
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index fc3daaf6a3..8facf4f761 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -50,6 +50,11 @@ Test fixtures for use by clients are available for each release on the [Github r
 ### 🛠️ Framework
 
+- ✨ Add two `consume` commands [#339](https://github.com/ethereum/execution-spec-tests/pull/339):
+
+  1. `consume direct` - Execute a test fixture directly against a client using a `blocktest`-like command (currently only geth is supported).
+  2. `consume rlp` - Execute a test fixture in a hive simulator against a client that imports the test's genesis config and blocks as RLP upon startup. This is a rewrite of the [ethereum/consensus](https://github.com/ethereum/hive/tree/master/simulators/ethereum/consensus) Golang simulator.
+
 - ✨ Add Prague to forks ([#419](https://github.com/ethereum/execution-spec-tests/pull/419)).
 - ✨ Improve handling of the argument passed to `solc --evm-version` when compiling Yul code ([#418](https://github.com/ethereum/execution-spec-tests/pull/418)).
 - 🐞 Fix `fill -m yul_test` which failed to filter tests that are (dynamically) marked as a yul test ([#418](https://github.com/ethereum/execution-spec-tests/pull/418)).
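For orientation, hypothetical invocations of the two new commands (assuming fixtures were generated to the default `./fixtures` directory; `consume rlp` additionally requires a hive instance started in `--dev` mode, see the `pytest_hive` plugin below):

```console
consume direct --input=./fixtures --evm-bin=evm
consume rlp --input=./fixtures
```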
diff --git a/setup.cfg b/setup.cfg
index e3bcd23b85..a98dc5c056 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,9 +23,13 @@ package_dir =
 python_requires = >=3.10
 install_requires =
-    ethereum@git+https://github.com/ethereum/execution-specs.git
+    click>=8.1.0,<9
+    ethereum@git+https://github.com/ethereum/execution-specs
+    hive.py@git+https://github.com/danceratopz/hive.py@chore/setup.cfg/move-mypy-deps-to-lint-extras
     setuptools
     types-setuptools
+    PyJWT>=2.3.0,<3
+    tenacity>8.2.0,<9
     bidict>=0.23,<1
     requests>=2.31.0,<3
     colorlog>=6.7.0,<7
@@ -47,12 +51,16 @@ ethereum_test_forks =
     py.typed
 evm_transition_tool =
     py.typed
+pytest_plugins =
+    py.typed
 
 [options.entry_points]
 console_scripts =
     fill = cli.pytest_commands:fill
     tf = cli.pytest_commands:tf
     checkfixtures = cli.check_fixtures:check_fixtures
+    consume = cli.pytest_commands:consume
+    genindex = cli.gen_index:generate_fixtures_index_cli
     gentest = cli.gentest:make_test
     pyspelling_soft_fail = cli.tox_helpers:pyspelling
     markdownlintcli2_soft_fail = cli.tox_helpers:markdownlint
diff --git a/src/cli/gen_index.py b/src/cli/gen_index.py
new file mode 100644
index 0000000000..639befe0ab
--- /dev/null
+++ b/src/cli/gen_index.py
@@ -0,0 +1,218 @@
+"""
+Generate an index file of all the json fixtures in the specified directory.
+"""
+import datetime
+import json
+import os
+from pathlib import Path
+from typing import List
+
+import click
+import rich
+from rich.progress import (
+    BarColumn,
+    Column,
+    Progress,
+    TaskProgressColumn,
+    TextColumn,
+    TimeElapsedColumn,
+)
+
+from ethereum_test_tools.common.base_types import HexNumber
+from ethereum_test_tools.spec.consume.types import IndexFile, TestCaseIndexFile
+from ethereum_test_tools.spec.file.types import Fixtures
+from evm_transition_tool import FixtureFormats
+
+from .hasher import HashableItem
+
+
+def count_json_files_exclude_index(start_path: Path) -> int:
+    """
+    Return the number of json files in the specified directory, excluding
+    index.json files and tests in "blockchain_tests_hive".
+    """
+    json_file_count = sum(
+        1
+        for file in start_path.rglob("*.json")
+        if file.name != "index.json" and "blockchain_tests_hive" not in file.parts
+    )
+    return json_file_count
+
+
+def infer_fixture_format_from_path(file: Path) -> FixtureFormats:
+    """
+    Attempt to infer the fixture format from the file path.
+    """
+    if "blockchain_tests_hive" in file.parts:
+        return FixtureFormats.BLOCKCHAIN_TEST_HIVE
+    if "blockchain_tests" in file.parts:
+        return FixtureFormats.BLOCKCHAIN_TEST
+    if "state_tests" in file.parts:
+        return FixtureFormats.STATE_TEST
+    return FixtureFormats.UNSET_TEST_FORMAT
+
+
+@click.command(
+    help=(
+        "Generate an index file of all the json fixtures in the specified directory. "
+        "The index file is saved as 'index.json' in the specified directory."
+    )
+)
+@click.option(
+    "--input",
+    "-i",
+    "input_dir",
+    type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),
+    required=True,
+    help="The input directory",
+)
+@click.option(
+    "--disable-infer-format",
+    "-d",
+    "disable_infer_format",
+    is_flag=True,
+    default=False,
+    expose_value=True,
+    help="Don't try to guess the fixture format from the json file's path.",
+)
+@click.option(
+    "--quiet",
+    "-q",
+    "quiet_mode",
+    is_flag=True,
+    default=False,
+    expose_value=True,
+    help="Don't show the progress bar while processing fixture files.",
+)
+@click.option(
+    "--force",
+    "-f",
+    "force_flag",
+    is_flag=True,
+    default=False,
+    expose_value=True,
+    help="Force re-generation of the index file, even if it already exists.",
+)
+def generate_fixtures_index_cli(
+    input_dir: str, quiet_mode: bool, force_flag: bool, disable_infer_format: bool
+):
+    """
+    The CLI wrapper to generate an index of all the fixtures in the specified directory.
+    """
+    generate_fixtures_index(
+        Path(input_dir),
+        quiet_mode=quiet_mode,
+        force_flag=force_flag,
+        disable_infer_format=disable_infer_format,
+    )
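+
+
+# Example (hypothetical invocation): after fixtures have been generated with
+# `fill`, the index can be (re-)built from the command line via the `genindex`
+# entry point registered in setup.cfg:
+#
+#     genindex --input ./fixtures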
+
+
+def generate_fixtures_index(
+    input_path: Path,
+    quiet_mode: bool = False,
+    force_flag: bool = False,
+    disable_infer_format: bool = False,
+):
+    """
+    Generate an index file (index.json) of all the fixtures in the specified
+    directory.
+    """
+    total_files = 0
+    if not os.path.isdir(input_path):  # caught by click if using via cli
+        raise FileNotFoundError(f"The directory {input_path} does not exist.")
+    if not quiet_mode:
+        total_files = count_json_files_exclude_index(input_path)
+
+    output_file = input_path / "index.json"
+    try:
+        root_hash = HashableItem.from_folder(folder_path=input_path).hash()
+    except (KeyError, TypeError):
+        root_hash = b""  # just regenerate a new index file
+
+    if not force_flag and output_file.exists():
+        index_data: IndexFile
+        try:
+            with open(output_file, "r") as f:
+                index_data = IndexFile(**json.load(f))
+            if index_data.root_hash and index_data.root_hash == HexNumber(root_hash):
+                if not quiet_mode:
+                    rich.print(f"Index file [bold cyan]{output_file}[/] is up-to-date.")
+                return
+        except Exception as e:
+            rich.print(f"Ignoring exception {e}")
+            rich.print(f"...generating a new index file [bold cyan]{output_file}[/]")
+
+    filename_display_width = 25
+    with Progress(
+        TextColumn(
+            f"[bold cyan]{{task.fields[filename]:<{filename_display_width}}}[/]",
+            justify="left",
+            table_column=Column(ratio=1),
+        ),
+        BarColumn(
+            complete_style="green3",
+            finished_style="bold green3",
+            table_column=Column(ratio=2),
+        ),
+        TaskProgressColumn(),
+        TimeElapsedColumn(),
+        expand=False,
+        disable=quiet_mode,
+    ) as progress:
+        task_id = progress.add_task("[cyan]Processing files...", total=total_files, filename="...")
+
+        test_cases: List[TestCaseIndexFile] = []
+        for file in input_path.rglob("*.json"):
+            if file.name == "index.json":
+                continue
+            if "blockchain_tests_hive" in file.parts:
+                continue
+
+            try:
+                fixture_format = None
+                if not disable_infer_format:
+                    fixture_format = infer_fixture_format_from_path(file)
+                fixtures = Fixtures.from_file(file, fixture_format=fixture_format)
+            except Exception as e:
+                rich.print(f"[red]Error loading fixtures from {file}[/red]")
+                raise e
+
+            relative_file_path = Path(file).absolute().relative_to(Path(input_path).absolute())
+            for fixture_name, fixture in fixtures.items():
+                test_cases.append(
+                    TestCaseIndexFile(
+                        id=fixture_name,
+                        json_path=relative_file_path,
+                        fixture_hash=fixture.info.get("hash", None),
+                        fork=fixture.get_fork(),
+                        format=fixture.format,
+                    )
+                )
+
+            display_filename = file.name
+            if len(display_filename) > filename_display_width:
+                display_filename = display_filename[: filename_display_width - 3] + "..."
+            else:
+                display_filename = display_filename.ljust(filename_display_width)
+
+            progress.update(task_id, advance=1, filename=display_filename)
+
+        progress.update(
+            task_id,
+            completed=total_files,
+            filename="Indexing complete 🦄".ljust(filename_display_width),
+        )
+
+    index = IndexFile(
+        test_cases=test_cases,
+        root_hash=root_hash,
+        created_at=datetime.datetime.now(),
+        test_count=len(test_cases),
+    )
+
+    with open(output_file, "w") as f:
+        f.write(index.model_dump_json(exclude_none=False, indent=2))
+
+
+if __name__ == "__main__":
+    generate_fixtures_index_cli()
diff --git a/src/cli/hasher.py b/src/cli/hasher.py
index 6073899ad2..e724d57fc5 100644
--- a/src/cli/hasher.py
+++ b/src/cli/hasher.py
@@ -75,12 +75,16 @@ def from_json_file(cls, *, file_path: Path, parents: List[str]) -> "HashableItem
         with file_path.open("r") as f:
             data = json.load(f)
         for key, item in sorted(data.items()):
-            assert isinstance(item, dict), f"Expected dict, got {type(item)}"
-            assert "_info" in item, f"Expected _info in {key}"
-            assert "hash" in item["_info"], f"Expected hash in {key}"
-            assert isinstance(
-                item["_info"]["hash"], str
-            ), f"Expected hash to be a string in {key}, got {type(item['_info']['hash'])}"
+            if not isinstance(item, dict):
+                raise TypeError(f"Expected dict, got {type(item)} for {key}")
+            if "_info" not in item:
+                raise KeyError(f"Expected '_info' in {key}")
+            if "hash" not in item["_info"]:
+                raise KeyError(f"Expected 'hash' in {key}")
+            if not isinstance(item["_info"]["hash"], str):
+                raise TypeError(
+                    f"Expected hash to be a string in {key}, got {type(item['_info']['hash'])}"
+                )
             item_hash_bytes = bytes.fromhex(item["_info"]["hash"][2:])
             items[key] = cls(
                 type=HashableItemType.TEST,
@@ -96,6 +100,8 @@ def from_folder(cls, *, folder_path: Path, parents: List[str] = []) -> "Hashable
         """
         items = {}
         for file_path in sorted(folder_path.iterdir()):
+            if file_path.name == "index.json":
+                continue
             if file_path.is_file() and file_path.suffix == ".json":
                 item = cls.from_json_file(
                     file_path=file_path, parents=parents + [folder_path.name]
diff --git a/src/cli/pytest_commands.py b/src/cli/pytest_commands.py
index 4f0cd9ffcd..21bf6971a0 100644
--- a/src/cli/pytest_commands.py
+++ b/src/cli/pytest_commands.py
@@ -34,8 +34,10 @@
     ```
 """
+import os
 import sys
-from typing import Any, Callable, List
+import warnings
+from typing import Any, Callable, List, Literal
 
 import click
 import pytest
@@ -105,6 +107,25 @@ def handle_help_flags(
     return list(pytest_args)
 
 
+def handle_stdout_flags(args):
+    """
+    If the user has requested to write to stdout, add pytest arguments in order
+    to suppress pytest's test session header and summary output.
+    """
+    writing_to_stdout = False
+    if any(arg == "--output=stdout" for arg in args):
+        writing_to_stdout = True
+    elif "--output" in args:
+        output_index = args.index("--output")
+        if output_index + 1 < len(args) and args[output_index + 1] == "stdout":
+            writing_to_stdout = True
+    if writing_to_stdout:
+        if any(arg == "-n" or arg.startswith("-n=") for arg in args):
+            sys.exit("error: xdist-plugin not supported with --output=stdout (remove -n args).")
+        args.extend(["-qq", "-s", "--no-html"])
+    return args
+
+
 @click.command(context_settings=dict(ignore_unknown_options=True))
 @common_click_options
 def fill(
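Taken together with the stdin detection in the consume commands below, the stdout handling above allows `fill` output to be piped directly into a consumer; a hypothetical pipeline:

```console
fill --output=stdout | consume direct
```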
""" - updated_args = handle_help_flags(pytest_args, help_flag, pytest_help_flag) - result = pytest.main(updated_args) + args = handle_help_flags(pytest_args, help_flag, pytest_help_flag) + args = handle_stdout_flags(args) + result = pytest.main(args) sys.exit(result) + + +def get_hive_flags_from_env(): + """ + Read simulator flags from environment variables and convert them, as best as + possible, into pytest flags. + """ + pytest_args = [] + xdist_workers = os.getenv("HIVE_PARALLELISM") + if xdist_workers is not None: + pytest_args.extend("-n", xdist_workers) + test_pattern = os.getenv("HIVE_TEST_PATTERN") + if test_pattern is not None: + # TODO: Check that the regex is a valid pytest -k "test expression" + pytest_args.extend("-k", test_pattern) + random_seed = os.getenv("HIVE_RANDOM_SEED") + if random_seed is not None: + # TODO: implement random seed + warnings.warning("HIVE_RANDOM_SEED is not yet supported.") + log_level = os.getenv("HIVE_LOGLEVEL") + if log_level is not None: + # TODO add logging within simulators and implement log level via cli + warnings.warning("HIVE_LOG_LEVEL is not yet supported.") + return pytest_args + + +ConsumeCommands = Literal["dirct", "rlp", "engine", "all"] + + +def consume_ini_path(consume_command: ConsumeCommands) -> str: + """ + Get the path to the ini file for the specified consume command. + """ + return f"src/pytest_plugins/consume/ini_files/pytest-consume-{consume_command}.ini" + + +@click.group() +def consume(): + """ + Help clients consume JSON test fixtures. + """ + pass + + +@click.command(context_settings=dict(ignore_unknown_options=True)) +@common_click_options +def consume_direct(pytest_args, help_flag, pytest_help_flag): + """ + Clients consume directly via the `blocktest` interface. + """ + args = handle_help_flags(pytest_args, help_flag, pytest_help_flag) + args += ["-c", consume_ini_path("direct"), "--rootdir", "./"] + if not sys.stdin.isatty(): # the command is receiving input on stdin + args.extend(["-s", "--input=stdin"]) + pytest.main(args) + + +@click.command(context_settings=dict(ignore_unknown_options=True)) +@common_click_options +def consume_via_rlp(pytest_args, help_flag, pytest_help_flag): + """ + Clients consume RLP-encoded blocks on startup. + """ + args = handle_help_flags(pytest_args, help_flag, pytest_help_flag) + args += ["-c", consume_ini_path("rlp"), "--rootdir", "./"] + args += get_hive_flags_from_env() + if not sys.stdin.isatty(): # the command is receiving input on stdin + args.extend(["-s", "--input=stdin"]) + pytest.main(args) + + +@click.command(context_settings=dict(ignore_unknown_options=True)) +@common_click_options +def consume_via_engine_api(pytest_args, help_flag, pytest_help_flag): + """ + Clients consume via the Engine API. + """ + args = handle_help_flags(pytest_args, help_flag, pytest_help_flag) + args += ["-c", consume_ini_path("engine"), "--rootdir", "./"] + args += get_hive_flags_from_env() + if not sys.stdin.isatty(): # the command is receiving input on stdin + args.extend(["-s", "--input=stdin"]) + pytest.main(args) + + +@click.command(context_settings=dict(ignore_unknown_options=True)) +@common_click_options +def consume_all(pytest_args, help_flag, pytest_help_flag): + """ + Clients consume via all available methods (direct, rlp, engine). 
+ """ + args = handle_help_flags(pytest_args, help_flag, pytest_help_flag) + args += ["-c", consume_ini_path("all"), "--rootdir", "./"] + args += get_hive_flags_from_env() + if not sys.stdin.isatty(): # the command is receiving input on stdin + args.extend(["-s", "--input=stdin"]) + pytest.main(args) + + +consume.add_command(consume_all, name="all") +consume.add_command(consume_direct, name="direct") +consume.add_command(consume_via_rlp, name="rlp") +consume.add_command(consume_via_engine_api, name="engine") diff --git a/src/ethereum_test_tools/spec/base/base_test.py b/src/ethereum_test_tools/spec/base/base_test.py index 911f38fe4e..4facab78ff 100644 --- a/src/ethereum_test_tools/spec/base/base_test.py +++ b/src/ethereum_test_tools/spec/base/base_test.py @@ -111,6 +111,12 @@ def fill_info( if ref_spec is not None: ref_spec.write_info(self.info) + def get_fork(self) -> str: + """ + Returns the fork of the fixture as a string. + """ + raise NotImplementedError + class BaseTest(BaseModel): """ diff --git a/src/ethereum_test_tools/spec/blockchain/types.py b/src/ethereum_test_tools/spec/blockchain/types.py index 8b99d1ab68..a67f9588af 100644 --- a/src/ethereum_test_tools/spec/blockchain/types.py +++ b/src/ethereum_test_tools/spec/blockchain/types.py @@ -8,7 +8,7 @@ from ethereum import rlp as eth_rlp from ethereum.base_types import Uint from ethereum.crypto.hash import keccak256 -from pydantic import ConfigDict, Field, PlainSerializer, computed_field +from pydantic import AliasChoices, ConfigDict, Field, PlainSerializer, computed_field from ethereum_test_forks import Fork from evm_transition_tool import FixtureFormats @@ -138,11 +138,19 @@ class FixtureHeader(CamelModel): parent_hash: Hash ommers_hash: Hash = Field(Hash(EmptyOmmersRoot), alias="uncleHash") - fee_recipient: Address = Field(..., alias="coinbase") + fee_recipient: Address = Field( + ..., alias="coinbase", validation_alias=AliasChoices("coinbase", "miner") + ) state_root: Hash - transactions_trie: Hash - receipts_root: Hash = Field(..., alias="receiptTrie") - logs_bloom: Bloom = Field(..., alias="bloom") + transactions_trie: Hash = Field( + validation_alias=AliasChoices("transactionsTrie", "transactionsRoot") + ) + receipts_root: Hash = Field( + ..., alias="receiptTrie", validation_alias=AliasChoices("receiptTrie", "receiptsRoot") + ) + logs_bloom: Bloom = Field( + ..., alias="bloom", validation_alias=AliasChoices("bloom", "logsBloom") + ) difficulty: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0) number: ZeroPaddedHexNumber gas_limit: ZeroPaddedHexNumber @@ -552,6 +560,12 @@ class FixtureCommon(BaseFixture): pre: Alloc post_state: Alloc + def get_fork(self) -> str: + """ + Returns the fork of the fixture as a string. + """ + return self.fork + class Fixture(FixtureCommon): """ diff --git a/src/ethereum_test_tools/spec/consume/__init__.py b/src/ethereum_test_tools/spec/consume/__init__.py new file mode 100644 index 0000000000..c6378cbf8b --- /dev/null +++ b/src/ethereum_test_tools/spec/consume/__init__.py @@ -0,0 +1,3 @@ +""" +Defines pydantic models used by the consume commands. +""" diff --git a/src/ethereum_test_tools/spec/consume/types.py b/src/ethereum_test_tools/spec/consume/types.py new file mode 100644 index 0000000000..5ad039be23 --- /dev/null +++ b/src/ethereum_test_tools/spec/consume/types.py @@ -0,0 +1,152 @@ +""" +Defines models for index files and consume test cases. 
+""" + +import datetime +import json +from pathlib import Path +from typing import List, TextIO + +from pydantic import BaseModel, RootModel + +from evm_transition_tool import FixtureFormats + +from ...common.base_types import HexNumber +from ..blockchain.types import Fixture as BlockchainFixture +from ..file.types import Fixtures +from ..state.types import Fixture as StateFixture + + +class TestCaseBase(BaseModel): + """ + Base model for a test case used in EEST consume commands. + """ + + id: str + fixture_hash: HexNumber | None + fork: str + format: FixtureFormats + __test__ = False # stop pytest from collecting this class as a test + + +class TestCaseStream(TestCaseBase): + """ + The test case model used to load test cases from a stream (stdin). + """ + + fixture: StateFixture | BlockchainFixture + __test__ = False # stop pytest from collecting this class as a test + + +class TestCaseIndexFile(TestCaseBase): + """ + The test case model used to save/load test cases to/from an index file. + """ + + json_path: Path + __test__ = False # stop pytest from collecting this class as a test + + # TODO: add pytest marks + """ + ConsumerTypes = Literal["all", "direct", "rlp", "engine"] + @classmethod + def _marks_default(cls): + return {consumer_type: [] for consumer_type in get_args(ConsumerTypes)} + marks: Mapping[ConsumerTypes, List[pytest.MarkDecorator]] = field( + default_factory=lambda: TestCase._marks_default() + ) + """ + + +class IndexFile(BaseModel): + """ + The model definition used for fixture index files. + """ + + root_hash: HexNumber | None + created_at: datetime.datetime + test_count: int + test_cases: List[TestCaseIndexFile] + + +class TestCases(RootModel): + """ + Root model defining a list test cases used in consume commands. + """ + + root: List[TestCaseIndexFile] | List[TestCaseStream] + __test__ = False # stop pytest from collecting this class as a test + + def __len__(self): + """Return the number of test cases in the root list.""" + return len(self.root) + + def __getitem__(self, position): + """Retrieve a test case by its index.""" + return self.root[position] + + def __setitem__(self, position, value): + """Set a test case at a particular index.""" + self.root[position] = value + + def __delitem__(self, position): + """Remove a test case at a particular index.""" + del self.root[position] + + def append(self, item): + """Append a test case to the root list.""" + self.root.append(item) + + def insert(self, position, value): + """Insert a test case at a given position.""" + self.root.insert(position, value) + + def remove(self, value): + """Remove a test case from the root list.""" + self.root.remove(value) + + def pop(self, position=-1): + """Remove and return a test case at the given position.""" + return self.root.pop(position) + + def clear(self): + """Remove all items from the root list.""" + self.root.clear() + + def __iter__(self): + """Return an iterator for the root list.""" + return iter(self.root) + + def __repr__(self): + """Return a string representation of the TestCases object.""" + return f"{self.__class__.__name__}(root={self.root})" + + @classmethod + def from_stream(cls, fd: TextIO) -> "TestCases": + """ + Create a TestCases object from a stream. 
+ """ + fixtures = Fixtures.from_json_data(json.load(fd)) + test_cases = [] + for fixture_name, fixture in fixtures.items(): + if fixture.format == FixtureFormats.BLOCKCHAIN_TEST_HIVE: + print("Skipping hive fixture", fixture_name) + test_cases.append( + TestCaseStream( + id=fixture_name, + fixture_hash=fixture.hash, + fork=fixture.get_fork(), + format=fixture.format, + fixture=fixture, + ) + ) + return cls(root=test_cases) + + @classmethod + def from_index_file(cls, index_file: Path) -> "TestCases": + """ + Create a TestCases object from an index file. + """ + with open(index_file, "r") as fd: + index = IndexFile.model_validate_json(fd.read()) + return cls(root=index.test_cases) diff --git a/src/ethereum_test_tools/spec/file/types.py b/src/ethereum_test_tools/spec/file/types.py index 3b3622211c..608d94ab09 100644 --- a/src/ethereum_test_tools/spec/file/types.py +++ b/src/ethereum_test_tools/spec/file/types.py @@ -109,7 +109,7 @@ def from_json_data( FixtureFormats.STATE_TEST.value: StateFixtures, } - if fixture_format is not None: + if fixture_format not in [None, "unset_test_format", FixtureFormats.UNSET_TEST_FORMAT]: if fixture_format not in model_mapping: raise TypeError(f"Unsupported fixture format: {fixture_format}") model_class = model_mapping[fixture_format] diff --git a/src/ethereum_test_tools/spec/fixture_collector.py b/src/ethereum_test_tools/spec/fixture_collector.py index d2ddea63d0..4b710d6912 100644 --- a/src/ethereum_test_tools/spec/fixture_collector.py +++ b/src/ethereum_test_tools/spec/fixture_collector.py @@ -3,14 +3,17 @@ fixtures. """ +import json import os import re +import sys from dataclasses import dataclass, field from pathlib import Path from typing import Dict, Literal, Optional, Tuple from evm_transition_tool import FixtureFormats, TransitionTool +from ..common.json import to_json from .base.base_test import BaseFixture from .file.types import Fixtures @@ -150,6 +153,12 @@ def dump_fixtures(self) -> None: """ Dumps all collected fixtures to their respective files. """ + if self.output_dir == "stdout": + combined_fixtures = { + k: to_json(v) for fixture in self.all_fixtures.values() for k, v in fixture.items() + } + json.dump(combined_fixtures, sys.stdout, indent=4) + return os.makedirs(self.output_dir, exist_ok=True) for fixture_path, fixtures in self.all_fixtures.items(): os.makedirs(fixture_path.parent, exist_ok=True) @@ -167,7 +176,10 @@ def verify_fixture_files(self, evm_fixture_verification: TransitionTool) -> None info = self.json_path_to_test_item[fixture_path] verify_fixtures_dump_dir = self._get_verify_fixtures_dump_dir(info) evm_fixture_verification.verify_fixture( - fixture.format, fixture_path, verify_fixtures_dump_dir + fixture.format, + fixture_path, + fixture_name=None, + debug_output_path=verify_fixtures_dump_dir, ) def _get_verify_fixtures_dump_dir( diff --git a/src/ethereum_test_tools/spec/state/types.py b/src/ethereum_test_tools/spec/state/types.py index 71205546aa..bd4de25974 100644 --- a/src/ethereum_test_tools/spec/state/types.py +++ b/src/ethereum_test_tools/spec/state/types.py @@ -96,3 +96,11 @@ class Fixture(BaseFixture): post: Mapping[str, List[FixtureForkPost]] format: ClassVar[FixtureFormats] = FixtureFormats.STATE_TEST + + def get_fork(self) -> str: + """ + Returns the fork of the fixture as a string. 
+ """ + forks = list(self.post.keys()) + assert len(forks) == 1, "Expected state test fixture with single fork" + return forks[0] diff --git a/src/evm_transition_tool/execution_specs.py b/src/evm_transition_tool/execution_specs.py index b7d06f430f..ccffb92f76 100644 --- a/src/evm_transition_tool/execution_specs.py +++ b/src/evm_transition_tool/execution_specs.py @@ -11,6 +11,7 @@ from ethereum_test_forks import Constantinople, ConstantinopleFix, Fork from .geth import GethTransitionTool +from .transition_tool import FixtureFormats UNSUPPORTED_FORKS = ( Constantinople, @@ -99,3 +100,29 @@ def is_fork_supported(self, fork: Fork) -> bool: Currently, ethereum-spec-evm provides no way to determine supported forks. """ return fork not in UNSUPPORTED_FORKS + + def get_blocktest_help(self) -> str: + """ + Return the help string for the blocktest subcommand. + """ + raise NotImplementedError( + "The `blocktest` command is not supported by the ethereum-spec-evm. " + "Use geth's evm tool." + ) + + def verify_fixture( + self, + fixture_format: FixtureFormats, + fixture_path: Path, + fixture_name: Optional[str] = None, + debug_output_path: Optional[Path] = None, + ): + """ + Executes `evm [state|block]test` to verify the fixture at `fixture_path`. + + Currently only implemented by geth's evm. + """ + raise NotImplementedError( + "The `verify_fixture()` function is not supported by the ethereum-spec-evm. " + "Use geth's evm tool." + ) diff --git a/src/evm_transition_tool/geth.py b/src/evm_transition_tool/geth.py index 0ad7efd32f..b4e8ef5ad7 100644 --- a/src/evm_transition_tool/geth.py +++ b/src/evm_transition_tool/geth.py @@ -10,8 +10,6 @@ from re import compile from typing import Optional -import pytest - from ethereum_test_forks import Fork from .transition_tool import FixtureFormats, TransitionTool, dump_files_to_directory @@ -56,19 +54,25 @@ def is_fork_supported(self, fork: Fork) -> bool: """ return fork.transition_tool_name() in self.help_string - def process_statetest_result(self, result: str): + def get_blocktest_help(self) -> str: """ - Process the result of a `evm statetest` to parse as JSON and raise if any test failed. + Return the help string for the blocktest subcommand. """ - result_json = json.loads(result) - if not isinstance(result_json, list): - raise Exception(f"Unexpected result from evm statetest: {result_json}") - for test_result in result_json: - if not test_result["pass"]: - pytest.fail(f"Test failed: {test_result['name']}. Error: {test_result['error']}") + args = [str(self.binary), "blocktest", "--help"] + try: + result = subprocess.run(args, capture_output=True, text=True) + except subprocess.CalledProcessError as e: + raise Exception("evm process unexpectedly returned a non-zero status code: " f"{e}.") + except Exception as e: + raise Exception(f"Unexpected exception calling evm tool: {e}.") + return result.stdout def verify_fixture( - self, fixture_format: FixtureFormats, fixture_path: Path, debug_output_path: Optional[Path] + self, + fixture_format: FixtureFormats, + fixture_path: Path, + fixture_name: Optional[str] = None, + debug_output_path: Optional[Path] = None, ): """ Executes `evm [state|block]test` to verify the fixture at `fixture_path`. 
@@ -87,6 +91,10 @@ def verify_fixture( else: raise Exception(f"Invalid test fixture format: {fixture_format}") + if fixture_name and fixture_format == FixtureFormats.BLOCKCHAIN_TEST: + assert isinstance(fixture_name, str), "fixture_name must be a string" + command.append("--run") + command.append(fixture_name) command.append(str(fixture_path)) result = subprocess.run( @@ -95,12 +103,8 @@ def verify_fixture( stderr=subprocess.PIPE, ) - if FixtureFormats.is_state_test(fixture_format): - self.process_statetest_result(result.stdout.decode()) - if debug_output_path: debug_fixture_path = debug_output_path / "fixtures.json" - shutil.copyfile(fixture_path, debug_fixture_path) # Use the local copy of the fixture in the debug directory verify_fixtures_call = " ".join(command[:-1]) + f" {debug_fixture_path}" verify_fixtures_script = textwrap.dedent( @@ -119,9 +123,17 @@ def verify_fixture( "verify_fixtures.sh+x": verify_fixtures_script, }, ) + shutil.copyfile(fixture_path, debug_fixture_path) if result.returncode != 0: raise Exception( - f"Failed to verify fixture via: '{' '.join(command)}'. " - f"Error: '{result.stderr.decode()}'" + f"EVM test failed.\n{' '.join(command)}\n\n Error:\n{result.stderr.decode()}" ) + + if FixtureFormats.is_state_test(fixture_format): + result_json = json.loads(result.stdout.decode()) + if not isinstance(result_json, list): + raise Exception(f"Unexpected result from evm statetest: {result_json}") + else: + result_json = [] # there is no parseable format for blocktest output + return result_json diff --git a/src/evm_transition_tool/transition_tool.py b/src/evm_transition_tool/transition_tool.py index 5173a55109..9b718106f8 100644 --- a/src/evm_transition_tool/transition_tool.py +++ b/src/evm_transition_tool/transition_tool.py @@ -583,13 +583,17 @@ def evaluate( ) def verify_fixture( - self, fixture_format: FixtureFormats, fixture_path: Path, debug_output_path: Optional[Path] + self, + fixture_format: FixtureFormats, + fixture_path: Path, + fixture_name: Optional[str] = None, + debug_output_path: Optional[Path] = None, ): """ Executes `evm [state|block]test` to verify the fixture at `fixture_path`. Currently only implemented by geth's evm. """ - raise Exception( + raise NotImplementedError( "The `verify_fixture()` function is not supported by this tool. Use geth's evm tool." ) diff --git a/src/pytest_plugins/consume/__init__.py b/src/pytest_plugins/consume/__init__.py new file mode 100644 index 0000000000..18fbc2c630 --- /dev/null +++ b/src/pytest_plugins/consume/__init__.py @@ -0,0 +1,3 @@ +""" +Pytest plugins for consume commands. +""" diff --git a/src/pytest_plugins/consume/consume.py b/src/pytest_plugins/consume/consume.py new file mode 100644 index 0000000000..c53615a48d --- /dev/null +++ b/src/pytest_plugins/consume/consume.py @@ -0,0 +1,221 @@ +""" +A pytest plugin providing common functionality for consuming test fixtures. +""" + +import os +import sys +import tarfile +from pathlib import Path +from typing import Literal, Union +from urllib.parse import urlparse + +import pytest +import requests +import rich + +from cli.gen_index import generate_fixtures_index +from ethereum_test_tools.spec.consume.types import TestCases +from evm_transition_tool import FixtureFormats + +cached_downloads_directory = Path("./cached_downloads") + +JsonSource = Union[Path, Literal["stdin"]] + + +def default_input_directory() -> str: + """ + The default directory to consume generated test fixtures from. Defined as a + function to allow for easier testing. 
+ """ + return "./fixtures" + + +def default_html_report_filename() -> str: + """ + The default file to store the generated HTML test report. Defined as a + function to allow for easier testing. + """ + return "report_consume.html" + + +def is_url(string: str) -> bool: + """ + Check if a string is a remote URL. + """ + result = urlparse(string) + return all([result.scheme, result.netloc]) + + +def download_and_extract(url: str, base_directory: Path) -> Path: + """ + Download the URL and extract it locally if it hasn't already been downloaded. + """ + parsed_url = urlparse(url) + filename = Path(parsed_url.path).name + version = Path(parsed_url.path).parts[-2] + extract_to = base_directory / version / filename.removesuffix(".tar.gz") + + if extract_to.exists(): + # skip download if the archive has already been downloaded + return extract_to + + extract_to.mkdir(parents=True, exist_ok=False) + response = requests.get(url) + response.raise_for_status() + + archive_path = extract_to / filename + with open(archive_path, "wb") as file: + file.write(response.content) + + with tarfile.open(archive_path, "r:gz") as tar: + tar.extractall(path=extract_to) + + return extract_to + + +def pytest_addoption(parser): # noqa: D103 + consume_group = parser.getgroup( + "consume", "Arguments related to consuming fixtures via a client" + ) + consume_group.addoption( + "--input", + action="store", + dest="fixture_source", + default=default_input_directory(), + help=( + "A URL or local directory specifying the JSON test fixtures. Default: " + f"'{default_input_directory()}'." + ), + ) + consume_group.addoption( + "--no-html", + action="store_true", + dest="disable_html", + default=False, + help=( + "Don't generate an HTML test report (in the output directory). " + "The --html flag can be used to specify a different path." + ), + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure(config): # noqa: D103 + """ + Pytest hook called after command line options have been parsed and before + test collection begins. + + `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is + called before the pytest-html plugin's pytest_configure to ensure that + it uses the modified `htmlpath` option. + """ + input_source = config.getoption("fixture_source") + if input_source == "stdin": + config.test_cases = TestCases.from_stream(sys.stdin) + return + + if is_url(input_source): + cached_downloads_directory.mkdir(parents=True, exist_ok=True) + input_source = download_and_extract(input_source, cached_downloads_directory) + config.option.fixture_source = input_source + + input_source = Path(input_source) + if not input_source.exists(): + pytest.exit(f"Specified fixture directory '{input_source}' does not exist.") + if not any(input_source.glob("**/*.json")): + pytest.exit( + f"Specified fixture directory '{input_source}' does not contain any JSON files." 
+ ) + + index_file = input_source / "index.json" + if not index_file.exists(): + rich.print(f"Generating index file [bold cyan]{index_file}[/]...") + generate_fixtures_index( + Path(input_source), quiet_mode=False, force_flag=False, disable_infer_format=False + ) + config.test_cases = TestCases.from_index_file(Path(input_source) / "index.json") + + if config.option.collectonly: + return + if not config.getoption("disable_html") and config.getoption("htmlpath") is None: + # generate an html report by default, unless explicitly disabled + config.option.htmlpath = os.path.join( + config.getoption("fixture_source"), default_html_report_filename() + ) + + +def pytest_html_report_title(report): + """ + Set the HTML report title (pytest-html plugin). + """ + report.title = "Consume Test Report" + + +def pytest_report_header(config): # noqa: D103 + input_source = config.getoption("fixture_source") + return f"fixtures: {input_source}" + + +@pytest.fixture(scope="function") +def fixture_source(request) -> JsonSource: # noqa: D103 + return request.config.getoption("fixture_source") + + +def pytest_generate_tests(metafunc): + """ + Generate test cases for every test fixture in all the JSON fixture files + within the specified fixtures directory, or read from stdin if the directory is 'stdin'. + """ + test_cases = metafunc.config.test_cases + + if "test_blocktest" in metafunc.function.__name__: + pytest_params = [ + pytest.param( + test_case, + id=test_case.id, + # marks=test_case.marks["all"] + test_case.marks["direct"], + ) + for test_case in test_cases + if test_case.format == FixtureFormats.BLOCKCHAIN_TEST + ] + metafunc.parametrize("test_case", pytest_params) + + if "test_statetest" in metafunc.function.__name__: + pytest_params = [ + pytest.param( + test_case, + id=test_case.id, + # marks=test_case.marks["all"] + test_case.marks["direct"], + ) + for test_case in test_cases + if test_case.format == FixtureFormats.STATE_TEST + ] + metafunc.parametrize("test_case", pytest_params) + + if "test_via_rlp" in metafunc.function.__name__: + pytest_params = [ + pytest.param( + test_case, + id=test_case.id, + # marks=test_case.marks["all"] + test_case.marks["rlp"], + ) + for test_case in test_cases + if test_case.format == FixtureFormats.BLOCKCHAIN_TEST + ] + metafunc.parametrize("test_case", pytest_params) + + if "test_via_engine" in metafunc.function.__name__: + pytest_params = [ + pytest.param( + test_case, + id=test_case.id, + # marks=test_case.marks["all"] + test_case.marks["engine"], + ) + for test_case in test_cases + if test_case.format == FixtureFormats.BLOCKCHAIN_TEST_HIVE + ] + metafunc.parametrize("test_case", pytest_params) + + if "client_type" in metafunc.fixturenames: + client_ids = [client.name for client in metafunc.config.hive_execution_clients] + metafunc.parametrize("client_type", metafunc.config.hive_execution_clients, ids=client_ids) diff --git a/src/pytest_plugins/consume/direct.py b/src/pytest_plugins/consume/direct.py new file mode 100644 index 0000000000..45f7e5305a --- /dev/null +++ b/src/pytest_plugins/consume/direct.py @@ -0,0 +1,130 @@ +""" +A pytest plugin that configures the consume command to act as a test runner +for "direct" client fixture consumer interfaces. + +For example, via go-ethereum's `evm blocktest` or `evm statetest` commands. 
+""" + +import json +import tempfile +from pathlib import Path +from typing import Generator, Optional + +import pytest + +from ethereum_test_tools.common.json import to_json +from ethereum_test_tools.spec.consume.types import TestCaseIndexFile, TestCaseStream +from ethereum_test_tools.spec.file.types import Fixtures +from evm_transition_tool import TransitionTool + + +def pytest_addoption(parser): # noqa: D103 + consume_group = parser.getgroup( + "consume_direct", "Arguments related to consuming fixtures via a client" + ) + + consume_group.addoption( + "--evm-bin", + action="store", + dest="evm_bin", + type=Path, + default=None, + help=( + "Path to an evm executable that provides `blocktest`. Default: First 'evm' entry in " + "PATH." + ), + ) + consume_group.addoption( + "--traces", + action="store_true", + dest="evm_collect_traces", + default=False, + help="Collect traces of the execution information from the transition tool.", + ) + debug_group = parser.getgroup("debug", "Arguments defining debug behavior") + debug_group.addoption( + "--evm-dump-dir", + action="store", + dest="base_dump_dir", + type=Path, + default=None, + help="Path to dump the transition tool debug output.", + ) + + +def pytest_configure(config): # noqa: D103 + evm = TransitionTool.from_binary_path( + binary_path=config.getoption("evm_bin"), + # TODO: The verify_fixture() method doesn't currently use this option. + trace=config.getoption("evm_collect_traces"), + ) + try: + blocktest_help_string = evm.get_blocktest_help() + except NotImplementedError as e: + pytest.exit(str(e)) + config.evm = evm + config.evm_run_single_test = "--run" in blocktest_help_string + + +@pytest.fixture(autouse=True, scope="session") +def evm(request) -> Generator[TransitionTool, None, None]: + """ + Returns the interface to the evm binary that will consume tests. + """ + yield request.config.evm + request.config.evm.shutdown() + + +@pytest.fixture(scope="session") +def evm_run_single_test(request) -> bool: + """ + Helper specifying whether to execute one test per fixture in each json file. + """ + return request.config.evm_run_single_test + + +@pytest.fixture(scope="function") +def test_dump_dir( + request, fixture_path: Path, fixture_name: str, evm_run_single_test: bool +) -> Optional[Path]: + """ + The directory to write evm debug output to. + """ + base_dump_dir = request.config.getoption("base_dump_dir") + if not base_dump_dir: + return None + if evm_run_single_test: + if len(fixture_name) > 142: + # ensure file name is not too long for eCryptFS + fixture_name = fixture_name[:70] + "..." + fixture_name[-70:] + return base_dump_dir / fixture_path.stem / fixture_name.replace("/", "-") + return base_dump_dir / fixture_path.stem + + +@pytest.fixture +def fixture_path(test_case: TestCaseIndexFile | TestCaseStream, fixture_source): + """ + The path to the current JSON fixture file. + + If the fixture source is stdin, the fixture is written to a temporary json file. 
+ """ + if fixture_source == "stdin": + assert isinstance(test_case, TestCaseStream) + temp_dir = tempfile.TemporaryDirectory() + fixture_path = Path(temp_dir.name) / f"{test_case.id.replace('/','_')}.json" + fixtures = Fixtures({test_case.id: test_case.fixture}) + with open(fixture_path, "w") as f: + json.dump(to_json(fixtures), f, indent=4) + yield fixture_path + temp_dir.cleanup() + else: + assert isinstance(test_case, TestCaseIndexFile) + yield fixture_source / test_case.json_path + + +@pytest.fixture(scope="function") +def fixture_name(test_case: TestCaseIndexFile | TestCaseStream): + """ + The name of the current fixture. + """ + return test_case.id diff --git a/src/pytest_plugins/consume/engine.py b/src/pytest_plugins/consume/engine.py new file mode 100644 index 0000000000..bb85d035e1 --- /dev/null +++ b/src/pytest_plugins/consume/engine.py @@ -0,0 +1,24 @@ +""" +A hive simulator that executes blocks against clients using the `engine_newPayloadVX` method from +the Engine API, verifying the appropriate VALID/INVALID responses. + +Implemented using the pytest framework as a pytest plugin. +""" + +import pytest + + +@pytest.fixture(scope="session") +def test_suite_name() -> str: + """ + The name of the hive test suite used in this simulator. + """ + return "EEST Consume Blocks via Engine API" + + +@pytest.fixture(scope="session") +def test_suite_description() -> str: + """ + The description of the hive test suite used in this simulator. + """ + return "Execute blockchain tests by against clients using the `engine_newPayloadVX` method." diff --git a/src/pytest_plugins/consume/hive_ruleset.py b/src/pytest_plugins/consume/hive_ruleset.py new file mode 100644 index 0000000000..2f3af7ff4c --- /dev/null +++ b/src/pytest_plugins/consume/hive_ruleset.py @@ -0,0 +1,309 @@ +""" +Network/fork rules for Hive, taken verbatim from the consensus simulator. 
+""" + +ruleset = { + "Frontier": { + "HIVE_FORK_HOMESTEAD": 2000, + "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 2000, + "HIVE_FORK_SPURIOUS": 2000, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "Homestead": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 2000, + "HIVE_FORK_SPURIOUS": 2000, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "EIP150": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 2000, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "EIP158": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "Byzantium": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "Constantinople": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "ConstantinopleFix": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "Istanbul": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "Berlin": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 2000, + }, + "FrontierToHomesteadAt5": { + "HIVE_FORK_HOMESTEAD": 5, + "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 2000, + "HIVE_FORK_SPURIOUS": 2000, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "HomesteadToEIP150At5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 5, + "HIVE_FORK_SPURIOUS": 2000, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "HomesteadToDaoAt5": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_DAO_BLOCK": 5, + "HIVE_FORK_TANGERINE": 2000, + "HIVE_FORK_SPURIOUS": 2000, + "HIVE_FORK_BYZANTIUM": 2000, + "HIVE_FORK_CONSTANTINOPLE": 2000, + 
"HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "EIP158ToByzantiumAt5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 5, + "HIVE_FORK_CONSTANTINOPLE": 2000, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "ByzantiumToConstantinopleAt5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 5, + "HIVE_FORK_PETERSBURG": 2000, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "ByzantiumToConstantinopleFixAt5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 5, + "HIVE_FORK_PETERSBURG": 5, + "HIVE_FORK_ISTANBUL": 2000, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "ConstantinopleFixToIstanbulAt5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 5, + "HIVE_FORK_BERLIN": 2000, + "HIVE_FORK_LONDON": 2000, + }, + "IstanbulToBerlinAt5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 5, + "HIVE_FORK_LONDON": 2000, + }, + "BerlinToLondonAt5": { + "HIVE_FORK_HOMESTEAD": 0, + # "HIVE_FORK_DAO_BLOCK": 2000, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 5, + }, + "London": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + }, + "ArrowGlacierToMergeAtDiffC0000": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 786432, + }, + "Merge": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + }, + "Shanghai": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 0, + }, + "MergeToShanghaiAtTime15k": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + 
"HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 15000, + }, + "Cancun": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 0, + "HIVE_CANCUN_TIMESTAMP": 0, + }, + "ShanghaiToCancunAtTime15k": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 0, + "HIVE_CANCUN_TIMESTAMP": 15000, + }, +} diff --git a/src/pytest_plugins/consume/hive_rulest_engine.py b/src/pytest_plugins/consume/hive_rulest_engine.py new file mode 100644 index 0000000000..32c0cf8986 --- /dev/null +++ b/src/pytest_plugins/consume/hive_rulest_engine.py @@ -0,0 +1,80 @@ +""" +Fork rules for clients ran within hive, starting from the Merge fork as +we are executing blocks using the Engine API. +""" + +# TODO: 1) Can we programmatically generate this? +# TODO: 2) Can we generate a single ruleset for both rlp and engine_api simulators. +client_fork_ruleset = { + "Merge": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + }, + "Shanghai": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 0, + }, + "MergeToShanghaiAtTime15k": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 15000, + }, + "Cancun": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 0, + "HIVE_CANCUN_TIMESTAMP": 0, + }, + "ShanghaiToCancunAtTime15k": { + "HIVE_FORK_HOMESTEAD": 0, + "HIVE_FORK_TANGERINE": 0, + "HIVE_FORK_SPURIOUS": 0, + "HIVE_FORK_BYZANTIUM": 0, + "HIVE_FORK_CONSTANTINOPLE": 0, + "HIVE_FORK_PETERSBURG": 0, + "HIVE_FORK_ISTANBUL": 0, + "HIVE_FORK_BERLIN": 0, + "HIVE_FORK_LONDON": 0, + "HIVE_FORK_MERGE": 0, + "HIVE_TERMINAL_TOTAL_DIFFICULTY": 0, + "HIVE_SHANGHAI_TIMESTAMP": 0, + "HIVE_CANCUN_TIMESTAMP": 15000, + }, +} diff --git 
a/src/pytest_plugins/consume/ini_files/pytest-consume-all.ini b/src/pytest_plugins/consume/ini_files/pytest-consume-all.ini new file mode 100644 index 0000000000..f6f49ea78d --- /dev/null +++ b/src/pytest_plugins/consume/ini_files/pytest-consume-all.ini @@ -0,0 +1,15 @@ +[pytest] +console_output_style = count +minversion = 7.0 +python_files = test_* +testpaths = tests_consume/test_direct.py tests_consume/test_via_rlp.py tests_consume/test_via_engine_api.py +addopts = + -rxXs + --tb short + -p pytest_plugins.consume.consume + -p pytest_plugins.consume.direct + -p pytest_plugins.consume.rlp + -p pytest_plugins.consume.engine + -p pytest_plugins.consume.simulator_common + -p pytest_plugins.pytest_hive.pytest_hive + -p pytest_plugins.test_help.test_help diff --git a/src/pytest_plugins/consume/ini_files/pytest-consume-direct.ini b/src/pytest_plugins/consume/ini_files/pytest-consume-direct.ini new file mode 100644 index 0000000000..1e72110ef4 --- /dev/null +++ b/src/pytest_plugins/consume/ini_files/pytest-consume-direct.ini @@ -0,0 +1,11 @@ +[pytest] +console_output_style = count +minversion = 7.0 +python_files = test_direct.py +testpaths = tests_consume/test_direct.py +addopts = + -rxXs + --tb short + -p pytest_plugins.consume.consume + -p pytest_plugins.consume.direct + -p pytest_plugins.test_help.test_help diff --git a/src/pytest_plugins/consume/ini_files/pytest-consume-engine.ini b/src/pytest_plugins/consume/ini_files/pytest-consume-engine.ini new file mode 100644 index 0000000000..f6b9522f10 --- /dev/null +++ b/src/pytest_plugins/consume/ini_files/pytest-consume-engine.ini @@ -0,0 +1,13 @@ +[pytest] +console_output_style = count +minversion = 7.0 +python_files = test_via_engine_api.py +testpaths = tests_consume/test_via_engine_api.py +addopts = + -rxXs + --tb short + -p pytest_plugins.consume.consume + -p pytest_plugins.consume.engine + -p pytest_plugins.consume.simulator_common + -p pytest_plugins.pytest_hive.pytest_hive + -p pytest_plugins.test_help.test_help diff --git a/src/pytest_plugins/consume/ini_files/pytest-consume-rlp.ini b/src/pytest_plugins/consume/ini_files/pytest-consume-rlp.ini new file mode 100644 index 0000000000..5e1288403a --- /dev/null +++ b/src/pytest_plugins/consume/ini_files/pytest-consume-rlp.ini @@ -0,0 +1,13 @@ +[pytest] +console_output_style = count +minversion = 7.0 +python_files = test_via_rlp.py +testpaths = tests_consume/test_via_rlp.py +addopts = + -rxXs + --tb short + -p pytest_plugins.consume.consume + -p pytest_plugins.consume.rlp + -p pytest_plugins.consume.simulator_common + -p pytest_plugins.pytest_hive.pytest_hive + -p pytest_plugins.test_help.test_help diff --git a/src/pytest_plugins/consume/rlp.py b/src/pytest_plugins/consume/rlp.py new file mode 100644 index 0000000000..ec00f9b4e3 --- /dev/null +++ b/src/pytest_plugins/consume/rlp.py @@ -0,0 +1,24 @@ +""" +A hive simulator that executes test fixtures in the blockchain test format +against clients by providing them a genesis state and RLP-encoded blocks +that they consume upon start-up. + +Implemented using the pytest framework as a pytest plugin. +""" +import pytest + + +@pytest.fixture(scope="session") +def test_suite_name() -> str: + """ + The name of the hive test suite used in this simulator. + """ + return "EEST Consume Blocks via RLP" + + +@pytest.fixture(scope="session") +def test_suite_description() -> str: + """ + The description of the hive test suite used in this simulator. + """ + return "Execute blockchain tests by providing RLP-encoded blocks to a client upon start-up." 
diff --git a/src/pytest_plugins/consume/simulator_common.py b/src/pytest_plugins/consume/simulator_common.py new file mode 100644 index 0000000000..e8f4b7d2f7 --- /dev/null +++ b/src/pytest_plugins/consume/simulator_common.py @@ -0,0 +1,38 @@ +""" +A pytest plugin containing common functionality for executing blockchain test +fixtures in Hive simulators (RLP and Engine API). +""" + +from pathlib import Path + +import pytest + +from ethereum_test_tools.spec.blockchain.types import Fixture +from ethereum_test_tools.spec.consume.types import TestCaseIndexFile, TestCaseStream +from ethereum_test_tools.spec.file.types import BlockchainFixtures +from pytest_plugins.consume.consume import JsonSource + +TestCase = TestCaseIndexFile | TestCaseStream + + +@pytest.fixture(scope="function") +def fixture(fixture_source: JsonSource, test_case: TestCase) -> Fixture: + """ + Return the blockchain fixture's pydantic model for the current test case. + + The fixture is either already available within the test case (if consume + is taking input on stdin) or loaded from the fixture json file if taking + input from disk (fixture directory with index file). + """ + if fixture_source == "stdin": + assert isinstance(test_case, TestCaseStream), "Expected a stream test case" + assert isinstance(test_case.fixture, Fixture), "Expected a blockchain test fixture" + fixture = test_case.fixture + else: + assert isinstance(test_case, TestCaseIndexFile), "Expected an index file test case" + # TODO: Optimize, json files will be loaded multiple times. This pytest fixture + # is executed per test case, and a fixture json will contain multiple test cases. + # Use cache fixtures as for statetest in consume direct? + fixtures = BlockchainFixtures.from_file(Path(fixture_source) / test_case.json_path) + fixture = fixtures[test_case.id] + return fixture diff --git a/src/pytest_plugins/py.typed b/src/pytest_plugins/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/pytest_plugins/pytest_hive/pytest_hive.py b/src/pytest_plugins/pytest_hive/pytest_hive.py new file mode 100644 index 0000000000..0fda7c0c00 --- /dev/null +++ b/src/pytest_plugins/pytest_hive/pytest_hive.py @@ -0,0 +1,134 @@ +""" +A pytest plugin providing common functionality for Hive simulators. + +Simulators using this plugin must define two pytest fixtures: + +1. `test_suite_name`: The name of the test suite. +2. `test_suite_description`: The description of the test suite. + +These fixtures are used when creating the hive test suite. +""" +import os + +import pytest +from hive.client import ClientRole +from hive.simulation import Simulation +from hive.testing import HiveTest, HiveTestResult, HiveTestSuite + + +@pytest.fixture(scope="session") +def simulator(request): # noqa: D103 + return request.config.hive_simulator + + +@pytest.fixture(scope="session") +def test_suite(request, simulator: Simulation): + """ + Defines a Hive test suite and cleans up after all tests have run. + """ + try: + test_suite_name = request.getfixturevalue("test_suite_name") + test_suite_description = request.getfixturevalue("test_suite_description") + except pytest.FixtureLookupError: + pytest.exit( + "Error: The 'test_suite_name' and 'test_suite_description' fixtures are not defined " + "by the hive simulator pytest plugin using this ('test_suite') fixture!" + ) + + suite = simulator.start_suite(name=test_suite_name, description=test_suite_description) + # TODO: Can we share this fixture across all nodes using xdist? Hive uses different suites. 
+ yield suite + suite.end() + + +def pytest_configure(config): # noqa: D103 + hive_simulator_url = os.environ.get("HIVE_SIMULATOR") + if hive_simulator_url is None: + pytest.exit( + "The HIVE_SIMULATOR environment variable is not set.\n\n" + "If running locally, start hive in --dev mode, for example:\n" + "./hive --dev --client go-ethereum\n\n" + "and set the HIVE_SIMULATOR to the reported URL. For example, in bash:\n" + "export HIVE_SIMULATOR=http://127.0.0.1:3000\n" + "or in fish:\n" + "set -x HIVE_SIMULATOR http://127.0.0.1:3000" + ) + # TODO: Try and get these into fixtures; this is only here due to the "dynamic" parametrization + # of client_type with hive_execution_clients. + config.hive_simulator_url = hive_simulator_url + config.hive_simulator = Simulation(url=hive_simulator_url) + try: + config.hive_execution_clients = config.hive_simulator.client_types( + role=ClientRole.ExecutionClient + ) + except Exception as e: + message = ( + f"Error connecting to hive simulator at {hive_simulator_url}.\n\n" + "Did you forget to start hive in --dev mode?\n" + "./hive --dev --client go-ethereum\n\n" + ) + if config.option.verbose > 0: + message += f"Error details:\n{str(e)}" + else: + message += "Re-run with -v for more details." + pytest.exit(message) + + +@pytest.hookimpl(trylast=True) +def pytest_report_header(config, start_path): + """ + Add lines to pytest's console output header. + """ + if config.option.collectonly: + return + return [f"hive simulator: {config.hive_simulator_url}"] + + +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + """ + Make the setup, call, and teardown results available in the teardown phase of + a test fixture (i.e., after yield has been called). + + This is used to get the test result and pass it to the hive test suite. + + Available as: + - result_setup - setup result + - result_call - test result + - result_teardown - teardown result + """ + outcome = yield + rep = outcome.get_result() + setattr(item, f"result_{rep.when}", rep) + + +@pytest.fixture +def hive_test(request, test_suite: HiveTestSuite): + """ + Propagate the pytest test case and its result to the hive server. + """ + test_parameter_string = request.node.nodeid.split("[")[-1].rstrip("]") # test fixture name + test: HiveTest = test_suite.start_test( + # TODO: pass test case documentation when available + name=test_parameter_string, + description="TODO: This should come from the '_info' field.", + ) + yield test + try: + # TODO: Handle xfail/skip, does this work with run=False? + if hasattr(request.node, "result_call") and request.node.result_call.passed: + test_passed = True + test_result_details = "Test passed." + elif hasattr(request.node, "result_call") and not request.node.result_call.passed: + test_passed = False + test_result_details = request.node.result_call.longreprtext + elif hasattr(request.node, "result_setup") and not request.node.result_setup.passed: + test_passed = False + test_result_details = "Test setup failed.\n" + request.node.result_setup.longreprtext + else: + test_passed = False + test_result_details = "Test failed for unknown reason (setup or call status unknown)."
+ except Exception as e: + test_passed = False + test_result_details = f"Exception whilst processing test result: {str(e)}" + test.end(result=HiveTestResult(test_pass=test_passed, details=test_result_details)) diff --git a/src/pytest_plugins/test_filler/test_filler.py b/src/pytest_plugins/test_filler/test_filler.py index a9ee228938..e8522b4b1c 100644 --- a/src/pytest_plugins/test_filler/test_filler.py +++ b/src/pytest_plugins/test_filler/test_filler.py @@ -114,7 +114,10 @@ def pytest_addoption(parser): action="store", dest="output", default=default_output_directory(), - help="Directory to store the generated test fixtures. Can be deleted.", + help=( + "Directory to store the generated test fixtures. Can be deleted. " + f"Default: '{default_output_directory()}'." + ), ) test_group.addoption( "--flat-output", @@ -158,10 +161,18 @@ def pytest_addoption(parser): @pytest.hookimpl(tryfirst=True) def pytest_configure(config): """ - Register the plugin's custom markers and process command-line options. + Pytest hook called after command line options have been parsed and before + test collection begins. + + A couple of notes: + 1. Register the plugin's custom markers and process command-line options. - Custom marker registration: - https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers + Custom marker registration: + https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers + + 2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is + called before the pytest-html plugin's pytest_configure so that + it uses the modified `htmlpath` option. """ for fixture_format in FixtureFormats: config.addinivalue_line( @@ -226,6 +237,20 @@ def pytest_report_header(config, start_path): return [(f"{t8n_version}, {solc_version}")] +def pytest_report_teststatus(report, config): + """ + Disable test session progress report if we're writing the JSON fixtures to + stdout to be read by a consume command on stdin. I.e., don't write this + type of output to the console: + + ```text + ...x... + ``` + """ + if config.getoption("output") == "stdout": + return report.outcome, "", report.outcome.upper() + + def pytest_metadata(metadata): """ Add or remove metadata to/from the pytest report. @@ -431,6 +456,8 @@ def get_fixture_collection_scope(fixture_name, config): See: https://docs.pytest.org/en/stable/how-to/fixtures.html#dynamic-scope """ + if config.getoption("output") == "stdout": + return "session" if config.getoption("single_fixture_per_file"): return "function" return "module" diff --git a/src/pytest_plugins/test_help/test_help.py b/src/pytest_plugins/test_help/test_help.py index 2a8755dcc0..fb71a5d5de 100644 --- a/src/pytest_plugins/test_help/test_help.py +++ b/src/pytest_plugins/test_help/test_help.py @@ -4,6 +4,7 @@ """ import argparse +from pathlib import Path import pytest @@ -18,7 +19,10 @@ def pytest_addoption(parser): action="store_true", dest="show_test_help", default=False, - help="Only show help options specific to execution-spec-tests and exit.", + help=( + "Only show help options specific to the current execution-spec-tests command and " + "exit." + ), ) @@ -37,14 +41,29 @@ def show_test_help(config): that group is specific to execution-spec-tests command-line arguments. """ - test_group_substrings = [ - "execution-spec-tests", - "evm", - "solc", - "fork range", - "filler location", - "defining debug", # the "debug" group in test_filler plugin.
- ] + pytest_ini = Path(config.inifile) + if pytest_ini.name == "pytest.ini": + test_group_substrings = [ + "execution-spec-tests", + "evm", + "solc", + "fork range", + "filler location", + "defining debug", + ] + elif pytest_ini.name in [ + "pytest-consume-all.ini", + "pytest-consume-direct.ini", + "pytest-consume-rlp.ini", + "pytest-consume-engine.ini", + ]: + test_group_substrings = [ + "execution-spec-tests", + "consuming", + "defining debug", + ] + else: + raise ValueError(f"Unexpected pytest ini file '{pytest_ini.name}' when generating test help.") test_parser = argparse.ArgumentParser() for group in config._parser.optparser._action_groups: diff --git a/stubs/jwt/__init__.pyi b/stubs/jwt/__init__.pyi new file mode 100644 index 0000000000..dee1918afd --- /dev/null +++ b/stubs/jwt/__init__.pyi @@ -0,0 +1,3 @@ +from .encode import encode + +__all__ = ("encode",) diff --git a/stubs/jwt/encode.pyi b/stubs/jwt/encode.pyi new file mode 100644 index 0000000000..3bfe608a1a --- /dev/null +++ b/stubs/jwt/encode.pyi @@ -0,0 +1,3 @@ +from typing import Any, Dict + +def encode(payload: Dict[Any, Any], key: bytes, algorithm: str) -> str: ... diff --git a/tests/cancun/eip4788_beacon_root/conftest.py b/tests/cancun/eip4788_beacon_root/conftest.py index e6bc99f3d0..7b577eca6e 100644 --- a/tests/cancun/eip4788_beacon_root/conftest.py +++ b/tests/cancun/eip4788_beacon_root/conftest.py @@ -101,7 +101,7 @@ def contract_call_account(call_type: Op, call_value: int, call_gas: int) -> Acco if call_type == Op.CALL or call_type == Op.CALLCODE: contract_call_code += Op.SSTORE( 0x00, # store the result of the contract call in storage[0] - call_type( + call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501 call_gas, Spec.BEACON_ROOTS_ADDRESS, call_value, @@ -115,7 +115,7 @@ def contract_call_account(call_type: Op, call_value: int, call_gas: int) -> Acco # delegatecall and staticcall use one less argument contract_call_code += Op.SSTORE( 0x00, - call_type( + call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501 call_gas, Spec.BEACON_ROOTS_ADDRESS, args_start, diff --git a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py index c2443f71d3..062bb3918f 100644 --- a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py +++ b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py @@ -111,7 +111,7 @@ def precompile_caller_account(call_type: Op, call_gas: int) -> Account: if call_type == Op.CALL or call_type == Op.CALLCODE: precompile_caller_code += Op.SSTORE( 0, - call_type( + call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501 call_gas, Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS, 0x00, @@ -125,7 +125,7 @@ def precompile_caller_account(call_type: Op, call_gas: int) -> Account: # Delegatecall and staticcall use one less argument precompile_caller_code += Op.SSTORE( 0, - call_type( + call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501 call_gas, Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS, 0x00, diff --git a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py index 8d8ccf5475..fe87c81763 100644 --- a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py +++ b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py @@ -89,7 +89,7 @@ def
precompile_caller_account( + copy_opcode_cost(len(precompile_input)) ) if call_type == Op.CALL or call_type == Op.CALLCODE: - precompile_caller_code += call_type( + precompile_caller_code += call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501 call_gas, Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS, 0x00, @@ -101,7 +101,7 @@ def precompile_caller_account( overhead_cost += (PUSH_OPERATIONS_COST * 6) + (CALLDATASIZE_COST * 1) elif call_type == Op.DELEGATECALL or call_type == Op.STATICCALL: # Delegatecall and staticcall use one less argument - precompile_caller_code += call_type( + precompile_caller_code += call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501 call_gas, Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS, 0x00, diff --git a/tests_consume/test_direct.py b/tests_consume/test_direct.py new file mode 100644 index 0000000000..dbc1f5f2ad --- /dev/null +++ b/tests_consume/test_direct.py @@ -0,0 +1,70 @@ +""" +Executes a JSON test fixture directly against a client using a dedicated +client interface similar to geth's EVM 'blocktest' command. +""" + +import re +from pathlib import Path +from typing import Any, List, Optional + +import pytest + +from ethereum_test_tools.spec.consume.types import TestCaseIndexFile, TestCaseStream +from evm_transition_tool import TransitionTool + +statetest_results: dict[Path, List[dict[str, Any]]] = {} + + +def test_blocktest( # noqa: D103 + test_case: TestCaseIndexFile | TestCaseStream, + evm: TransitionTool, + evm_run_single_test: bool, + fixture_path: Path, + test_dump_dir: Optional[Path], +): + fixture_name = None + if evm_run_single_test: + fixture_name = re.escape(test_case.id) + evm.verify_fixture( + test_case.format, + fixture_path, + fixture_name=fixture_name, + debug_output_path=test_dump_dir, + ) + + +@pytest.fixture(scope="function") +def run_statetest( + test_case: TestCaseIndexFile | TestCaseStream, + evm: TransitionTool, + fixture_path: Path, + test_dump_dir: Optional[Path], +): + """ + Run statetest on the json fixture file if the test result is not already cached. + """ + # TODO: Check if all required results have been tested and delete test result data if so. + # TODO: Can we group the tests appropriately so that this works more efficiently with xdist? + if fixture_path not in statetest_results: + json_result = evm.verify_fixture( + test_case.format, + fixture_path, + fixture_name=None, + debug_output_path=test_dump_dir, + ) + statetest_results[fixture_path] = json_result + + +@pytest.mark.usefixtures("run_statetest") +def test_statetest( # noqa: D103 + test_case: TestCaseIndexFile | TestCaseStream, + fixture_path: Path, +): + test_result = [ + test_result + for test_result in statetest_results[fixture_path] + if test_result["name"] == test_case.id + ] + assert len(test_result) < 2, f"Multiple test results for {test_case.id}" + assert len(test_result) == 1, f"Test result for {test_case.id} missing" + assert test_result[0]["pass"], f"State test failed: {test_result[0]['error']}" diff --git a/tests_consume/test_via_engine_api.py b/tests_consume/test_via_engine_api.py new file mode 100644 index 0000000000..c520000f91 --- /dev/null +++ b/tests_consume/test_via_engine_api.py @@ -0,0 +1,24 @@ +""" +A hive simulator that executes blocks against clients using the +`engine_newPayloadVX` method from the Engine API, verifying +the appropriate VALID/INVALID responses. + +Implemented using the pytest framework as a pytest plugin. 
+""" + +import pytest + +from ethereum_test_tools.spec.blockchain.types import HiveFixture + + +@pytest.mark.skip(reason="Not implemented yet.") +def test_via_engine_api(fixture: HiveFixture): + """ + 1. Checks that the genesis block hash of the client matches that of the fixture. + 2. Executes the test case fixture blocks against the client under test using the + `engine_newPayloadVX` method from the Engine API, verifying the appropriate + VALID/INVALID responses. + 3. Performs a forkchoice update to finalize the chain and verify the post state. + 4. Checks that the post state of the client matches that of the fixture. + """ + pass diff --git a/tests_consume/test_via_rlp.py b/tests_consume/test_via_rlp.py new file mode 100644 index 0000000000..27d576c7f2 --- /dev/null +++ b/tests_consume/test_via_rlp.py @@ -0,0 +1,288 @@ +""" +Test a fully instantiated client using RLP-encoded blocks from blockchain tests. + +The test fixtures should have the blockchain test format. The setup sends +the genesis file and RLP-encoded blocks to the client container using hive. +The client consumes these files upon start-up. + +Given a genesis state and a list of RLP-encoded blocks, the test verifies that: +1. The client's genesis block hash matches that defined in the fixture. +2. The client's last block hash matches that defined in the fixture. +""" +import io +import json +import pprint +import time +from typing import Any, Generator, List, Literal, Mapping, Optional, Union, cast + +import pytest +import requests +import rich +from hive.client import Client, ClientType +from hive.testing import HiveTest +from pydantic import BaseModel +from tenacity import retry, stop_after_attempt, wait_exponential + +from ethereum_test_tools.common.base_types import Bytes +from ethereum_test_tools.common.json import to_json +from ethereum_test_tools.spec.blockchain.types import Fixture, FixtureHeader +from pytest_plugins.consume.hive_ruleset import ruleset + + +class TestCaseTimingData(BaseModel): + """ + The times taken to perform the various steps of a test case (seconds). + """ + + __test__ = False + prepare_files: Optional[float] = None # start of test until client start + start_client: Optional[float] = None + get_genesis: Optional[float] = None + get_last_block: Optional[float] = None + stop_client: Optional[float] = None + total: Optional[float] = None + + @staticmethod + def format_float(num: float | None, precision: int = 4) -> str | None: + """ + Format a float to a specific precision in significant figures. + """ + if num is None: + return None + return f"{num:.{precision}f}" + + def formatted(self, precision: int = 4) -> "TestCaseTimingData": + """ + Return a new instance of the model with formatted float values. + """ + data = {field: self.format_float(value, precision) for field, value in self} + return TestCaseTimingData(**data) + + +@pytest.fixture(scope="function") +def t_test_start() -> float: + """ + The time the test started; used to time fixture+file preparation and total time. + """ + return time.perf_counter() + + +@pytest.fixture(scope="function", autouse=True) +def timing_data(request, t_test_start) -> Generator[TestCaseTimingData, None, None]: + """ + Helper to record timing data for various stages of executing test case. 
+ """ + timing_data = TestCaseTimingData() + yield timing_data + timing_data.total = time.perf_counter() - t_test_start + rich.print(f"\nTimings (seconds): {timing_data.formatted()}") + if hasattr(request.node, "rep_call"): # make available for test reports + request.node.rep_call.timings = timing_data + + +@pytest.fixture(scope="function") +@pytest.mark.usefixtures("timing_data") +def client_genesis(fixture: Fixture) -> dict: + """ + Convert the fixture's genesis block header and pre-state to a client genesis state. + """ + genesis = to_json(fixture.genesis) # NOTE: to_json() excludes None values + alloc = to_json(fixture.pre) + # NOTE: nethermind requires account keys without '0x' prefix + genesis["alloc"] = {k.replace("0x", ""): v for k, v in alloc.items()} + return genesis + + +@pytest.fixture(scope="function") +def blocks_rlp(fixture: Fixture) -> List[Bytes]: + """ + A list of the fixture's blocks encoded as RLP. + """ + return [block.rlp for block in fixture.blocks] + + +@pytest.fixture +def buffered_genesis(client_genesis: dict) -> io.BufferedReader: + """ + Create a buffered reader for the genesis block header of the current test + fixture. + """ + genesis_json = json.dumps(client_genesis) + genesis_bytes = genesis_json.encode("utf-8") + return io.BufferedReader(cast(io.RawIOBase, io.BytesIO(genesis_bytes))) + + +@pytest.fixture +def buffered_blocks_rlp(blocks_rlp: List[bytes], start=1) -> List[io.BufferedReader]: + """ + Convert the RLP-encoded blocks of the current test fixture to buffered readers. + """ + block_rlp_files = [] + for i, block_rlp in enumerate(blocks_rlp): + block_rlp_stream = io.BytesIO(block_rlp) + block_rlp_files.append(io.BufferedReader(cast(io.RawIOBase, block_rlp_stream))) + return block_rlp_files + + +@pytest.fixture +def client_files( + buffered_genesis: io.BufferedReader, + buffered_blocks_rlp: list[io.BufferedReader], +) -> Mapping[str, io.BufferedReader]: + """ + Define the files that hive will start the client with. + + The files are specified as a dictionary whose: + - Keys are the target file paths in the client's docker container, and, + - Values are in-memory buffered file objects. + """ + files = {f"/blocks/{i:04d}.rlp": block_rlp for i, block_rlp in enumerate(buffered_blocks_rlp)} + files["/genesis.json"] = buffered_genesis + return files + + +@pytest.fixture +def environment(fixture: Fixture) -> dict: + """ + Define the environment that hive will start the client with using the fork + rules specific for the simulator. + """ + assert fixture.fork in ruleset, f"fork '{fixture.fork}' missing in hive ruleset" + return { + "HIVE_CHAIN_ID": "1", + "HIVE_FORK_DAO_VOTE": "1", + "HIVE_NODETYPE": "full", + **{k: f"{v:d}" for k, v in ruleset[fixture.fork].items()}, + } + + +@pytest.fixture(scope="function") +def client( + hive_test: HiveTest, + client_files: dict, + environment: dict, + client_type: ClientType, + t_test_start: float, + timing_data: TestCaseTimingData, +) -> Generator[Client, None, None]: + """ + Initialize the client with the appropriate files and environment variables. 
+ """ + timing_data.prepare_files = time.perf_counter() - t_test_start + t_start = time.perf_counter() + client = hive_test.start_client( + client_type=client_type, environment=environment, files=client_files + ) + timing_data.start_client = time.perf_counter() - t_start + assert client is not None + yield client + t_start = time.perf_counter() + client.stop() + timing_data.stop_client = time.perf_counter() - t_start + + +BlockNumberType = Union[int, Literal["latest", "earliest", "pending"]] + + +@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10)) +def get_block(client: Client, block_number: BlockNumberType) -> dict: + """ + Retrieve the i-th block from the client using the JSON-RPC API. + Retries up to two times (three attempts total) in case of an error or a timeout, + with exponential backoff. + """ + if isinstance(block_number, int): + block_number_string = hex(block_number) + else: + block_number_string = block_number + url = f"http://{client.ip}:8545" + payload = { + "jsonrpc": "2.0", + "method": "eth_getBlockByNumber", + "params": [block_number_string, False], + "id": 1, + } + headers = {"Content-Type": "application/json"} + + response = requests.post(url, json=payload, headers=headers) + response.raise_for_status() + result = response.json().get("result") + + if result is None or "error" in result: + error_info: Any = "result is None; and therefore contains no error info" + error_code = None + if result is not None: + error_info = result["error"] + error_code = error_info["code"] + raise Exception( + f"Error calling JSON RPC eth_getBlockByNumber, code: {error_code}, " + f"message: {error_info}" + ) + + return result + + +def compare_models(expected: FixtureHeader, got: FixtureHeader) -> dict: + """ + Compare two FixtureHeader model instances and return their differences. + """ + differences = {} + for (exp_name, exp_value), (got_name, got_value) in zip(expected, got): + if exp_value != got_value: + differences[exp_name] = { + "expected ": str(exp_value), + "got (via rpc)": str(got_value), + } + return differences + + +class GenesisBlockMismatchException(Exception): + """ + Used when the client's genesis block hash does not match the fixture. + """ + + def __init__(self, *, expected_header: FixtureHeader, got_header: FixtureHeader): + message = ( + "Genesis block hash mismatch.\n" + f"Expected: {expected_header.block_hash}\n" + f" Got: {got_header.block_hash}." + ) + differences = compare_models(expected_header, got_header) + if differences: + message += ( + "\n\nAdditionally, there are differences between the expected and received " + "genesis block header fields:\n" + f"{pprint.pformat(differences, indent=4)}" + ) + else: + message += ( + "There were no differences in the expected and received genesis block headers." + ) + super().__init__(message) + + +def test_via_rlp( + client: Client, + fixture: Fixture, + client_genesis: dict, + timing_data: TestCaseTimingData, +): + """ + Verify that the client's state as calculated from the specified genesis state + and blocks matches those defined in the test fixture. + + Test: + + 1. The client's genesis block hash matches `fixture.genesis.block_hash`. + 2. The client's last block's hash matches `fixture.last_block_hash`. 
+ """ + t_start = time.perf_counter() + genesis_block = get_block(client, 0) + timing_data.get_genesis = time.perf_counter() - t_start + if genesis_block["hash"] != str(fixture.genesis.block_hash): + raise GenesisBlockMismatchException( + expected_header=fixture.genesis, got_header=FixtureHeader(**genesis_block) + ) + block = get_block(client, "latest") + timing_data.get_last_block = time.perf_counter() - timing_data.get_genesis - t_start + assert block["hash"] == str(fixture.last_block_hash), "hash mismatch in last block" diff --git a/tox.ini b/tox.ini index 60b8c6ca48..1d70aad607 100644 --- a/tox.ini +++ b/tox.ini @@ -18,7 +18,7 @@ extras = test lint -src = src setup.py +src = src setup.py tests_consume commands = fname8 {[testenv:framework]src} diff --git a/whitelist.txt b/whitelist.txt index c994a24314..e8738c29a7 100644 --- a/whitelist.txt +++ b/whitelist.txt @@ -152,11 +152,13 @@ HeaderNonce hexary HexNumber hexsha +hexbytes homebrew html htmlpath https hyperledger +iat ignoreRevsFile img incrementing @@ -193,13 +195,14 @@ marioevz markdownlint md metaclass -Misspelled words: +mixhash mkdocs mkdocstrings mypy namespace nav ncheck +nethermind nexternal nGo nJSON @@ -208,6 +211,7 @@ NOP NOPs nPython nSHA +num number ommer ommers @@ -217,7 +221,9 @@ origin parseable pathlib pdb +perf petersburg +pformat png Pomerantz ppa @@ -253,7 +259,9 @@ returndatacopy returndatasize returncontract rlp +rootdir rpc +ruleset runtime sandboxed secp256k1 @@ -355,19 +363,26 @@ copytree dedent dest exc +extractall fixturenames fspath funcargs +getfixturevalue getgroup getoption +Golang groupby hookimpl hookwrapper IEXEC IGNORECASE +inifile +isatty iterdir ljust +longreprtext makepyfile +makereport metafunc modifyitems nodeid @@ -381,12 +396,14 @@ params parametrize parametrizer parametrizers +parametrization popen prevrandao pytester pytestmark readline regexes +removesuffix reportinfo ret rglob @@ -398,7 +415,10 @@ subclasses subcommand substring substrings +tf +teardown testdir +teststatus tmpdir tryfirst trylast @@ -584,3 +604,6 @@ modexp fi url gz +tT +istanbul +berlin \ No newline at end of file