diff --git a/teuthology/misc.py b/teuthology/misc.py
index a1b3cf6819..c3d324a4bc 100644
--- a/teuthology/misc.py
+++ b/teuthology/misc.py
@@ -18,11 +18,10 @@
 from sys import stdin
 import pprint
 import datetime
-from types import MappingProxyType
 
 from tarfile import ReadError
-from typing import Optional
+from typing import Optional, TypeVar
 
 from teuthology.util.compat import urljoin, urlopen, HTTPError
 
@@ -110,7 +109,7 @@ def config_file(string):
     return config_dict
 
 
-def merge_configs(config_paths):
+def merge_configs(config_paths) -> dict:
     """ Takes one or many paths to yaml config files and merges them
     together, returning the result.
     """
@@ -123,7 +122,7 @@ def merge_configs(config_paths):
             continue
         else:
             with open(conf_path) as partial_file:
-                partial_dict = yaml.safe_load(partial_file)
+                partial_dict: dict = yaml.safe_load(partial_file)
         try:
             conf_dict = deep_merge(conf_dict, partial_dict)
         except Exception:
@@ -986,7 +985,8 @@ def replace_all_with_clients(cluster, config):
     return norm_config
 
 
-def deep_merge(a, b):
+DeepMerge = TypeVar('DeepMerge')
+def deep_merge(a: DeepMerge, b: DeepMerge) -> DeepMerge:
     """
     Deep Merge. If a and b are both lists, all elements in b are
     added into a. If a and b are both dictionaries, elements in b are
@@ -996,21 +996,18 @@ def deep_merge(a, b):
     """
     if b is None:
         return a
-    elif isinstance(a, list):
+    if a is None:
+        return deep_merge(b.__class__(), b)
+    if isinstance(a, list):
         assert isinstance(b, list)
         a.extend(b)
         return a
-    elif isinstance(a, dict):
-        assert isinstance(b, dict) or isinstance(b, MappingProxyType)
+    if isinstance(a, dict):
+        assert isinstance(b, dict)
         for (k, v) in b.items():
             a[k] = deep_merge(a.get(k), v)
         return a
-    elif isinstance(b, dict) or isinstance(b, list):
-        return deep_merge(b.__class__(), b)
-    elif isinstance(b, MappingProxyType):
-        return deep_merge(dict(), b)
-    else:
-        return b
+    return b
 
 
 def get_valgrind_args(testdir, name, preamble, v, exit_on_first_error=True):
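[Editor's note: a quick usage sketch of the reworked `deep_merge`, not part of the patch; it assumes only that teuthology is importable. It walks the three branches the new annotations describe: list extension, recursive dict merge, and the `a is None` copy path that replaces the old `MappingProxyType` special-casing.]

```python
from teuthology.misc import deep_merge

# dicts merge recursively; values from b win on conflicts
base = {"tasks": [{"install": None}], "meta": {"owner": "a"}}
extra = {"meta": {"owner": "b", "prio": 1}}
assert deep_merge(base, extra)["meta"] == {"owner": "b", "prio": 1}

# lists are extended in place
assert deep_merge([1], [2, 3]) == [1, 2, 3]

# when a is None, b is copied into a fresh instance of b's class; the
# MappingProxyType special cases are gone entirely, which is why
# suite/merge.py below now passes dict(TEUTHOLOGY_TEMPLATE) rather than
# the read-only proxy itself
assert deep_merge(None, {"k": "v"}) == {"k": "v"}
```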
""" status = get_status(summary) passed = status == 'pass' @@ -326,8 +326,6 @@ def main(args): os_version = args["--os-version"] interactive_on_error = args["--interactive-on-error"] - set_up_logging(verbose, archive) - # print the command being ran log.debug("Teuthology command: {0}".format(get_teuthology_command(args))) @@ -338,6 +336,10 @@ def main(args): if archive is not None and 'archive_path' not in config: config['archive_path'] = archive + elif archive is None and 'archive_path' in config: + archive = args['--archive'] = config['archive_path'] + + set_up_logging(verbose, archive) write_initial_metadata(archive, config, name, description, owner) report.try_push_job_info(config, dict(status='running')) @@ -400,10 +402,10 @@ def main(args): # FIXME this should become more generic, and the keys should use # '_' uniformly if fake_ctx.config.get('interactive-on-error'): - teuthology.config.config.ctx = fake_ctx + teuth_config.config.ctx = fake_ctx try: run_tasks(tasks=config['tasks'], ctx=fake_ctx) finally: # print to stdout the results and possibly send an email on any errors - report_outcome(config, archive, fake_ctx.summary, fake_ctx) + report_outcome(config, archive, fake_ctx.summary) diff --git a/teuthology/suite/merge.py b/teuthology/suite/merge.py index 4ae9e05bf7..f13058e47d 100644 --- a/teuthology/suite/merge.py +++ b/teuthology/suite/merge.py @@ -14,7 +14,7 @@ TEUTHOLOGY_TEMPLATE = MappingProxyType({ "teuthology": { "fragments_dropped": [], - "meta": MappingProxyType({}), + "meta": {}, "postmerge": [], } }) @@ -114,7 +114,6 @@ def config_merge(configs, suite_name=None, **kwargs): postmerge scripts. Logically, if a filter matches then reject will drop the entire job (config) from the list. """ - seed = kwargs.setdefault('seed', 1) if not isinstance(seed, int): log.debug("no valid seed input: using 1") @@ -130,7 +129,7 @@ def config_merge(configs, suite_name=None, **kwargs): desc = combine_path(suite_name, desc) yaml_complete_obj = {} - deep_merge(yaml_complete_obj, TEUTHOLOGY_TEMPLATE) + deep_merge(yaml_complete_obj, dict(TEUTHOLOGY_TEMPLATE)) for path in paths: if path not in yaml_cache: with open(path) as f: diff --git a/teuthology/task/__init__.py b/teuthology/task/__init__.py index eb1d04c8b8..98330a7bf6 100644 --- a/teuthology/task/__init__.py +++ b/teuthology/task/__init__.py @@ -24,7 +24,7 @@ class MySubtask(MyTask): name = 'mytask.mysubtask' """ - def __init__(self, ctx=None, config=None): + def __init__(self, ctx, config=None): if not hasattr(self, 'name'): self.name = self.__class__.__name__.lower() self.log = log diff --git a/teuthology/task/ansible.py b/teuthology/task/ansible.py index d27137d12c..29d1170d1a 100644 --- a/teuthology/task/ansible.py +++ b/teuthology/task/ansible.py @@ -14,30 +14,12 @@ from teuthology.config import config as teuth_config from teuthology.exceptions import CommandFailedError, AnsibleFailedError from teuthology.job_status import set_status - from teuthology.task import Task +from teuthology.util.loggerfile import LoggerFile log = logging.getLogger(__name__) -class LoggerFile(object): - """ - A thin wrapper around a logging.Logger instance that provides a file-like - interface. 
diff --git a/teuthology/suite/merge.py b/teuthology/suite/merge.py
index 4ae9e05bf7..f13058e47d 100644
--- a/teuthology/suite/merge.py
+++ b/teuthology/suite/merge.py
@@ -14,7 +14,7 @@
 TEUTHOLOGY_TEMPLATE = MappingProxyType({
   "teuthology": {
     "fragments_dropped": [],
-    "meta": MappingProxyType({}),
+    "meta": {},
     "postmerge": [],
   }
 })
@@ -114,7 +114,6 @@ def config_merge(configs, suite_name=None, **kwargs):
     postmerge scripts. Logically, if a filter matches then reject will drop
     the entire job (config) from the list.
     """
-
     seed = kwargs.setdefault('seed', 1)
     if not isinstance(seed, int):
         log.debug("no valid seed input: using 1")
@@ -130,7 +129,7 @@ def config_merge(configs, suite_name=None, **kwargs):
         desc = combine_path(suite_name, desc)
 
         yaml_complete_obj = {}
-        deep_merge(yaml_complete_obj, TEUTHOLOGY_TEMPLATE)
+        deep_merge(yaml_complete_obj, dict(TEUTHOLOGY_TEMPLATE))
         for path in paths:
             if path not in yaml_cache:
                 with open(path) as f:
diff --git a/teuthology/task/__init__.py b/teuthology/task/__init__.py
index eb1d04c8b8..98330a7bf6 100644
--- a/teuthology/task/__init__.py
+++ b/teuthology/task/__init__.py
@@ -24,7 +24,7 @@ class MySubtask(MyTask):
         name = 'mytask.mysubtask'
     """
 
-    def __init__(self, ctx=None, config=None):
+    def __init__(self, ctx, config=None):
         if not hasattr(self, 'name'):
             self.name = self.__class__.__name__.lower()
         self.log = log
diff --git a/teuthology/task/ansible.py b/teuthology/task/ansible.py
index d27137d12c..29d1170d1a 100644
--- a/teuthology/task/ansible.py
+++ b/teuthology/task/ansible.py
@@ -14,30 +14,12 @@
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import CommandFailedError, AnsibleFailedError
 from teuthology.job_status import set_status
-
 from teuthology.task import Task
+from teuthology.util.loggerfile import LoggerFile
 
 log = logging.getLogger(__name__)
 
 
-class LoggerFile(object):
-    """
-    A thin wrapper around a logging.Logger instance that provides a file-like
-    interface.
-
-    Used by Ansible.execute_playbook() when it calls pexpect.run()
-    """
-    def __init__(self, logger, level):
-        self.logger = logger
-        self.level = level
-
-    def write(self, string):
-        self.logger.log(self.level, string.decode('utf-8', 'ignore'))
-
-    def flush(self):
-        pass
-
-
 class FailureAnalyzer:
     def analyze(self, failure_log):
         failure_obj = yaml.safe_load(failure_log)
diff --git a/teuthology/task/cephmetrics.py b/teuthology/task/cephmetrics.py
index fee4ad6b67..813d266add 100644
--- a/teuthology/task/cephmetrics.py
+++ b/teuthology/task/cephmetrics.py
@@ -5,8 +5,9 @@
 
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import CommandFailedError
+from teuthology.task.ansible import Ansible
+from teuthology.util.loggerfile import LoggerFile
 
-from teuthology.task.ansible import Ansible, LoggerFile
 
 log = logging.getLogger(__name__)
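[Editor's note: since `Task.__init__` above now requires `ctx`, here is a minimal subclass sketch following the docstring conventions in `teuthology/task/__init__.py`; `MyTask` and the driver comment are hypothetical, not part of the patch.]

```python
from teuthology.task import Task

class MyTask(Task):
    name = 'mytask'

    def begin(self):
        super().begin()
        self.log.info("running with config: %s", self.config)

# MyTask() now raises TypeError; a ctx is mandatory, config stays optional:
#   with MyTask(ctx=ctx, config={'key': 'value'}):
#       ...
```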
diff --git a/teuthology/task/tests/__init__.py b/teuthology/task/tests/__init__.py
index 43c6c11699..b558341cf6 100644
--- a/teuthology/task/tests/__init__.py
+++ b/teuthology/task/tests/__init__.py
@@ -1,40 +1,36 @@
 """
-This task is used to integration test teuthology. Including this
-task in your yaml config will execute pytest which finds any tests in
-the current directory. Each test that is discovered will be passed the
-teuthology ctx and config args that each teuthology task usually gets.
-This allows the tests to operate against the cluster.
+This task runs teuthology's unit tests and integration tests.
+It can run in one of two modes: "py" or "cli". The latter executes py.test in a
+separate process, whereas the former invokes it in the teuthology job's Python
+process.
+If the running job has remotes available to it, it will attempt to run
+integration tests. Note that this requires running in "py" mode (the default).
 
 An example::
 
     tasks
     - tests:
-
 """
 import logging
+import os
+import pathlib
+import pexpect
 
 import pytest
 
 from teuthology.job_status import set_status
+from teuthology.task import Task
+from teuthology.util.loggerfile import LoggerFile
 
 log = logging.getLogger(__name__)
 
 
-@pytest.fixture
-def ctx():
-    return {}
-
-
-@pytest.fixture
-def config():
-    return []
-
-
 class TeuthologyContextPlugin(object):
     def __init__(self, ctx, config):
         self.ctx = ctx
         self.config = config
         self.failures = list()
+        self.stats = dict()
 
     # this is pytest hook for generating tests with custom parameters
     def pytest_generate_tests(self, metafunc):
@@ -45,63 +41,130 @@ def pytest_generate_tests(self, metafunc):
 
     # log the outcome of each test
    @pytest.hookimpl(hookwrapper=True)
-    def pytest_runtest_makereport(self, item, call):
+    def pytest_runtest_makereport(self, item: pytest.Item, call: pytest.CallInfo):
         outcome = yield
         report = outcome.get_result()
-
-        # after the test has been called, get its report and log it
-        if call.when == 'call':
-            # item.location[0] is a slash delimeted path to the test file
-            # being ran. We only want the portion after teuthology.task.tests
-            test_path = item.location[0].replace("/", ".").split(".")
-            test_path = ".".join(test_path[4:-1])
-            # removes the string '[ctx0, config0]' after the test name
-            test_name = item.location[2].split("[")[0]
-            name = "{path}:{name}".format(path=test_path, name=test_name)
-            if report.passed:
-                log.info("{name} Passed".format(name=name))
-            elif report.skipped:
-                log.info("{name} {info}".format(
-                    name=name,
-                    info=call.excinfo.exconly()
-                ))
+        test_path = item.location[0]
+        line_no = item.location[1]
+        test_name = item.location[2]
+        name = f"{test_path}:{line_no}:{test_name}"
+        log_msg = f"{report.outcome.upper()} {name}"
+        outcome_str = report.outcome.lower()
+        self.stats.setdefault(outcome_str, 0)
+        self.stats[outcome_str] += 1
+        if outcome_str in ['passed', 'skipped']:
+            if call.when == 'call':
+                log.info(log_msg)
             else:
-                # TODO: figure out a way to log the traceback
-                log.error("{name} Failed:\n {info}".format(
-                    name=name,
-                    info=call.excinfo.exconly()
-                ))
-                failure = "{name}: {err}".format(
-                    name=name,
-                    err=call.excinfo.exconly().replace("\n", "")
-                )
-                self.failures.append(failure)
-                self.ctx.summary['failure_reason'] = self.failures
-
-        return report
-
-
-def task(ctx, config):
+                log.info(f"----- {name} {call.when} -----")
+        else:
+            log_msg = f"{log_msg}:{call.when}"
+            if call.excinfo:
+                self.failures.append(name)
+                log_msg = f"{log_msg}: {call.excinfo.getrepr()}"
+            else:
+                self.failures.append(log_msg)
+            log.error(log_msg)
+
+        return
+
+
+# https://docs.pytest.org/en/stable/reference/exit-codes.html
+exit_codes = {
+    0: "All tests were collected and passed successfully",
+    1: "Tests were collected and run but some of the tests failed",
+    2: "Test execution was interrupted by the user",
+    3: "Internal error happened while executing tests",
+    4: "pytest command line usage error",
+    5: "No tests were collected",
+}
+
+
+class Tests(Task):
     """
     Use pytest to recurse through this directory, finding any tests
     and then executing them with the teuthology ctx and config args.
     Your tests must follow standard pytest conventions to be discovered.
+
+    If config["mode"] == "py" (the default), the tests run in the job's process.
+    If config["mode"] == "cli", py.test is invoked as a subprocess.
     """
-    try:
-        status = pytest.main(
-            args=[
-                '-q',
-                '--pyargs', __name__, 'teuthology.test'
-            ],
-            plugins=[TeuthologyContextPlugin(ctx, config)]
-        )
-    except Exception:
-        log.exception("Saw non-test failure!")
-        set_status(ctx.summary, "dead")
-    else:
-        if status == 0:
+    base_args = ['-v', '--color=no']
+
+    def setup(self):
+        super().setup()
+        mode = self.config.get("mode", "py")
+        assert mode in ["py", "cli"], "mode must be either 'py' or 'cli'"
+        if mode == "cli":
+            # integration tests need ctx from this process, so we need to invoke
+            # pytest via python to be able to pass them
+            assert len(self.cluster.remotes) == 0, \
+                "Tests requiring remote nodes conflict with CLI mode"
+        self.mode = mode
+        self.stats = dict()
+        self.orig_curdir = os.curdir
+
+    def begin(self):
+        super().begin()
+        try:
+            if self.mode == "py":
+                self.status, self.failures = self.run_py()
+            else:
+                self.status, self.failures = self.run_cli()
+        except Exception as e:
+            log.exception("Saw non-test failure!")
+            self.ctx.summary['failure_reason'] = str(e)
+            set_status(self.ctx.summary, "dead")
+
+    def end(self):
+        if os.curdir != self.orig_curdir:
+            os.chdir(self.orig_curdir)
+        if self.stats:
+            log.info(f"Stats: {self.stats}")
+        if self.status == 0:
             log.info("OK. All tests passed!")
-            set_status(ctx.summary, "pass")
+            set_status(self.ctx.summary, "pass")
         else:
-            log.error("FAIL. Saw test failures...")
-            set_status(ctx.summary, "fail")
+            status_msg = str(self.status)
+            if self.status in exit_codes:
+                status_msg = f"{status_msg}: {exit_codes[self.status]}"
+            log.error(f"FAIL (exit code {status_msg})")
+            if self.failures:
+                msg = f"{len(self.failures)} Failures: {self.failures}"
+                self.ctx.summary['failure_reason'] = msg
+                log.error(msg)
+            set_status(self.ctx.summary, "fail")
+        super().end()
+
+    def run_cli(self):
+        pytest_args = self.base_args + ['./teuthology/test', './scripts']
+        if len(self.cluster.remotes):
+            pytest_args.append('./teuthology/task/tests')
+        self.log.info(f"pytest args: {pytest_args}")
+        cwd = str(pathlib.Path(__file__).parents[3])
+        log.info(f"pytest cwd: {cwd}")
+        _, status = pexpect.run(
+            "py.test " + " ".join(pytest_args),
+            cwd=cwd,
+            withexitstatus=True,
+            timeout=None,
+            logfile=LoggerFile(self.log, logging.INFO),
+        )
+        return status, []
+
+    def run_py(self):
+        pytest_args = self.base_args + ['--pyargs', 'teuthology', 'scripts']
+        if len(self.cluster.remotes):
+            pytest_args.append(__name__)
+        self.log.info(f"pytest args: {pytest_args}")
+        context_plugin = TeuthologyContextPlugin(self.ctx, self.config)
+        # the cwd needs to change so that FakeArchive can find files in this repo
+        os.chdir(str(pathlib.Path(__file__).parents[3]))
+        status = pytest.main(
+            args=pytest_args,
+            plugins=[context_plugin],
+        )
+        self.stats = context_plugin.stats
+        return status, context_plugin.failures
+
+task = Tests
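[Editor's note: the subtlest piece of this file is the `hookwrapper` plugin. A minimal standalone sketch of the same pattern follows (illustrative names, assuming only that pytest is installed): everything before `yield` runs before pytest builds the report, and `outcome.get_result()` returns the finished `TestReport`. The hook fires once per phase (setup, call, teardown), which is why the real plugin keys its log messages off `call.when`.]

```python
import pytest

class OutcomeCounter:
    """Tallies outcomes the same way TeuthologyContextPlugin.stats does."""
    def __init__(self):
        self.stats = {}

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        outcome = yield                  # pytest builds the report here
        report = outcome.get_result()    # the finished TestReport
        # one report arrives per phase: setup, call, and teardown
        self.stats[report.outcome] = self.stats.get(report.outcome, 0) + 1

# usage: pytest.main(args=["-q"], plugins=[OutcomeCounter()])
```

The closing `task = Tests` assignment appears to be what keeps the YAML entry `- tests:` working: the task runner resolves a module attribute named `task`, and the class now fills that role.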
All tests passed!") - set_status(ctx.summary, "pass") + set_status(self.ctx.summary, "pass") else: - log.error("FAIL. Saw test failures...") - set_status(ctx.summary, "fail") + status_msg = str(self.status) + if self.status in exit_codes: + status_msg = f"{status_msg}: {exit_codes[self.status]}" + log.error(f"FAIL (exit code {status_msg})") + if self.failures: + msg = f"{len(self.failures)} Failures: {self.failures}" + self.ctx.summary['failure_reason'] = msg + log.error(msg) + set_status(self.ctx.summary, "fail") + super().end() + + def run_cli(self): + pytest_args = self.base_args + ['./teuthology/test', './scripts'] + if len(self.cluster.remotes): + pytest_args.append('./teuthology/task/tests') + self.log.info(f"pytest args: {pytest_args}") + cwd = str(pathlib.Path(__file__).parents[3]) + log.info(f"pytest cwd: {cwd}") + _, status = pexpect.run( + "py.test " + " ".join(pytest_args), + cwd=cwd, + withexitstatus=True, + timeout=None, + logfile=LoggerFile(self.log, logging.INFO), + ) + return status, [] + + def run_py(self): + pytest_args = self.base_args + ['--pyargs', 'teuthology', 'scripts'] + if len(self.cluster.remotes): + pytest_args.append(__name__) + self.log.info(f"pytest args: {pytest_args}") + context_plugin = TeuthologyContextPlugin(self.ctx, self.config) + # the cwd needs to change so that FakeArchive can find files in this repo + os.chdir(str(pathlib.Path(__file__).parents[3])) + status = pytest.main( + args=pytest_args, + plugins=[context_plugin], + ) + self.stats = context_plugin.stats + return status, context_plugin.failures + +task = Tests diff --git a/teuthology/test/task/test_ansible.py b/teuthology/test/task/test_ansible.py index 9f378b4804..1b7afc01d6 100644 --- a/teuthology/test/task/test_ansible.py +++ b/teuthology/test/task/test_ansible.py @@ -68,7 +68,7 @@ def setup_method(self): self.ctx.cluster.add(Remote('user@remote2'), ['role2']) self.ctx.config = dict() self.ctx.summary = dict() - self.ctx.archive = '../' + self.ctx.archive = "" self.task_config = dict(playbook=[]) self.start_patchers() diff --git a/teuthology/test/test_report.py b/teuthology/test/test_report.py index 342cebdd0e..a8535dfc8e 100644 --- a/teuthology/test/test_report.py +++ b/teuthology/test/test_report.py @@ -1,77 +1,86 @@ -import yaml import json +import pytest +import yaml + from teuthology.test import fake_archive from teuthology import report -class TestSerializer(object): - def setup_method(self): - self.archive = fake_archive.FakeArchive() - self.archive.setup() - self.archive_base = self.archive.archive_base - self.reporter = report.ResultsReporter(archive_base=self.archive_base) +@pytest.fixture +def archive(tmp_path): + archive = fake_archive.FakeArchive(archive_base=str(tmp_path)) + yield archive + archive.teardown() - def teardown_method(self): - self.archive.teardown() - def test_all_runs_one_run(self): - run_name = "test_all_runs" - yaml_path = "examples/3node_ceph.yaml" - job_count = 3 - self.archive.create_fake_run(run_name, job_count, yaml_path) - assert [run_name] == self.reporter.serializer.all_runs - - def test_all_runs_three_runs(self): - run_count = 3 - runs = {} - for i in range(run_count): - run_name = "run #%s" % i - yaml_path = "examples/3node_ceph.yaml" - job_count = 3 - job_ids = self.archive.create_fake_run( - run_name, - job_count, - yaml_path) - runs[run_name] = job_ids - assert sorted(runs.keys()) == sorted(self.reporter.serializer.all_runs) - - def test_jobs_for_run(self): - run_name = "test_jobs_for_run" +@pytest.fixture(autouse=True) +def reporter(archive): + 
diff --git a/teuthology/test/test_report.py b/teuthology/test/test_report.py
index 342cebdd0e..a8535dfc8e 100644
--- a/teuthology/test/test_report.py
+++ b/teuthology/test/test_report.py
@@ -1,77 +1,86 @@
-import yaml
 import json
+
+import pytest
+import yaml
+
 from teuthology.test import fake_archive
 from teuthology import report
 
 
-class TestSerializer(object):
-    def setup_method(self):
-        self.archive = fake_archive.FakeArchive()
-        self.archive.setup()
-        self.archive_base = self.archive.archive_base
-        self.reporter = report.ResultsReporter(archive_base=self.archive_base)
+@pytest.fixture
+def archive(tmp_path):
+    archive = fake_archive.FakeArchive(archive_base=str(tmp_path))
+    yield archive
+    archive.teardown()
 
-    def teardown_method(self):
-        self.archive.teardown()
 
-    def test_all_runs_one_run(self):
-        run_name = "test_all_runs"
-        yaml_path = "examples/3node_ceph.yaml"
-        job_count = 3
-        self.archive.create_fake_run(run_name, job_count, yaml_path)
-        assert [run_name] == self.reporter.serializer.all_runs
-
-    def test_all_runs_three_runs(self):
-        run_count = 3
-        runs = {}
-        for i in range(run_count):
-            run_name = "run #%s" % i
-            yaml_path = "examples/3node_ceph.yaml"
-            job_count = 3
-            job_ids = self.archive.create_fake_run(
-                run_name,
-                job_count,
-                yaml_path)
-            runs[run_name] = job_ids
-        assert sorted(runs.keys()) == sorted(self.reporter.serializer.all_runs)
-
-    def test_jobs_for_run(self):
-        run_name = "test_jobs_for_run"
+@pytest.fixture(autouse=True)
+def reporter(archive):
+    archive.setup()
+    return report.ResultsReporter(archive_base=archive.archive_base)
+
+
+def test_all_runs_one_run(archive, reporter):
+    run_name = "test_all_runs"
+    yaml_path = "examples/3node_ceph.yaml"
+    job_count = 3
+    archive.create_fake_run(run_name, job_count, yaml_path)
+    assert [run_name] == reporter.serializer.all_runs
+
+
+def test_all_runs_three_runs(archive, reporter):
+    run_count = 3
+    runs = {}
+    for i in range(run_count):
+        run_name = "run #%s" % i
         yaml_path = "examples/3node_ceph.yaml"
         job_count = 3
-        jobs = self.archive.create_fake_run(run_name, job_count, yaml_path)
-        job_ids = [str(job['job_id']) for job in jobs]
+        job_ids = archive.create_fake_run(
+            run_name,
+            job_count,
+            yaml_path)
+        runs[run_name] = job_ids
+    assert sorted(runs.keys()) == sorted(reporter.serializer.all_runs)
 
-        got_jobs = self.reporter.serializer.jobs_for_run(run_name)
-        assert sorted(job_ids) == sorted(got_jobs.keys())
 
-    def test_running_jobs_for_run(self):
-        run_name = "test_jobs_for_run"
-        yaml_path = "examples/3node_ceph.yaml"
-        job_count = 10
-        num_hung = 3
-        self.archive.create_fake_run(run_name, job_count, yaml_path,
-                                     num_hung=num_hung)
+def test_jobs_for_run(archive, reporter):
+    run_name = "test_jobs_for_run"
+    yaml_path = "examples/3node_ceph.yaml"
+    job_count = 3
+    jobs = archive.create_fake_run(run_name, job_count, yaml_path)
+    job_ids = [str(job['job_id']) for job in jobs]
 
-        got_jobs = self.reporter.serializer.running_jobs_for_run(run_name)
-        assert len(got_jobs) == num_hung
+    got_jobs = reporter.serializer.jobs_for_run(run_name)
+    assert sorted(job_ids) == sorted(got_jobs.keys())
 
-    def test_json_for_job(self):
-        run_name = "test_json_for_job"
-        yaml_path = "examples/3node_ceph.yaml"
-        job_count = 1
-        jobs = self.archive.create_fake_run(run_name, job_count, yaml_path)
-        job = jobs[0]
-
-        with open(yaml_path) as yaml_file:
-            obj_from_yaml = yaml.safe_load(yaml_file)
-        full_obj = obj_from_yaml.copy()
-        full_obj.update(job['info'])
-        full_obj.update(job['summary'])
-
-        out_json = self.reporter.serializer.json_for_job(
-            run_name, str(job['job_id']))
-        out_obj = json.loads(out_json)
-        assert full_obj == out_obj
+
+def test_running_jobs_for_run(archive, reporter):
+    run_name = "test_jobs_for_run"
+    yaml_path = "examples/3node_ceph.yaml"
+    job_count = 10
+    num_hung = 3
+    archive.create_fake_run(run_name, job_count, yaml_path,
+                            num_hung=num_hung)
+
+    got_jobs = reporter.serializer.running_jobs_for_run(run_name)
+    assert len(got_jobs) == num_hung
+
+
+def test_json_for_job(archive, reporter):
+    run_name = "test_json_for_job"
+    yaml_path = "examples/3node_ceph.yaml"
+    job_count = 1
+    jobs = archive.create_fake_run(run_name, job_count, yaml_path)
+    job = jobs[0]
+
+    with open(yaml_path) as yaml_file:
+        obj_from_yaml = yaml.safe_load(yaml_file)
+    full_obj = obj_from_yaml.copy()
+    full_obj.update(job['info'])
+    full_obj.update(job['summary'])
+
+    out_json = reporter.serializer.json_for_job(
+        run_name, str(job['job_id']))
+    out_obj = json.loads(out_json)
+    assert full_obj == out_obj
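[Editor's note: the rewrite above swaps xunit-style setup_method/teardown_method for pytest fixtures. Two features do the heavy lifting: the built-in `tmp_path` fixture, which hands each test a fresh temporary directory (replacing FakeArchive's self-managed archive base), and yield-fixtures, where code after `yield` runs as teardown. The pattern in isolation, a sketch rather than project code:]

```python
import pytest

@pytest.fixture
def workdir(tmp_path):
    # setup: runs before the test body; tmp_path is a pathlib.Path
    d = tmp_path / "archive"
    d.mkdir()
    yield d           # injected into any test that names `workdir`
    # teardown: runs after the test, even on failure
    # (tmp_path itself is cleaned up by pytest)

def test_workdir_exists(workdir):
    assert workdir.is_dir()
```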
diff --git a/teuthology/test/test_run.py b/teuthology/test/test_run.py
index c176c8f653..c2bcff4abe 100644
--- a/teuthology/test/test_run.py
+++ b/teuthology/test/test_run.py
@@ -1,7 +1,7 @@
 import pytest
 import docopt
 
-from unittest.mock import patch, call, Mock
+from unittest.mock import patch, call
 
 from teuthology import run
 from scripts import run as scripts_run
@@ -133,13 +133,12 @@ def test_fetch_tasks_if_needed(self, m_fetch_qa_suite):
     @patch("sys.exit")
     def test_report_outcome(self, m_sys_exit, m_open, m_email_results,
                             m_try_push_job_info, m_safe_dump, m_get_status):
         m_get_status.return_value = "fail"
-        fake_ctx = Mock()
         summary = {"failure_reason": "reasons"}
         summary_dump = "failure_reason: reasons\n"
         config = {"email-on-error": True}
         config_dump = "email-on-error: true\n"
         m_safe_dump.side_effect = [None, summary_dump, config_dump]
-        run.report_outcome(config, "the/archive/path", summary, fake_ctx)
+        run.report_outcome(config, "the/archive/path", summary)
         m_try_push_job_info.assert_called_with(config, summary)
         m_open.assert_called_with("the/archive/path/summary.yaml", "w")
         assert m_email_results.called
diff --git a/teuthology/util/loggerfile.py b/teuthology/util/loggerfile.py
new file mode 100644
index 0000000000..3dd786258d
--- /dev/null
+++ b/teuthology/util/loggerfile.py
@@ -0,0 +1,19 @@
+import logging
+
+
+class LoggerFile(object):
+    """
+    A thin wrapper around a logging.Logger instance that provides a file-like
+    interface.
+
+    Used by Ansible.execute_playbook() when it calls pexpect.run()
+    """
+    def __init__(self, logger: logging.Logger, level: int):
+        self.logger = logger
+        self.level = level
+
+    def write(self, string):
+        self.logger.log(self.level, string.decode('utf-8', 'ignore'))
+
+    def flush(self):
+        pass
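[Editor's note: to close, a usage sketch for the relocated `LoggerFile`. It mirrors how `Tests.run_cli` and `Ansible.execute_playbook` drive it; the echo command is only illustrative. `pexpect.run()` feeds the child's output, as bytes, to `logfile`, and `LoggerFile.write()` decodes each chunk and forwards it to the logger.]

```python
import logging

import pexpect

from teuthology.util.loggerfile import LoggerFile

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("child")

# every chunk the child prints is relayed through the logger at INFO
output, status = pexpect.run(
    "echo hello",
    withexitstatus=True,
    logfile=LoggerFile(log, logging.INFO),
)
assert status == 0
```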