diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 048801cb8..ab7d79918 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,13 +8,12 @@ jobs: fail-fast: false matrix: toxenv: - - py36,docs - - cluster_itests + - py38,docs steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.8 # GHA won't setup tox for us and we use tox-pip-extensions for venv-update - run: pip install tox==3.2 tox-pip-extensions==1.3.0 # there are no pre-built wheels for bsddb3, so we need to install @@ -32,12 +31,12 @@ jobs: strategy: fail-fast: false matrix: - dist: [bionic] + dist: [bionic, jammy] steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.8 # the container provided by GitHub doesn't include utilities # needed for dpkg building, so we need to install `devscripts` # to bring those in diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fe9c40164..3dab50605 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,7 +17,7 @@ repos: - id: pretty-format-json args: [--autofix, --indent, '4', --no-sort-keys] - repo: https://github.com/PyCQA/flake8 - rev: 3.9.2 + rev: 5.0.4 hooks: - id: flake8 exclude: ^docs/source/conf.py$ @@ -28,10 +28,10 @@ repos: - id: reorder-python-imports args: [--py3-plus] - repo: https://github.com/asottile/pyupgrade - rev: v2.0.2 + rev: v3.3.1 hooks: - id: pyupgrade - args: [--py36-plus] + args: [--py38-plus] - repo: local hooks: - id: patch-enforce-autospec @@ -42,8 +42,7 @@ repos: language: script files: ^tests/.*\.py$ - repo: http://github.com/psf/black - rev: 23.3.0 + rev: 22.3.0 hooks: - id: black - language_version: python3.8 - args: [--target-version, py36] + args: [--target-version, py38] diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..3339f3ec7 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,39 @@ +# Read the Docs configuration file for Sphinx projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# RTD defaults as of 2023-11-08 +build: + # TODO: Bump to jammy+3.8 once master branch updated + os: ubuntu-20.04 + tools: + python: "3.6" + # You can also specify other tool versions: + # nodejs: "20" + # rust: "1.70" + # golang: "1.20" + +# Also provide downloadable zip +formats: [htmlzip] + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/conf.py + # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs + # builder: "dirhtml" + # Fail on all warnings to avoid broken references + # fail_on_warning: true + +# Optionally build your docs in additional formats such as PDF and ePub +# formats: +# - pdf +# - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: requirements-docs.txt diff --git a/Makefile b/Makefile index 8da22c70b..03481009c 100644 --- a/Makefile +++ b/Makefile @@ -11,23 +11,25 @@ endif NOOP = true ifeq ($(PAASTA_ENV),YELP) - # This index must match the Ubuntu codename in the dockerfile. 
-    DOCKER_PIP_INDEX_URL ?= http://169.254.255.254:20641/bionic/simple/
+    export PIP_INDEX_URL ?= http://169.254.255.254:20641/$*/simple/
     export NPM_CONFIG_REGISTRY ?= https://npm.yelpcorp.com/
     ADD_MISSING_DEPS_MAYBE:=-diff --unchanged-line-format= --old-line-format= --new-line-format='%L' ./requirements.txt ./yelp_package/extra_requirements_yelp.txt >> ./requirements.txt
 else
-    DOCKER_PIP_INDEX_URL ?= https://pypi.python.org/simple
+    export PIP_INDEX_URL ?= https://pypi.python.org/simple
     export NPM_CONFIG_REGISTRY ?= https://registry.npmjs.org
     ADD_MISSING_DEPS_MAYBE:=$(NOOP)
 endif

-.PHONY : all clean tests docs dev cluster_itests
+.PHONY : all clean tests docs dev

 usage:
     @echo "make test - Run tests"
     @echo "make deb_bionic - Generate bionic deb package"
     @echo "make itest_bionic - Run tests and integration checks"
     @echo "make _itest_bionic - Run only integration checks"
+    @echo "make deb_jammy - Generate jammy deb package"
+    @echo "make itest_jammy - Run tests and integration checks"
+    @echo "make _itest_jammy - Run only integration checks"
     @echo "make release - Prepare debian info for new release"
     @echo "make clean - Get rid of scratch and byte files"
     @echo "make dev - Get a local copy of trond running in debug mode in the foreground"
@@ -35,18 +37,17 @@ endif
 docker_%:
     @echo "Building docker image for $*"
     [ -d dist ] || mkdir -p dist
-    cd ./yelp_package/$* && docker build --build-arg PIP_INDEX_URL=${DOCKER_PIP_INDEX_URL} --build-arg NPM_CONFIG_REGISTRY=${NPM_CONFIG_REGISTRY} -t tron-builder-$* .
+    cd ./yelp_package/$* && docker build --build-arg PIP_INDEX_URL=${PIP_INDEX_URL} --build-arg NPM_CONFIG_REGISTRY=${NPM_CONFIG_REGISTRY} -t tron-builder-$* .

 deb_%: clean docker_% coffee_%
     @echo "Building deb for $*"
     # backup these files so we can temp modify them
     cp requirements.txt requirements.txt.old
     $(ADD_MISSING_DEPS_MAYBE)
-    $(DOCKER_RUN) -e PIP_INDEX_URL=${DOCKER_PIP_INDEX_URL} tron-builder-$* /bin/bash -c ' \
+    $(DOCKER_RUN) -e PIP_INDEX_URL=${PIP_INDEX_URL} tron-builder-$* /bin/bash -c ' \
         dpkg-buildpackage -d && \
         mv ../*.deb dist/ && \
-        rm -rf debian/tron && \
-        chown -R $(UID):$(GID) dist debian \
+        rm -rf debian/tron \
     '
     # restore the backed up files
     mv requirements.txt.old requirements.txt
@@ -56,15 +57,14 @@ coffee_%: docker_%
     $(DOCKER_RUN) tron-builder-$* /bin/bash -c ' \
         rm -rf tronweb/js/cs && \
         mkdir -p tronweb/js/cs && \
-        coffee -o tronweb/js/cs/ -c tronweb/coffee/ && \
-        chown -R $(UID):$(GID) tronweb/js/cs/ \
+        coffee -o tronweb/js/cs/ -c tronweb/coffee/ \
     '

 test:
-    tox -e py36
+    tox -e py38

 test_in_docker_%: docker_%
-    $(DOCKER_RUN) tron-builder-$* python3.6 -m tox -vv -e py36
+    $(DOCKER_RUN) tron-builder-$* python3.8 -m tox -vv -e py38

 tox_%:
     tox -e $*
@@ -78,17 +78,14 @@ debitest_%: deb_% _itest_%
 itest_%: debitest_%
     @echo "itest $* OK"

-cluster_itests:
-    tox -e cluster_itests
-
 dev:
-    SSH_AUTH_SOCK=$(SSH_AUTH_SOCK) .tox/py36/bin/trond --debug --working-dir=dev -l logging.conf --host=0.0.0.0
+    SSH_AUTH_SOCK=$(SSH_AUTH_SOCK) .tox/py38/bin/trond --debug --working-dir=dev -l logging.conf --host=0.0.0.0

 example_cluster:
     tox -e example-cluster

 yelpy:
-    .tox/py36/bin/pip install -r yelp_package/extra_requirements_yelp.txt
+    .tox/py38/bin/pip install -r yelp_package/extra_requirements_yelp.txt

 LAST_COMMIT_MSG = $(shell git log -1 --pretty=%B | sed -e 's/[\x27\x22]/\\\x27/g')
 release:
diff --git a/bin/tronctl b/bin/tronctl index 81e7f783f..c94abd034 100755 --- a/bin/tronctl +++ b/bin/tronctl @@ -47,7 +47,11 @@ COMMAND_HELP = ( "job name, job run id, or action id", "Start the
selected job, job run, or action. Creates a new job run if starting a job.", ), - ("rerun", "job run id", "Start a new job run with the same start time command context as the given job run.",), + ( + "rerun", + "job run id", + "Start a new job run with the same start time command context as the given job run.", + ), ( "retry", "action id", @@ -55,16 +59,28 @@ COMMAND_HELP = ( ), ("recover", "action id", "Ask Tron to start tracking an UNKNOWN action run again"), ("cancel", "job run id", "Cancel the selected job run."), - ("backfill", "job name", "Start job runs for a particular date range",), + ( + "backfill", + "job name", + "Start job runs for a particular date range", + ), ("disable", "job name", "Disable selected job and cancel any outstanding runs"), ("enable", "job name", "Enable the selected job and schedule the next run"), - ("fail", "job run or action id", "Mark an UNKNOWN job or action as failed. Does not publish action triggers.",), + ( + "fail", + "job run or action id", + "Mark an UNKNOWN job or action as failed. Does not publish action triggers.", + ), ( "success", "job run or action id", "Mark an UNKNOWN job or action as having succeeded. Will publish action triggers.", ), - ("skip", "action id", "Skip a failed action, unblocks dependent actions. Does *not* publish action triggers.",), + ( + "skip", + "action id", + "Skip a failed action, unblocks dependent actions. Does *not* publish action triggers.", + ), ( "skip-and-publish", "action id", @@ -87,8 +103,7 @@ def parse_date(date_string): def parse_cli(): parser = cmd_utils.build_option_parser() - subparsers = parser.add_subparsers(dest="command", title="commands", help="Tronctl command to run") - subparsers.required = True # add_subparsers only supports required arg from py37 + subparsers = parser.add_subparsers(dest="command", title="commands", help="Tronctl command to run", required=True) cmd_parsers = {} for cmd_name, id_help_text, desc in COMMAND_HELP: @@ -100,14 +115,20 @@ def parse_cli(): # start cmd_parsers["start"].add_argument( - "--run-date", type=parse_date, dest="run_date", help="What the run-date should be set to", + "--run-date", + type=parse_date, + dest="run_date", + help="What the run-date should be set to", ) # backfill backfill_parser = cmd_parsers["backfill"] mutex_dates_group = backfill_parser.add_mutually_exclusive_group(required=True) mutex_dates_group.add_argument( - "--start-date", type=parse_date, dest="start_date", help="First run-date to backfill", + "--start-date", + type=parse_date, + dest="start_date", + help="First run-date to backfill", ) backfill_parser.add_argument( "--end-date", @@ -215,14 +236,16 @@ def request(url: str, data: Dict[str, Any], headers=None, method=None) -> bool: def event_publish(args): for event in args.id: yield request( - urljoin(args.server, "/api/events"), dict(command="publish", event=event), + urljoin(args.server, "/api/events"), + dict(command="publish", event=event), ) def event_discard(args): for event in args.id: yield request( - urljoin(args.server, "/api/events"), dict(command="discard", event=event), + urljoin(args.server, "/api/events"), + dict(command="discard", event=event), ) @@ -236,7 +259,10 @@ def _get_triggers_for_action(server: str, action_identifier: str) -> Optional[Tu return None trigger_response = client.request( - uri=urljoin(server, f"/api/jobs/{namespace}.{job_name}/{run_number}/{action_name}",), + uri=urljoin( + server, + f"/api/jobs/{namespace}.{job_name}/{run_number}/{action_name}", + ), ) if trigger_response.error: print(f"Unable to fetch 
downstream triggers for {action_identifier}: {trigger_response.error}") @@ -256,17 +282,28 @@ def control_objects(args: argparse.Namespace): url_index = tron_client.index() for identifier in args.id: try: - tron_id = client.get_object_type_from_identifier(url_index, identifier,) + tron_id = client.get_object_type_from_identifier( + url_index, + identifier, + ) except ValueError as e: - possibilities = list(tron_jobs_completer(prefix="", client=tron_client),) - suggestions = suggest_possibilities(word=identifier, possibilities=possibilities,) + possibilities = list( + tron_jobs_completer(prefix="", client=tron_client), + ) + suggestions = suggest_possibilities( + word=identifier, + possibilities=possibilities, + ) raise SystemExit(f"Error: {e}{suggestions}") if args.command == "skip-and-publish": # this command is more of a pseudo-command - skip and publish are handled in two different resources # and changing the API would be painful, so instead we call skip + publish separately from the client # (i.e., this file) to implement this functionality - if request(url=urljoin(args.server, tron_id.url), data={"command": "skip"},): + if request( + url=urljoin(args.server, tron_id.url), + data={"command": "skip"}, + ): # a single action can have 0..N triggers to publish and these can be arbitrarily named, so we need to # query the API and figure out what triggers exist triggers = _get_triggers_for_action(server=args.server, action_identifier=identifier) @@ -283,7 +320,8 @@ def control_objects(args: argparse.Namespace): # around the full set of args everywhere to do so for trigger in triggers: yield request( - url=urljoin(args.server, "/api/events"), data={"command": "publish", "event": trigger}, + url=urljoin(args.server, "/api/events"), + data={"command": "publish", "event": trigger}, ) else: print(f"Unable to skip {identifier}.") @@ -414,7 +452,8 @@ def main(): sys.exit(ExitCode.fail) except RequestError as err: print( - f"Error connecting to the tron server ({args.server}): {err}", file=sys.stderr, + f"Error connecting to the tron server ({args.server}): {err}", + file=sys.stderr, ) sys.exit(ExitCode.fail) diff --git a/bin/trond b/bin/trond index 7f656dce5..2a84deae3 100755 --- a/bin/trond +++ b/bin/trond @@ -25,7 +25,9 @@ def parse_cli(): parser = argparse.ArgumentParser() parser.add_argument( - "--version", action="version", version=f"{parser.prog} {tron.__version__}", + "--version", + action="version", + version=f"{parser.prog} {tron.__version__}", ) parser.add_argument( @@ -36,15 +38,22 @@ def parse_cli(): ) parser.add_argument( - "-c", "--config-path", default=DEFAULT_CONF_PATH, help="File path to the Tron configuration file", + "-c", + "--config-path", + default=DEFAULT_CONF_PATH, + help="File path to the Tron configuration file", ) parser.add_argument( - "--nodaemon", action="store_true", default=False, help="[DEPRECATED] Disable daemonizing, default %(default)s", + "--nodaemon", + action="store_true", + default=False, + help="[DEPRECATED] Disable daemonizing, default %(default)s", ) parser.add_argument( # for backwards compatibility - "--pid-file", help="[DEPRECATED] File path to pid file. Use --lock-file instead.", + "--pid-file", + help="[DEPRECATED] File path to pid file. 
Use --lock-file instead.", ) parser.add_argument( @@ -55,15 +64,23 @@ def parse_cli(): logging_group = parser.add_argument_group("logging", "") logging_group.add_argument( - "--log-conf", "-l", help="File path to a custom logging.conf", + "--log-conf", + "-l", + help="File path to a custom logging.conf", ) logging_group.add_argument( - "-v", "--verbose", action="count", default=0, help="Verbose logging. Repeat for more verbosity.", + "-v", + "--verbose", + action="count", + default=0, + help="Verbose logging. Repeat for more verbosity.", ) logging_group.add_argument( - "--debug", action="store_true", help="Debug mode, extra error reporting, no daemonizing", + "--debug", + action="store_true", + help="Debug mode, extra error reporting, no daemonizing", ) api_group = parser.add_argument_group("Web Service API", "") @@ -87,7 +104,10 @@ def parse_cli(): requirement = pkg_resources.Requirement.parse("tron") api_group.add_argument( "--web-path", - default=pkg_resources.resource_filename(requirement, "tronweb",), + default=pkg_resources.resource_filename( + requirement, + "tronweb", + ), help="Path to static web resources, default %(default)s.", ) @@ -108,7 +128,10 @@ def parse_cli(): args.lock_file = DEFAULT_LOCKFILE args.lock_file = os.path.join(args.working_dir, args.lock_file) - args.config_path = os.path.join(args.working_dir, args.config_path,) + args.config_path = os.path.join( + args.working_dir, + args.config_path, + ) return args diff --git a/bin/tronfig b/bin/tronfig index 9938b8527..057b0ed3b 100755 --- a/bin/tronfig +++ b/bin/tronfig @@ -20,7 +20,11 @@ def parse_cli(): parser = cmd_utils.build_option_parser() parser.add_argument( - "-p", "--print", action="store_true", dest="print_config", help="Print config to stdout, rather than uploading", + "-p", + "--print", + action="store_true", + dest="print_config", + help="Print config to stdout, rather than uploading", ) parser.add_argument( "-C", @@ -32,7 +36,10 @@ def parse_cli(): "will accept your configuration.", ) parser.add_argument( - "-d", "--delete", action="store_true", help="Delete the configuration for this namespace", + "-d", + "--delete", + action="store_true", + help="Delete the configuration for this namespace", ) parser.add_argument( "-V", @@ -52,10 +59,17 @@ def parse_cli(): help="Full validation of a folder, don't upload, " "same as -V but checks for more edge-cases", ) parser.add_argument( - "-n", "--namespace", action="store", help="Alternate namespace to use", + "-n", + "--namespace", + action="store", + help="Alternate namespace to use", ) parser.add_argument( - "-m", "--master-config", action="store", dest="master_config", help="Source of master configuration file", + "-m", + "--master-config", + action="store", + dest="master_config", + help="Source of master configuration file", ) parser.add_argument("source") @@ -63,7 +77,12 @@ def parse_cli(): def upload_config(client, config_name, contents, config_hash, check=False): - response = client.config(config_name, config_data=contents, config_hash=config_hash, check=check,) + response = client.config( + config_name, + config_data=contents, + config_hash=config_hash, + check=check, + ) if "error" in response: log.error(response["error"]) @@ -76,9 +95,17 @@ def upload_config(client, config_name, contents, config_hash, check=False): def validate(config_name, config_content, master_content=None): try: config_data = manager.from_string(config_content) - master_data = manager.from_string(master_content,) if master_content else None + master_data = ( + manager.from_string( + 
master_content, + ) + if master_content + else None + ) config_parse.validate_fragment( - name=config_name, fragment=config_data, master_config=master_data, + name=config_name, + fragment=config_data, + master_config=master_data, ) except ConfigError as e: return str(e) @@ -86,10 +113,14 @@ def validate(config_name, config_content, master_content=None): def delete_config(client, config_name): if config_name == schema.MASTER_NAMESPACE: - log.error("Deleting MASTER namespace is not allowed. Name must be specified.",) + log.error( + "Deleting MASTER namespace is not allowed. Name must be specified.", + ) return - response = input(f"This will delete the configuration for the {config_name} namespace. Proceed? (y/n): ",) + response = input( + f"This will delete the configuration for the {config_name} namespace. Proceed? (y/n): ", + ) if response[:1].lower() != "y": return @@ -146,8 +177,15 @@ if __name__ == "__main__": name, content = get_config_input(args.namespace, args.source) master_content = None if args.master_config: - _, master_content = get_config_input(schema.MASTER_NAMESPACE, args.master_config,) - result = validate(config_name=name, config_content=content, master_content=master_content,) + _, master_content = get_config_input( + schema.MASTER_NAMESPACE, + args.master_config, + ) + result = validate( + config_name=name, + config_content=content, + master_content=master_content, + ) elif args.validate_dir: result = validate_dir(args.source) @@ -176,7 +214,13 @@ if __name__ == "__main__": print(result) sys.exit(1) - if upload_config(client, namespace, content, config_hash, check=args.check,): + if upload_config( + client, + namespace, + content, + config_hash, + check=args.check, + ): sys.exit(0) print("Uploading failed") diff --git a/bin/tronrepl b/bin/tronrepl index ef626ec42..ebe73abfb 100755 --- a/bin/tronrepl +++ b/bin/tronrepl @@ -26,7 +26,9 @@ def parse_cli(): parser = argparse.ArgumentParser() parser.add_argument( - "--version", action="version", version=f"{parser.prog} {tron.__version__}", + "--version", + action="version", + version=f"{parser.prog} {tron.__version__}", ) parser.add_argument( @@ -37,15 +39,22 @@ def parse_cli(): ) parser.add_argument( - "-c", "--config-path", default=DEFAULT_CONF_PATH, help="File path to the Tron configuration file", + "-c", + "--config-path", + default=DEFAULT_CONF_PATH, + help="File path to the Tron configuration file", ) parser.add_argument( - "--nodaemon", action="store_true", default=False, help="[DEPRECATED] Disable daemonizing, default %(default)s", + "--nodaemon", + action="store_true", + default=False, + help="[DEPRECATED] Disable daemonizing, default %(default)s", ) parser.add_argument( # for backwards compatibility - "--pid-file", help="[DEPRECATED] File path to pid file. Use --lock-file instead.", + "--pid-file", + help="[DEPRECATED] File path to pid file. Use --lock-file instead.", ) parser.add_argument( @@ -56,15 +65,23 @@ def parse_cli(): logging_group = parser.add_argument_group("logging", "") logging_group.add_argument( - "--log-conf", "-l", help="File path to a custom logging.conf", + "--log-conf", + "-l", + help="File path to a custom logging.conf", ) logging_group.add_argument( - "-v", "--verbose", action="count", default=0, help="Verbose logging. Repeat for more verbosity.", + "-v", + "--verbose", + action="count", + default=0, + help="Verbose logging. 
Repeat for more verbosity.", ) logging_group.add_argument( - "--debug", action="store_true", help="Debug mode, extra error reporting, no daemonizing", + "--debug", + action="store_true", + help="Debug mode, extra error reporting, no daemonizing", ) api_group = parser.add_argument_group("Web Service API", "") @@ -88,7 +105,10 @@ def parse_cli(): requirement = pkg_resources.Requirement.parse("tron") api_group.add_argument( "--web-path", - default=pkg_resources.resource_filename(requirement, "tronweb",), + default=pkg_resources.resource_filename( + requirement, + "tronweb", + ), help="Path to static web resources, default %(default)s.", ) @@ -109,7 +129,10 @@ def parse_cli(): args.lock_file = DEFAULT_LOCKFILE args.lock_file = os.path.join(args.working_dir, args.lock_file) - args.config_path = os.path.join(args.working_dir, args.config_path,) + args.config_path = os.path.join( + args.working_dir, + args.config_path, + ) return args @@ -149,7 +172,10 @@ def main(): setup_environment(args) trond = trondaemon.TronDaemon(args) # noqa: F841 - trond.mcp = tron.mcp.MasterControlProgram(trond.options.working_dir, trond.options.config_path,) + trond.mcp = tron.mcp.MasterControlProgram( + trond.options.working_dir, + trond.options.config_path, + ) trond.mcp._load_config() # trond.mcp.restore_state(trond.mcp.config.load().get_master().action_runner) diff --git a/bin/tronview b/bin/tronview index 934d06662..4800eefdc 100755 --- a/bin/tronview +++ b/bin/tronview @@ -18,25 +18,56 @@ from tron.commands.cmd_utils import tron_jobs_completer def parse_cli(): parser = cmd_utils.build_option_parser() parser.add_argument( - "--numshown", "-n", type=int, dest="num_displays", help="Max number of jobs/job-runs shown", default=10, + "--numshown", + "-n", + type=int, + dest="num_displays", + help="Max number of jobs/job-runs shown", + default=10, ) parser.add_argument( - "--color", "-c", action="store_true", dest="display_color", help="Display in color", default=None, + "--color", + "-c", + action="store_true", + dest="display_color", + help="Display in color", + default=None, ) parser.add_argument( - "--nocolor", action="store_false", dest="display_color", help="Display without color", default=None, + "--nocolor", + action="store_false", + dest="display_color", + help="Display without color", + default=None, ) parser.add_argument( - "--stdout", "-o", action="count", dest="stdout", help="Solely displays stdout", default=0, + "--stdout", + "-o", + action="count", + dest="stdout", + help="Solely displays stdout", + default=0, ) parser.add_argument( - "--stderr", "-e", action="count", dest="stderr", help="Solely displays stderr", default=0, + "--stderr", + "-e", + action="count", + dest="stderr", + help="Solely displays stderr", + default=0, ) parser.add_argument( - "--events", "-E", action="store_true", dest="events", help="Display stored events", default=0, + "--events", + "-E", + action="store_true", + dest="events", + help="Display stored events", + default=0, ) parser.add_argument( - "name", nargs="?", help="job name | job run id | action id", + "name", + nargs="?", + help="job name | job run id | action id", ).completer = cmd_utils.tron_jobs_completer argcomplete.autocomplete(parser) @@ -54,7 +85,10 @@ def view_all(args, client): """Retrieve jobs and display them.""" return display.DisplayJobs().format( client.jobs( - include_job_runs=False, include_action_runs=False, include_action_graph=False, include_node_pool=False, + include_job_runs=False, + include_action_runs=False, + include_action_graph=False, + 
include_node_pool=False, ), ) @@ -72,7 +106,10 @@ def view_job_run(args, job_run_id, client): def view_action_run(args, act_run_id, client): - content = client.action_runs(act_run_id.url, num_lines=args.num_displays,) + content = client.action_runs( + act_run_id.url, + num_lines=args.num_displays, + ) return display.format_action_run_details(content) @@ -89,7 +126,10 @@ def get_view_output(name, args, client): tron_id = get_object_type_from_identifier(url_index, name) except ValueError as e: possibilities = list(tron_jobs_completer(prefix="", client=client)) - suggestions = suggest_possibilities(word=name, possibilities=possibilities,) + suggestions = suggest_possibilities( + word=name, + possibilities=possibilities, + ) raise SystemExit(f"Error: {e}{suggestions}") if tron_id.type not in obj_type_to_view_map: @@ -135,7 +175,8 @@ def main(): except RequestError as err: print( - f"Error connecting to the tron server ({args.server}): {err}", file=sys.stderr, + f"Error connecting to the tron server ({args.server}): {err}", + file=sys.stderr, ) sys.exit(ExitCode.fail) diff --git a/cluster_itests/config/MASTER.yaml b/cluster_itests/config/MASTER.yaml deleted file mode 100644 index 14306f717..000000000 --- a/cluster_itests/config/MASTER.yaml +++ /dev/null @@ -1,44 +0,0 @@ -state_persistence: - name: "/tmp/tron_state" - store_type: "shelve" - buffer_size: 10 - -mesos_options: - master_address: mesosmaster - enabled: True - dockercfg_location: 'file:///root/.dockercfg' - secret_file: '/work/cluster_itests/tron_framework_secret' - principal: 'tron' - -k8s_options: - enabled: true - kubeconfig_path: '/DOES/NOT/EXIST/YET' - -ssh_options: - agent: False - identities: - - /work/example-cluster/insecure_key - -action_runner: - runner_type: "subprocess" - remote_status_path: "/tmp/tron" - remote_exec_path: "/work/bin/" - -nodes: - - hostname: localhost - username: root - -time_zone: US/Eastern - -jobs: - - name: "mesostest" - node: localhost - schedule: "cron 0 0 1 1 *" - time_zone: "US/Pacific" - actions: - - name: "first" - executor: mesos - command: "sleep 5m" - docker_image: busybox - cpus: 0.1 - mem: 100 diff --git a/cluster_itests/config/_manifest.yaml b/cluster_itests/config/_manifest.yaml deleted file mode 100644 index c696d1fca..000000000 --- a/cluster_itests/config/_manifest.yaml +++ /dev/null @@ -1 +0,0 @@ -{MASTER: /work/cluster_itests/config/MASTER.yaml} diff --git a/cluster_itests/docker-compose.yml b/cluster_itests/docker-compose.yml index dbd3ce2dd..e69de29bb 100644 --- a/cluster_itests/docker-compose.yml +++ b/cluster_itests/docker-compose.yml @@ -1,39 +0,0 @@ -version: '2' - -services: - zookeeper: - build: ../yelp_package/itest_dockerfiles/zookeeper/ - ports: - - 2181 - - mesosmaster: - build: ../yelp_package/itest_dockerfiles/mesos/ - image: paasta_itest_mesos:latest - hostname: mesosmaster - ports: - - 5050 - command: 'mesos-master --zk=zk://zookeeper:2181/mesos-testcluster --registry=in_memory --quorum=1 --authenticate --authenticate_slaves --work_dir=/tmp/mesos --credentials=/etc/mesos-secrets --authenticate_http_frameworks --http_framework_authenticators=basic' - depends_on: - - zookeeper - - mesosslave: - build: ../yelp_package/itest_dockerfiles/mesos/ - ports: - - 5051 - command: 'mesos-slave --master=zk://zookeeper:2181/mesos-testcluster --resources="cpus(*):10; mem(*):512; disk(*):100" --credential=/etc/mesos-slave-secret --containerizers=docker --docker=/usr/bin/docker --work_dir=/tmp/mesos --attributes="region:fakeregion;pool:default" --no-docker_kill_orphans 
--log_dir=/var/log/mesos' - volumes: - - /var/run/docker.sock:/var/run/docker.sock - depends_on: - - zookeeper - environment: - - MESOS_SYSTEMD_ENABLE_SUPPORT=false - - tronmaster: - build: ../yelp_package/itest_dockerfiles/tronmaster/ - command: 'tox -e trond_inside_container' - ports: - - 8089 - depends_on: - - mesosmaster - volumes: - - ../:/work:rw diff --git a/cluster_itests/environment.py b/cluster_itests/environment.py deleted file mode 100644 index 15caef348..000000000 --- a/cluster_itests/environment.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2015-2016 Yelp Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import errno -import os -import signal -import time -from functools import wraps - -import requests - - -def before_all(context): - wait_for_http("tronmaster:8089") - wait_for_http("mesosmaster:5050") - - -def after_all(context): - pass - - -def after_scenario(context, scenario): - pass - - -def before_feature(context, feature): - if "skip" in feature.tags: - feature.skip("Marked with @skip") - return - - -def before_scenario(context, scenario): - if "skip" in scenario.effective_tags: - scenario.skip("Marked with @skip") - return - - -class TimeoutError(Exception): - pass - - -def timeout(seconds=10, error_message=os.strerror(errno.ETIME)): - def decorator(func): - def _handle_timeout(signum, frame): - raise TimeoutError(error_message) - - def wrapper(*args, **kwargs): - signal.signal(signal.SIGALRM, _handle_timeout) - signal.alarm(seconds) - try: - result = func(*args, **kwargs) - finally: - signal.alarm(0) - return result - - return wraps(func)(wrapper) - - return decorator - - -@timeout(120, error_message="Service is not available. 
Cancelling integration tests") -def wait_for_http(service): - while True: - print(f"Waiting for {service} to be up...") - try: - response = requests.get("http://%s/" % service, timeout=5) - except ( - requests.exceptions.ConnectionError, - requests.exceptions.Timeout, - ): - time.sleep(5) - continue - if response.status_code == 200: - print(f"{service} is up and running!") - break diff --git a/cluster_itests/mesos.feature b/cluster_itests/mesos.feature deleted file mode 100644 index 209739413..000000000 --- a/cluster_itests/mesos.feature +++ /dev/null @@ -1,8 +0,0 @@ -Feature: Tron can connect to a mesos cluster - - Scenario: Framework registration - Given a working mesos cluster - When we run tronctl start MASTER.mesostest - And we sleep 3 seconds - Then we should see 1 frameworks - Then we should see tron in the list of frameworks diff --git a/cluster_itests/steps/itest_utils.py b/cluster_itests/steps/itest_utils.py deleted file mode 100644 index a5fb36fc8..000000000 --- a/cluster_itests/steps/itest_utils.py +++ /dev/null @@ -1,9 +0,0 @@ -import subprocess - - -def run(command): - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) - process.wait() - returncode = process.returncode - output = "".join([line.decode("utf8") for line in process.stdout.readlines()]) - return returncode, output diff --git a/cluster_itests/steps/mesos_steps.py b/cluster_itests/steps/mesos_steps.py deleted file mode 100644 index de22a76ba..000000000 --- a/cluster_itests/steps/mesos_steps.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2015-2016 Yelp Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import time - -import itest_utils -import requests -from behave import given -from behave import then -from behave import when - - -@given("a working mesos cluster") -def working_mesos_cluster(context): - pass - - -@when("we run tronctl {command}") -def run_tronctl_command(context, command): - full_command = f"tronctl --server http://tronmaster:8089 {command}" - exit_code, context.output = itest_utils.run(full_command) - print(full_command) - print(exit_code) - print(context.output) - assert exit_code == 0, context.output - - -@then("we should see {framework_string} in the list of frameworks") -def see_framework_in_list(context, framework_string): - frameworks = list_active_frameworks() - assert any(framework_string in f for f in frameworks), frameworks - - -@when("we sleep {num:d} seconds") -def sleep(context, num): - time.sleep(num) - - -@then("we should see {num:d} frameworks") -def see_num_frameworks(context, num): - frameworks = list_active_frameworks() - assert len(frameworks) == num, frameworks - - -def list_active_frameworks(): - framework_info = fetch_frameworks_endpoint()["frameworks"] - return [f["name"] for f in framework_info] - - -def fetch_frameworks_endpoint(): - url = "http://mesosmaster:5050/state/frameworks.json" - resp = requests.get(url=url) - return resp.json() diff --git a/cluster_itests/tron_framework_secret b/cluster_itests/tron_framework_secret deleted file mode 100644 index 2bb56dd4b..000000000 --- a/cluster_itests/tron_framework_secret +++ /dev/null @@ -1 +0,0 @@ -tron-secret diff --git a/contrib/migration_script.py b/contrib/migration_script.py index 2fad770f4..00060b98b 100755 --- a/contrib/migration_script.py +++ b/contrib/migration_script.py @@ -24,21 +24,31 @@ class bcolors: def parse_args(): - parser = argparse.ArgumentParser(description="Migrate jobs to new namespace",) + parser = argparse.ArgumentParser( + description="Migrate jobs to new namespace", + ) parser.add_argument( - "--server", required=True, help="specify the location of tron master", + "--server", + required=True, + help="specify the location of tron master", ) parser.add_argument( - "--old-ns", required=True, help="Old namespace", + "--old-ns", + required=True, + help="Old namespace", ) parser.add_argument( - "--new-ns", required=True, help="New namespace", + "--new-ns", + required=True, + help="New namespace", ) parser.add_argument( - "source", help="source file to get list of jobs", + "source", + help="source file to get list of jobs", ) parser.add_argument( - "--job", help="Specify a single job to migrate", + "--job", + help="Specify a single job to migrate", ) args = parser.parse_args() return args @@ -63,7 +73,7 @@ def check_job_if_running(jobs_status, job_name): def command_jobs(command, jobs, args, ns=None): - """ This function run tronctl command for the jobs + """This function run tronctl command for the jobs command: the tronctl command it will run jobs: a list of jobs args: the args for this script @@ -101,7 +111,10 @@ def command_jobs(command, jobs, args, ns=None): def ssh_command(hostname, command): print(bcolors.BOLD + f"Executing the command: ssh -A {hostname} {command}" + bcolors.ENDC) ssh = subprocess.Popen( - ["ssh", "-A", hostname, command], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + ["ssh", "-A", hostname, command], + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, ) exitcode = ssh.wait() result = ssh.stdout.readlines() diff --git a/contrib/mock_patch_checker.py b/contrib/mock_patch_checker.py index a208a3be2..0a95bbf84 100755 --- 
a/contrib/mock_patch_checker.py +++ b/contrib/mock_patch_checker.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 import ast import sys diff --git a/contrib/patch-config-loggers.diff b/contrib/patch-config-loggers.diff new file mode 100644 index 000000000..4ccf7d506 --- /dev/null +++ b/contrib/patch-config-loggers.diff @@ -0,0 +1,81 @@ +--- a/debian/tron/opt/venvs/tron/lib/python3.8/site-packages/kubernetes/client/configuration.py ++++ b/debian/tron/opt/venvs/tron/lib/python3.8/site-packages/kubernetes/client/configuration.py +@@ -71,11 +71,11 @@ + """ + + _default = None +- + def __init__(self, host="http://localhost", + api_key=None, api_key_prefix=None, + username=None, password=None, + discard_unknown_keys=False, ++ is_logger_used=False, + ): + """Constructor + """ +@@ -106,26 +106,28 @@ + """Password for HTTP basic authentication + """ + self.discard_unknown_keys = discard_unknown_keys ++ self.is_logger_used = is_logger_used + self.logger = {} +- """Logging Settings +- """ +- self.logger["package_logger"] = logging.getLogger("client") +- self.logger["urllib3_logger"] = logging.getLogger("urllib3") +- self.logger_format = '%(asctime)s %(levelname)s %(message)s' +- """Log format +- """ +- self.logger_stream_handler = None +- """Log stream handler +- """ +- self.logger_file_handler = None +- """Log file handler +- """ +- self.logger_file = None +- """Debug file location +- """ +- self.debug = False +- """Debug switch +- """ ++ if self.is_logger_used: ++ """Logging Settings ++ """ ++ self.logger["package_logger"] = logging.getLogger("client") ++ self.logger["urllib3_logger"] = logging.getLogger("urllib3") ++ self.logger_format = '%(asctime)s %(levelname)s %(message)s' ++ """Log format ++ """ ++ self.logger_stream_handler = None ++ """Log stream handler ++ """ ++ self.logger_file_handler = None ++ """Log file handler ++ """ ++ self.logger_file = None ++ """Debug file location ++ """ ++ self.debug = False ++ """Debug switch ++ """ + + self.verify_ssl = True + """SSL/TLS verification +@@ -178,11 +180,12 @@ + for k, v in self.__dict__.items(): + if k not in ('logger', 'logger_file_handler'): + setattr(result, k, copy.deepcopy(v, memo)) +- # shallow copy of loggers +- result.logger = copy.copy(self.logger) +- # use setters to configure loggers +- result.logger_file = self.logger_file +- result.debug = self.debug ++ if self.is_logger_used: ++ # shallow copy of loggers ++ result.logger = copy.copy(self.logger) ++ # use setters to configure loggers ++ result.logger_file = self.logger_file ++ result.debug = self.debug + return result + + @classmethod diff --git a/contrib/sync_namespaces_jobs.py b/contrib/sync_namespaces_jobs.py index b7b4a2d2f..ddb53a533 100755 --- a/contrib/sync_namespaces_jobs.py +++ b/contrib/sync_namespaces_jobs.py @@ -13,15 +13,24 @@ def parse_args(): - parser = argparse.ArgumentParser(description="Creating namespaces and jobs configuration for load testing",) + parser = argparse.ArgumentParser( + description="Creating namespaces and jobs configuration for load testing", + ) parser.add_argument( - "--multiple", type=int, default=1, help="multiple workload of namespaces and jobs from source directory", + "--multiple", + type=int, + default=1, + help="multiple workload of namespaces and jobs from source directory", ) parser.add_argument( - "--src", default="/nail/etc/services/tron/prod", help="Directory to get Tron configuration files", + "--src", + default="/nail/etc/services/tron/prod", + help="Directory to get Tron configuration files", ) 
parser.add_argument( - "--dest", default="/tmp/tron-servdir", help="Directory to put Tron configuration files for load testing", + "--dest", + default="/tmp/tron-servdir", + help="Directory to put Tron configuration files for load testing", ) args = parser.parse_args() return args @@ -52,7 +61,10 @@ def main(): if "node" in action: action["node"] = "localhost" for i in range(args.multiple): - out_filepath = os.path.join(args.dest, "load_testing_" + str(i) + "-" + filename,) + out_filepath = os.path.join( + args.dest, + "load_testing_" + str(i) + "-" + filename, + ) with open(out_filepath, "w") as outf: yaml.dump(config, outf, default_flow_style=False) diff --git a/debian/changelog b/debian/changelog index ed7a9d301..2a6af07ab 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,58 @@ +tron (1.28.3) jammy; urgency=medium + + * 1.28.3 tagged with 'make release' + Commit: Merge pull request #938 from Yelp/u/kkasp/TRON-2112-lock- + start Add lock to tron start to mitigate the risk of running + duplicate jobs… + + -- Kevin Kaspari Thu, 08 Feb 2024 11:24:39 -0800 + +tron (1.28.2) jammy; urgency=medium + + * 1.28.2 tagged with 'make release' + Commit: Remove unnecessary mocks (#937) This is what I get for + trusting GHA rather than also running the tests internally :) These + mocks are no longer required and are actually causing test failures + internally. + + -- Luis Perez Thu, 01 Feb 2024 09:10:42 -0800 + +tron (1.28.1) jammy; urgency=medium + + * 1.28.1 tagged with 'make release' + Commit: Downpin yelp-clog (#936) We had bumped this a couple major + versions since we were also bumping scribereader - but we reverted + the scribereader bump before merging the jammy/py38 branch and + forgot to also revert the yelp-clog bump :) + + -- Luis Perez Thu, 01 Feb 2024 08:51:40 -0800 + +tron (1.28.0) jammy; urgency=medium + + * 1.28.0 tagged with 'make release' + Commit: Upgrading Tron to py3.8 + patching it with the fix (#934) * + Monkeypatch SimpleQueue back to PySimpleQueue We have a hunch that + this is what is causing our pod event loop to have wildly delayed + items * Revert 'Revert python/jammy upgrades (#907)' This reverts + commit 483da5c0fb258b01b8e47912ad034d43554ada7d. * new formatting + * Bump pyyaml * Rm sad test that is not relevant for validation * + added stuff to run tron locally * matching the python version with + whats currently running in infrastage * adding also changelog * + This commit includes some requriements for clog and try/except block + for handle_events * This commit adds the patch fix * Revert + 'matching the python version with whats currently running in + infrastage' This reverts commit + 1f81a6d4805d742a7a7a28b1d7d8eef39522a896. * Revert 'adding also + changelog' This reverts commit + 951fcc8fc31114bde340bad4b82ced0529e03b65. 
* precommit fixes the + patch * Fixing mypy issues and tests failing * removing return and + adding comment for handling defer being none * addressing wording + * fix InvariantException back to Exception --------- Co-authored- + by: Luis Perez Co-authored-by: Vincent Thibault + + + -- Eman Elsabban Wed, 31 Jan 2024 09:25:13 -0800 + tron (1.27.5) jammy; urgency=medium * 1.27.5 tagged with 'make release' diff --git a/debian/compat b/debian/compat index 1e8b31496..f599e28b8 100644 --- a/debian/compat +++ b/debian/compat @@ -1 +1 @@ -6 +10 diff --git a/debian/control b/debian/control index f9c10a9ea..d40824e96 100644 --- a/debian/control +++ b/debian/control @@ -2,13 +2,13 @@ Source: tron Section: admin Priority: optional Maintainer: Daniel Nephin -Build-Depends: debhelper (>= 7), python3.6-dev, libdb5.3-dev, libyaml-dev, libssl-dev, libffi-dev, dh-virtualenv +Build-Depends: debhelper (>= 7), python3.8-dev, libdb5.3-dev, libyaml-dev, libssl-dev, libffi-dev, dh-virtualenv Standards-Version: 3.8.3 Package: tron Architecture: all Homepage: http://github.com/yelp/Tron -Depends: bsdutils, python3.6, libdb5.3, libyaml-0-2, libssl1.0.0, libffi6, ${shlibs:Depends}, ${misc:Depends} +Depends: bsdutils, python3.8, libdb5.3, libyaml-0-2, ${shlibs:Depends}, ${misc:Depends} Description: Tron is a job scheduling, running and monitoring package. Designed to replace Cron for complex scheduling and dependencies. Provides: diff --git a/debian/rules b/debian/rules index f9698fd0e..95d96f6f9 100755 --- a/debian/rules +++ b/debian/rules @@ -1,6 +1,8 @@ #!/usr/bin/make -f # -*- makefile -*- +DH_VERBOSE := 1 + %: dh $@ --with python-virtualenv @@ -19,10 +21,11 @@ override_dh_virtualenv: echo $(PIP_INDEX_URL) dh_virtualenv --index-url $(PIP_INDEX_URL) \ --extra-pip-arg --trusted-host=169.254.255.254 \ - --python=/usr/bin/python3.6 \ + --python=/usr/bin/python3.8 \ --preinstall cython==0.29.36 \ - --preinstall pip==9.0.1 \ + --preinstall pip==18.1 \ --preinstall setuptools==46.1.3 - + @echo patching k8s client lib for configuration class + patch debian/tron/opt/venvs/tron/lib/python3.8/site-packages/kubernetes/client/configuration.py contrib/patch-config-loggers.diff override_dh_installinit: dh_installinit --noscripts diff --git a/debian/tron.service b/debian/tron.service index 180cd190e..8ad4e469b 100644 --- a/debian/tron.service +++ b/debian/tron.service @@ -5,7 +5,7 @@ After=network.target [Service] User=tron EnvironmentFile=/etc/default/tron -ExecStart=/usr/bin/trond --lock-file=${LOCKFILE:-$PIDFILE} --working-dir=${WORKINGDIR} --host ${LISTEN_HOST} --port ${LISTEN_PORT} ${DAEMON_OPTS} +ExecStart=/usr/bin/zk-flock tron_master_${CLUSTER_NAME} "/usr/bin/trond --lock-file=${LOCKFILE:-$PIDFILE} --working-dir=${WORKINGDIR} --host ${LISTEN_HOST} --port ${LISTEN_PORT} ${DAEMON_OPTS}" ExecStopPost=/usr/bin/logger -t tron_exit_status "SERVICE_RESULT:${SERVICE_RESULT} EXIT_CODE:${EXIT_CODE} EXIT_STATUS:${EXIT_STATUS}" TimeoutStopSec=20 Restart=always diff --git a/dev/config/MASTER.yaml b/dev/config/MASTER.yaml index b5f618768..46b2dbbe2 100755 --- a/dev/config/MASTER.yaml +++ b/dev/config/MASTER.yaml @@ -1,3 +1,4 @@ +# Please visit for a guide on how to setup Tron for local development state_persistence: name: "tron_state" table_name: "tmp-tron-state" @@ -12,18 +13,11 @@ ssh_options: nodes: - hostname: localhost -mesos_options: - master_address: mesos.paasta-mesosstage.yelp - master_port: 5050 - role: '*' - enabled: True - dockercfg_location: file:///root/.dockercfg - # Replace this with the path relative to your home dir to 
use # action_runner: # runner_type: "subprocess" # remote_status_path: "pg/tron/status" -# remote_exec_path: "pg/tron/.tox/py36/bin" +# remote_exec_path: "pg/tron/.tox/py38/bin" jobs: testjob0: diff --git a/docs/conf.py b/docs/conf.py index 2dcb713fd..a68c63681 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -187,7 +187,13 @@ def __getattr__(cls, name): # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ("index", "Tron.tex", "Tron Documentation", "Yelp, Inc.", "manual",), + ( + "index", + "Tron.tex", + "Tron Documentation", + "Yelp, Inc.", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of @@ -216,10 +222,34 @@ def __getattr__(cls, name): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ("man_tronview", "tronview", "tronview documentation", ["Yelp, Inc."], 1,), - ("man_tronfig", "tronfig", "tronfig documentation", ["Yelp, Inc."], 1,), - ("man_tronctl", "tronctl", "control Tron jobs", ["Yelp, Inc."], 1,), - ("man_trond", "trond", "trond documentation", ["Yelp, Inc."], 8,), + ( + "man_tronview", + "tronview", + "tronview documentation", + ["Yelp, Inc."], + 1, + ), + ( + "man_tronfig", + "tronfig", + "tronfig documentation", + ["Yelp, Inc."], + 1, + ), + ( + "man_tronctl", + "tronctl", + "control Tron jobs", + ["Yelp, Inc."], + 1, + ), + ( + "man_trond", + "trond", + "trond documentation", + ["Yelp, Inc."], + 8, + ), ] # If true, show URL addresses after external links. @@ -232,7 +262,15 @@ def __getattr__(cls, name): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ("index", "Tron", "Tron Documentation", "Yelp, Inc.", "Tron", "One line description of project.", "Miscellaneous",), + ( + "index", + "Tron", + "Tron Documentation", + "Yelp, Inc.", + "Tron", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. diff --git a/example-cluster/.gitignore b/example-cluster/.gitignore deleted file mode 100644 index 2eb72efc2..000000000 --- a/example-cluster/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -config/ -tron.pid -tron_state diff --git a/example-cluster/README.md b/example-cluster/README.md deleted file mode 100644 index a947a3fb9..000000000 --- a/example-cluster/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Example Cluster - -This is a docker-compose setup for a working Tron setup. There is one example job -in the tronfig which gets deployed. - -# To Run - -``` -$ tox -e example-cluster -``` - -# To start Tron (from inside the master container) - -``` -$ cd /work -$ ./example-cluster/start.sh -``` - -# To test tronview, tronctl, etc -First, start Tron in a container using the above steps. Then in a different terminal, -you will attach to that running container. There you will be able to run `tronctl`, -`tronview` and others against the Trond master in the example cluster. 
- -``` -$ EXAMPLE_CONTAINER_ID=$(docker ps -q -f status=running -f name=example-cluster_playground) -$ sudo docker exec -it $EXAMPLE_CONTAINER_ID /bin/bash -``` diff --git a/example-cluster/docker-compose.yml b/example-cluster/docker-compose.yml deleted file mode 100755 index b5f055eb0..000000000 --- a/example-cluster/docker-compose.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: "3" -services: - # For debugging - playground: - build: - context: ../ - dockerfile: yelp_package/bionic/Dockerfile - volumes: - - ../:/work - - ./:/nail/tron - ports: - - "8089:8089" - # For itests only, `pip install -e .` won't work - master: - build: - context: . - dockerfile: ubuntu:bionic - links: - - batch1 - batch1: - build: - context: ../ - dockerfile: example-cluster/images/batch/Dockerfile diff --git a/example-cluster/images/batch/Dockerfile b/example-cluster/images/batch/Dockerfile deleted file mode 100755 index 6b272030b..000000000 --- a/example-cluster/images/batch/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -FROM ubuntu:bionic - -RUN apt-get update > /dev/null && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - debhelper \ - dpkg-dev \ - devscripts \ - faketime \ - wget \ - gdebi-core \ - git \ - gcc \ - python-dev \ - coffeescript \ - libdb5.3-dev \ - libyaml-dev \ - libssl-dev \ - libffi-dev \ - ssh \ - rsyslog \ - && apt-get clean > /dev/null - -RUN useradd -ms /bin/bash tron && mkdir -p /home/tron/.ssh -ADD example-cluster/images/batch/insecure_key.pub /home/tron -RUN cat /home/tron/insecure_key.pub > /home/tron/.ssh/authorized_keys - -RUN wget --no-check-certificate https://bootstrap.pypa.io/get-pip.py -O /tmp/get-pip.py -RUN python /tmp/get-pip.py -RUN pip install -U tox wheel setuptools PyYAML - -WORKDIR /work diff --git a/example-cluster/images/batch/insecure_key.pub b/example-cluster/images/batch/insecure_key.pub deleted file mode 100755 index b2c4dac36..000000000 --- a/example-cluster/images/batch/insecure_key.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDo8SADMCrRE1eu+8WhvewNhkViIvVaFbxSqYPEW/gioS8jD3Q/Hlz31g2h0+CNY+qZ772GTWHHGPH15rCRs0fsaBk1BwNuE0UshTp6XH1DFT0zpVfBklU5K/2hTFFihUsmxMFDBrze2sZ/+0JreeLohZo86FyExKWJJtQlQY71dJZHlBCNgX+oz3o4mBdIleDdyJ67N60Nne/ZzDtlKuZlfwhnls+2jCa5KXHG6m4mveaYrWaQVRZKzr+4lK+VqEKLjwMuHx7jnlesB0IpWfhrN1wykuJCZgh7aI8Mfo215cTjws5jE4u42JOhJbK8xInLeGBZgkZ1UyWd34CnvZ6F robj@C02RJ7CUG8WN diff --git a/example-cluster/images/bionic b/example-cluster/images/bionic deleted file mode 100755 index a6a5d2de2..000000000 --- a/example-cluster/images/bionic +++ /dev/null @@ -1,22 +0,0 @@ -FROM ubuntu:bionic - -# Need Python 3.6 -RUN apt-get -q update && \ - apt-get -q install -y --no-install-recommends software-properties-common && \ - add-apt-repository ppa:deadsnakes/ppa - -RUN apt-get -q update && \ - DEBIAN_FRONTEND=noninteractive apt-get -q install -y --no-install-recommends \ - python3.6 \ - libyaml-dev \ - ssh \ - wget \ - && apt-get -q clean - -RUN wget https://bootstrap.pypa.io/get-pip.py -O /tmp/get-pip.py -RUN python3.6 /tmp/get-pip.py -RUN pip3.6 install wheel - -WORKDIR /work - -RUN mkdir -p /var/log/tron diff --git a/example-cluster/insecure_key b/example-cluster/insecure_key deleted file mode 100755 index 8cc51dd8a..000000000 --- a/example-cluster/insecure_key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA6PEgAzAq0RNXrvvFob3sDYZFYiL1WhW8UqmDxFv4IqEvIw90 -Px5c99YNodPgjWPqme+9hk1hxxjx9eawkbNH7GgZNQcDbhNFLIU6elx9QxU9M6VX 
-wZJVOSv9oUxRYoVLJsTBQwa83trGf/tCa3ni6IWaPOhchMSliSbUJUGO9XSWR5QQ -jYF/qM96OJgXSJXg3cieuzetDZ3v2cw7ZSrmZX8IZ5bPtowmuSlxxupuJr3mmK1m -kFUWSs6/uJSvlahCi48DLh8e455XrAdCKVn4azdcMpLiQmYIe2iPDH6NteXE48LO -YxOLuNiToSWyvMSJy3hgWYJGdVMlnd+Ap72ehQIDAQABAoIBAQCjfknHsxD6Oac7 -S78ErLZ3L5yXKo4mMkdBhXTUZvCb7aGV7AzdGESMNpLO2FUtJ8nLTXcqQjm0cg+x -Vb7sJHcAuHeovklsAt50RvYqV6wbX1bbXttfI4PpYrTbdyNQENoUVdiHYL1DdS4J -OaL+qEfYh50fzr7Q+OxufPA9Tew23cycpklw8LXK1k/sTj6lfnZDEUQExLuBa62s -4NUqbjWrwBX1/Q0gu3dUyrdUnJvYQUsqXCfg5zjcPm2/AfeGtDXcKpeJ9NOHG14m -CeKMBvqKXVtNrVR6YLhzqp13Wz3620RKvkTgz/tuOhvnNtPkwaiVcNdkTAE4jmCz -vCLEppBpAoGBAPhduQYANFUx88Pvi8J0zxshXoPXADXkYz987lwU8p3CqgQyGkc4 -+QkAkb01v7a+Ba0yEnLuUYHawvJjmZoqwP/l7ivbaH+4jYjtXFQd5z3rV4Br7yEa -TIzBFJSEXu+0JSAO+5kCY0W4y9U3uyI2ala63XQ1KGkHCHVTF9hw04lzAoGBAPAa -CU/be/Hu0MnT6qTWdsaeMlgfWd6fQULdZIAjELDEGpBkLM6sE4tWQCvOIZkBdn/W -1LsgXbVMk2j9Z4HI09hJ+Un9o0G/LgfIbegrB3qcl8EW4QHRWYD1DIAuSwU2USDA -aqVZRUVDrw3bXtwbJygg0KK3U7kveJi/5Q/quhonAoGAfUqifSF7RQM3Cr/8TDOr -Cu+xg2bhx04Ytz1p7W/v2KAMLqOr2AQotV7hJodH2INRz8ZzwiDxY0WdlLOkGD67 -MBypItzDgbLzS4EaU6DmN7lSYgmRD6z5swPwQt/oGJpTJDIKwGyQ8epAgaZks+Sp -jmkkMGxkn18Z6D3ryJdaT3sCgYEAkE5FvH5hDzaGvFcm1u2jM5rPDVHCdN/onGRt -y+nHVUZ35MKgf5LCqGMtbGaWoNxVOV6IOvOn/phInLh45nMqh/tl3geYgKKD6t+W -1wxNkW7econqsGn/zlb5rWylYDuWJQYg0sJFtOyKINzxNX8IWMDoRHPE2S9j3wKu -jJN6SM8CgYEAgBzMhqfcclZwzXT+foSdFiqTOe9EDfYXu9LUiov46aD72s/P2/Vz -/IZdU+QXgiK+UZa8QrO4DUUHw1HILHzKBlyFa6n6vVFF6urvJhBaKiErStHbsTin -QrKaqvv+WOy3Pq0F6mZIEYM3HT9qKPOgYAPQTOwR1NGXcmvq1ihuhwk= ------END RSA PRIVATE KEY----- diff --git a/example-cluster/insecure_key.pub b/example-cluster/insecure_key.pub deleted file mode 100755 index b2c4dac36..000000000 --- a/example-cluster/insecure_key.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDo8SADMCrRE1eu+8WhvewNhkViIvVaFbxSqYPEW/gioS8jD3Q/Hlz31g2h0+CNY+qZ772GTWHHGPH15rCRs0fsaBk1BwNuE0UshTp6XH1DFT0zpVfBklU5K/2hTFFihUsmxMFDBrze2sZ/+0JreeLohZo86FyExKWJJtQlQY71dJZHlBCNgX+oz3o4mBdIleDdyJ67N60Nne/ZzDtlKuZlfwhnls+2jCa5KXHG6m4mveaYrWaQVRZKzr+4lK+VqEKLjwMuHx7jnlesB0IpWfhrN1wykuJCZgh7aI8Mfo215cTjws5jE4u42JOhJbK8xInLeGBZgkZ1UyWd34CnvZ6F robj@C02RJ7CUG8WN diff --git a/example-cluster/logging.conf b/example-cluster/logging.conf deleted file mode 100644 index 823742af3..000000000 --- a/example-cluster/logging.conf +++ /dev/null @@ -1,69 +0,0 @@ -[loggers] -keys=root, twisted, tron, tron.serialize.runstate.statemanager, tron.api.www.access, task_processing, tron.mesos.task_output, pymesos - -[handlers] -keys=stdoutHandler, nullHandler - -[formatters] -keys=defaultFormatter - -[logger_root] -level=DEBUG -handlers=stdoutHandler -propagate=1 - -[logger_twisted] -level=WARN -handlers=stdoutHandler -qualname=twisted -propagate=0 - -[logger_tron] -level=DEBUG -handlers=stdoutHandler -qualname=tron -propagate=0 - -[logger_tron.api.www.access] -level=INFO -handlers=stdoutHandler -qualname=tron.api.www.access -propagate=0 - -[logger_tron.serialize.runstate.statemanager] -level=DEBUG -handlers=stdoutHandler -qualname=tron.serialize.runstate.statemanager -propagate=0 - -[logger_task_processing] -level=INFO -handlers=stdoutHandler -qualname=task_processing -propagate=0 - -[logger_pymesos] -level=DEBUG -handlers=stdoutHandler -qualname=pymesos -propagate=0 - -[logger_tron.mesos.task_output] -level=INFO -handlers=nullHandler -qualname=tron.mesos.task_output -propagate=0 - -[handler_stdoutHandler] -class=logging.StreamHandler -level=DEBUG -formatter=defaultFormatter -args=() - -[handler_nullHandler] -class=logging.NullHandler -level=DEBUG -args=() - 
-[formatter_defaultFormatter] -format=%(asctime)s %(name)s %(levelname)s %(message)s diff --git a/example-cluster/start.sh b/example-cluster/start.sh deleted file mode 100755 index 50412770e..000000000 --- a/example-cluster/start.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -set -e - -if ! service ssh status >/dev/null 2>&1 ; then - echo Setting up SSH - apt-get -qq -y install ssh >/dev/null - service ssh start -fi - -if [ -z "$SSH_AUTH_SOCK" ]; then - echo Setting up SSH agent - mkdir -p ~/.ssh - cp example-cluster/insecure_key ~/.ssh/id_rsa - cp example-cluster/insecure_key.pub ~/.ssh/authorized_keys - chmod -R 0600 ~/.ssh - eval $(ssh-agent) -fi - -if ! pip3.6 list --format=columns | grep 'tron.*/work' > /dev/null; then - echo Installing packages - pip3.6 install -q -r requirements.txt -e . -fi - -echo Starting Tron -FAKETIME_X=${FAKETIME_X:-10} -exec faketime -f "+0.0y x$FAKETIME_X" \ - trond -l logging.conf -w /nail/tron -v --debug -H 0.0.0.0 diff --git a/example-cluster/sync-from-yelp-prod.sh b/example-cluster/sync-from-yelp-prod.sh deleted file mode 100755 index 60cabe6f2..000000000 --- a/example-cluster/sync-from-yelp-prod.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -rsync --exclude=.stderr --exclude=.stdout -aPv tron-prod:/nail/tron/config example-cluster/ -rsync --exclude=.stderr --exclude=.stdout -aPv tron-prod:/nail/tron/tron_state_0.6.1.5.gdbm example-cluster/ diff --git a/itest.sh b/itest.sh index 9d1393479..a16b10bda 100755 --- a/itest.sh +++ b/itest.sh @@ -19,7 +19,6 @@ eval $(ssh-agent) trond --help tronfig --help -/opt/venvs/tron/bin/python --version | grep -q '3\.6' /opt/venvs/tron/bin/python - <=0.13.3 humanize ipdb ipython -Jinja2>=2.10.1 +Jinja2>=3.1.2 lockfile moto psutil diff --git a/requirements.txt b/requirements.txt index 931e45a4b..4ffb5b8d6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,41 +1,40 @@ addict==2.2.1 argcomplete==1.9.5 -asn1crypto==1.1.0 +asttokens==2.2.1 attrs==19.3.0 Automat==20.2.0 aws-sam-translator==1.15.1 aws-xray-sdk==2.4.2 backcall==0.1.0 boto==2.49.0 -boto3==1.9.249 -botocore==1.12.249 -bsddb3==6.2.6 +boto3==1.26.85 +botocore==1.29.86 +bsddb3==6.2.7 cachetools==4.2.1 -certifi==2019.11.28 +certifi==2022.12.7 cffi==1.12.3 cfn-lint==0.24.4 chardet==3.0.4 constantly==15.1.0 -cryptography==2.7 +cryptography==39.0.1 dataclasses==0.6 DateTime==4.3 decorator==4.4.0 docker==4.1.0 -docutils==0.15.2 ecdsa==0.13.3 -future==0.18.1 +executing==1.2.0 +future==0.18.3 google-auth==1.23.0 http-parser==0.9.0 humanize==0.5.1 hyperlink==19.0.0 idna==2.8 -importlib-metadata==1.3.0 -incremental==17.5.0 +incremental==22.10.0 ipdb==0.13.2 -ipython==7.8.0 +ipython==8.10.0 ipython-genutils==0.2.0 -jedi==0.15.1 -Jinja2==2.10.3 +jedi==0.16.0 +Jinja2==3.1.2 jmespath==0.9.4 jsondiff==1.1.2 jsonpatch==1.24 @@ -44,24 +43,24 @@ jsonpointer==2.0 jsonschema==3.2.0 kubernetes==22.6.0 lockfile==0.12.2 -MarkupSafe==1.1.1 +MarkupSafe==2.1.1 +matplotlib-inline==0.1.3 mock==3.0.5 -more-itertools==8.0.2 moto==1.3.13 oauthlib==3.1.0 -parso==0.5.1 +parso==0.7.0 pexpect==4.7.0 pickleshare==0.7.5 -prompt-toolkit==2.0.10 +prompt-toolkit==3.0.38 psutil==5.6.6 ptyprocess==0.6.0 +pure-eval==0.2.2 py-bcrypt==0.4 pyasn1==0.4.7 pyasn1-modules==0.2.8 pycparser==2.19 pyformance==0.4 -Pygments==2.4.2 -PyHamcrest==2.0.2 +Pygments==2.7.4 pymesos==0.3.9 pyrsistent==0.15.4 pysensu-yelp==0.4.4 @@ -71,23 +70,23 @@ python-jose==3.0.1 pytimeparse==1.1.8 pytz==2019.3 PyYAML==6.0.1 -requests==2.22.0 +requests==2.25.0 requests-oauthlib==1.2.0 responses==0.10.6 -rsa==4.0 
-s3transfer==0.2.1
-setuptools==41.4.0
-six==1.14.0
+rsa==4.9
+s3transfer==0.6.0
+setuptools==65.5.1
+six==1.15.0
 sshpubkeys==3.1.0
-task-processing==0.12.2
-traitlets==4.3.3
-Twisted==20.3.0
-typing-extensions==3.10.0.0
-urllib3==1.25.6
+stack-data==0.6.2
+task-processing==0.13.0
+traitlets==5.0.0
+Twisted==22.10.0
+typing-extensions==4.5.0
+urllib3==1.26.5
 wcwidth==0.1.7
 websocket-client==0.56.0
-Werkzeug==0.16.0
+Werkzeug==2.2.3
 wrapt==1.11.2
 xmltodict==0.12.0
-zipp==0.6.0
 zope.interface==5.1.0
diff --git a/setup.py b/setup.py
index a3c0d7fb2..f7e4acc94 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
     description="Job scheduling and monitoring system",
     classifiers=[
        "Programming Language :: Python",
-        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: System :: Monitoring",
@@ -27,7 +27,10 @@
        "Intended Audience :: System Administrators",
        "Development Status :: 4 - Beta",
     ],
-    packages=find_packages(exclude=["tests.*", "tests", "example-cluster"],) + ["tronweb"],
+    packages=find_packages(
+        exclude=["tests.*", "tests", "example-cluster"],
+    )
+    + ["tronweb"],
     scripts=glob.glob("bin/*") + glob.glob("tron/bin/*.py"),
     include_package_data=True,
     long_description="""
diff --git a/example-cluster/tronfig/MASTER.yaml b/testfiles/MASTER.yaml
similarity index 100%
rename from example-cluster/tronfig/MASTER.yaml
rename to testfiles/MASTER.yaml
diff --git a/testifycompat/bin/migrate.py b/testifycompat/bin/migrate.py
index 03ca6717d..9f3ea5066 100644
--- a/testifycompat/bin/migrate.py
+++ b/testifycompat/bin/migrate.py
@@ -32,8 +32,14 @@ def replace(pattern, repl):
     replace(r"^from testify.assertions import ", "from testifycompat import "),
     replace(r"^import testify as T", "import testifycompat as T"),
     # Replace test classes
-    replace(r"^class (?:Test)?(\w+)(?:Test|TestCase)\((?:T\.)?TestCase\):$", "class Test\\1(object):",),
-    replace(r"^class (?:Test)?(\w+)(?:Test|TestCase)(\(\w+TestCase\)):$", "class Test\\1\\2:",),
+    replace(
+        r"^class (?:Test)?(\w+)(?:Test|TestCase)\((?:T\.)?TestCase\):$",
+        "class Test\\1(object):",
+    ),
+    replace(
+        r"^class (?:Test)?(\w+)(?:Test|TestCase)(\(\w+TestCase\)):$",
+        "class Test\\1\\2:",
+    ),
     # Replace some old assertions
     replace(r"self.assert_\((.*)\)", "assert \\1"),
 ]
diff --git a/tests/actioncommand_test.py b/tests/actioncommand_test.py
index 3ab006589..6e75cca62 100644
--- a/tests/actioncommand_test.py
+++ b/tests/actioncommand_test.py
@@ -61,7 +61,9 @@ def test_write_stderr_no_fh(self):
     def test_write_stderr(self):
         message = "this is the message"
         serializer = mock.create_autospec(filehandler.FileHandleManager)
-        fh = serializer.open.return_value = mock.create_autospec(filehandler.FileHandleWrapper,)
+        fh = serializer.open.return_value = mock.create_autospec(
+            filehandler.FileHandleWrapper,
+        )
         ac = ActionCommand("action.1.do", "do", serializer)
         ac.write_stderr(message)

@@ -108,18 +110,32 @@ def test_is_done(self):
 class TestCreateActionCommandFactoryFromConfig(TestCase):
     def test_create_default_action_command_no_config(self):
         config = ()
-        factory = actioncommand.create_action_runner_factory_from_config(config,)
+        factory = actioncommand.create_action_runner_factory_from_config(
+            config,
+        )
         assert_equal(type(factory), actioncommand.NoActionRunnerFactory)

     def test_create_default_action_command(self):
-        config = schema.ConfigActionRunner(schema.ActionRunnerTypes.none.value, None, None,)
-        factory = 
actioncommand.create_action_runner_factory_from_config(config,) + config = schema.ConfigActionRunner( + schema.ActionRunnerTypes.none.value, + None, + None, + ) + factory = actioncommand.create_action_runner_factory_from_config( + config, + ) assert type(factory) is actioncommand.NoActionRunnerFactory def test_create_action_command_with_simple_runner(self): status_path, exec_path = "/tmp/what", "/remote/bin" - config = schema.ConfigActionRunner(schema.ActionRunnerTypes.subprocess.value, status_path, exec_path,) - factory = actioncommand.create_action_runner_factory_from_config(config,) + config = schema.ConfigActionRunner( + schema.ActionRunnerTypes.subprocess.value, + status_path, + exec_path, + ) + factory = actioncommand.create_action_runner_factory_from_config( + config, + ) assert_equal(factory.status_path, status_path) assert_equal(factory.exec_path, exec_path) @@ -129,11 +145,16 @@ class TestSubprocessActionRunnerFactory(TestCase): def setup_factory(self): self.status_path = "status_path" self.exec_path = "exec_path" - self.factory = actioncommand.SubprocessActionRunnerFactory(self.status_path, self.exec_path,) + self.factory = actioncommand.SubprocessActionRunnerFactory( + self.status_path, + self.exec_path, + ) def test_from_config(self): config = mock.Mock() - runner_factory = actioncommand.SubprocessActionRunnerFactory.from_config(config,) + runner_factory = actioncommand.SubprocessActionRunnerFactory.from_config( + config, + ) assert_equal(runner_factory.status_path, config.remote_status_path) assert_equal(runner_factory.exec_path, config.remote_exec_path) @@ -144,7 +165,8 @@ def test_create(self): action_command = self.factory.create(id, command, serializer) assert_equal(action_command.id, id) assert_equal( - action_command.command, self.factory.build_command.return_value, + action_command.command, + self.factory.build_command.return_value, ) assert_equal(action_command.stdout, serializer.open.return_value) assert_equal(action_command.stderr, serializer.open.return_value) @@ -155,7 +177,13 @@ def test_build_command_complex_quoting(self): exec_name = "action_runner.py" actual = self.factory.build_command(id, command, exec_name) assert_equal( - shlex.split(actual), [f"{self.exec_path}/{exec_name}", f"{self.status_path}/{id}", command, id,], + shlex.split(actual), + [ + f"{self.exec_path}/{exec_name}", + f"{self.status_path}/{id}", + command, + id, + ], ) def test_build_stop_action_command(self): @@ -163,10 +191,12 @@ def test_build_stop_action_command(self): autospec_method(self.factory.build_command) action_command = self.factory.build_stop_action_command(id, command) assert_equal( - action_command.id, f"{id}.{self.factory.build_command.return_value}", + action_command.id, + f"{id}.{self.factory.build_command.return_value}", ) assert_equal( - action_command.command, self.factory.build_command.return_value, + action_command.command, + self.factory.build_command.return_value, ) def test__eq__true(self): diff --git a/tests/api/adapter_test.py b/tests/api/adapter_test.py index 6e5b647bc..e0fffc324 100644 --- a/tests/api/adapter_test.py +++ b/tests/api/adapter_test.py @@ -93,7 +93,8 @@ def test_get_state(self): @mock.patch("tron.api.adapter.NodeAdapter", autospec=True) def test_get_node(self, mock_node_adapter): assert_equal( - self.adapter.get_node(), mock_node_adapter.return_value.get_repr.return_value, + self.adapter.get_node(), + mock_node_adapter.return_value.get_repr.return_value, ) mock_node_adapter.assert_called_with(self.original.node) @@ -136,7 +137,12 @@ def 
setup_adapter(self): self.action_runs = mock.create_autospec( actionrun.ActionRunCollection, action_graph=actiongraph.ActionGraph( - {"a1": self.a1, "a2": self.a2,}, {"a1": set(), "a2": {"a1"}}, {"a1": set(), "a2": set()}, + { + "a1": self.a1, + "a2": self.a2, + }, + {"a1": set(), "a2": {"a1"}}, + {"a1": set(), "a2": set()}, ), ) self.adapter = adapter.ActionRunGraphAdapter(self.action_runs) @@ -156,7 +162,10 @@ class TestJobRunAdapter(TestCase): def setup_adapter(self): action_runs = mock.MagicMock() action_runs.__iter__.return_value = iter([mock.Mock(), mock.Mock()]) - self.job_run = mock.Mock(action_runs=action_runs, action_graph=mocks.MockActionGraph(),) + self.job_run = mock.Mock( + action_runs=action_runs, + action_graph=mocks.MockActionGraph(), + ) self.adapter = JobRunAdapter(self.job_run, include_action_runs=True) def test__init__(self): @@ -194,7 +203,8 @@ def test_repr(self, mock_many): result = self.adapter.get_repr() assert_equal(result["name"], self.pool.get_name.return_value) mock_many.assert_called_with( - adapter.NodeAdapter, self.pool.get_nodes.return_value, + adapter.NodeAdapter, + self.pool.get_nodes.return_value, ) diff --git a/tests/api/controller_test.py b/tests/api/controller_test.py index fd7d70864..2c37c1dae 100644 --- a/tests/api/controller_test.py +++ b/tests/api/controller_test.py @@ -20,7 +20,11 @@ class TestJobCollectionController: @pytest.fixture(autouse=True) def setup_controller(self): - self.collection = mock.create_autospec(JobCollection, enable=mock.Mock(), disable=mock.Mock(),) + self.collection = mock.create_autospec( + JobCollection, + enable=mock.Mock(), + disable=mock.Mock(), + ) self.controller = JobCollectionController(self.collection) def test_handle_command_unknown(self): @@ -30,27 +34,45 @@ def test_handle_command_unknown(self): def test_handle_command_move_non_existing_job(self): self.collection.get_names.return_value = [] - result = self.controller.handle_command("move", old_name="old.test", new_name="new.test",) + result = self.controller.handle_command( + "move", + old_name="old.test", + new_name="new.test", + ) assert "doesn't exist" in result def test_handle_command_move_to_existing_job(self): self.collection.get_names.return_value = ["old.test", "new.test"] - result = self.controller.handle_command("move", old_name="old.test", new_name="new.test",) + result = self.controller.handle_command( + "move", + old_name="old.test", + new_name="new.test", + ) assert "exists already" in result def test_handle_command_move(self): self.collection.get_names.return_value = ["old.test"] - result = self.controller.handle_command("move", old_name="old.test", new_name="new.test",) + result = self.controller.handle_command( + "move", + old_name="old.test", + new_name="new.test", + ) assert "Error" not in result class TestActionRunController: @pytest.fixture(autouse=True) def setup_controller(self): - self.action_run = mock.create_autospec(actionrun.ActionRun, cancel=mock.Mock(),) + self.action_run = mock.create_autospec( + actionrun.ActionRun, + cancel=mock.Mock(), + ) self.job_run = mock.create_autospec(jobrun.JobRun) self.job_run.is_scheduled = False - self.controller = controller.ActionRunController(self.action_run, self.job_run,) + self.controller = controller.ActionRunController( + self.action_run, + self.job_run, + ) self.job_run.action_runs.cleanup_action_run = None def test_handle_command_start_failed(self): @@ -103,13 +125,22 @@ def test_handle_retry_new_command(self): class TestJobRunController: @pytest.fixture(autouse=True) def 
setup_controller(self): - self.job_run = mock.create_autospec(jobrun.JobRun, run_time=mock.Mock(), cancel=mock.Mock(),) + self.job_run = mock.create_autospec( + jobrun.JobRun, + run_time=mock.Mock(), + cancel=mock.Mock(), + ) self.job_scheduler = mock.create_autospec(JobScheduler) - self.controller = controller.JobRunController(self.job_run, self.job_scheduler,) + self.controller = controller.JobRunController( + self.job_run, + self.job_scheduler, + ) def test_handle_command_restart(self): self.controller.handle_command("restart") - self.job_scheduler.manual_start.assert_called_with(self.job_run.run_time,) + self.job_scheduler.manual_start.assert_called_with( + self.job_run.run_time, + ) def test_handle_mapped_command(self): result = self.controller.handle_command("start") @@ -191,7 +222,11 @@ def test_update_config_failure(self): self.manager.get_hash.return_value = config_hash self.manager.write_config.side_effect = [ConfigError("It broke"), None] self.controller.read_config = mock.Mock(return_value={"config": old_content}) - error = self.controller.update_config(name, content, config_hash,) + error = self.controller.update_config( + name, + content, + config_hash, + ) assert error == "It broke" self.manager.write_config.call_args_list = [ (name, content), diff --git a/tests/api/resource_test.py b/tests/api/resource_test.py index 5e4f312e4..d05a686df 100644 --- a/tests/api/resource_test.py +++ b/tests/api/resource_test.py @@ -23,10 +23,14 @@ from tron.core.job_scheduler import JobScheduler with mock.patch( - "tron.api.async_resource.AsyncResource.bounded", lambda fn: fn, autospec=None, + "tron.api.async_resource.AsyncResource.bounded", + lambda fn: fn, + autospec=None, ): with mock.patch( - "tron.api.async_resource.AsyncResource.exclusive", lambda fn: fn, autospec=None, + "tron.api.async_resource.AsyncResource.exclusive", + lambda fn: fn, + autospec=None, ): from tron.api import resource as www @@ -46,7 +50,10 @@ def mock_request(): @pytest.fixture def mock_respond(): - with mock.patch("tron.api.resource.respond", autospec=True,) as mock_respond: + with mock.patch( + "tron.api.resource.respond", + autospec=True, + ) as mock_respond: mock_respond.side_effect = lambda request, response, code=None: response yield mock_respond @@ -90,7 +97,9 @@ def test_handle_command_unknown(self, mock_respond): www.handle_command(request, mock_controller, obj) mock_controller.handle_command.assert_called_with(command) mock_respond.assert_called_with( - request=request, response={"error": f"Unknown command '{command}' for '{obj}'"}, code=http.NOT_IMPLEMENTED, + request=request, + response={"error": f"Unknown command '{command}' for '{obj}'"}, + code=http.NOT_IMPLEMENTED, ) def test_handle_command(self, mock_respond): @@ -100,7 +109,8 @@ def test_handle_command(self, mock_respond): www.handle_command(request, mock_controller, obj) mock_controller.handle_command.assert_called_with(command) mock_respond.assert_called_with( - request=request, response={"result": mock_controller.handle_command.return_value}, + request=request, + response={"result": mock_controller.handle_command.return_value}, ) def test_handle_command_error(self, mock_respond): @@ -120,7 +130,8 @@ def setup_resource(self): self.job_run = mock.MagicMock() self.action_run = mock.MagicMock(output_path=["one"]) with mock.patch("tron.config.static_config.load_yaml_file", autospec=True,), mock.patch( - "tron.config.static_config.build_configuration_watcher", autospec=True, + "tron.config.static_config.build_configuration_watcher", + autospec=True, ): 
self.resource = www.ActionRunResource(self.action_run, self.job_run) @@ -273,7 +284,8 @@ def test_getChild(self): def test_getChild_action_run_history(self): autospec_method( - self.resource.get_run_from_identifier, return_value=None, + self.resource.get_run_from_identifier, + return_value=None, ) action_name = "action_name" action_runs = [mock.Mock(), mock.Mock()] @@ -289,7 +301,9 @@ class TestConfigResource: def setup_resource(self): self.mcp = mock.create_autospec(mcp.MasterControlProgram) self.resource = www.ConfigResource(self.mcp) - self.controller = self.resource.controller = mock.create_autospec(controller.ConfigController,) + self.controller = self.resource.controller = mock.create_autospec( + controller.ConfigController, + ) def test_render_GET(self, mock_respond): name = "the_name" @@ -297,7 +311,8 @@ def test_render_GET(self, mock_respond): self.resource.render_GET(request) self.controller.read_config.assert_called_with(name) mock_respond.assert_called_with( - request=request, response=self.resource.controller.read_config.return_value, + request=request, + response=self.resource.controller.read_config.return_value, ) def test_render_POST_update(self, mock_respond): @@ -335,7 +350,8 @@ def test_render_GET(self, request, mock_respond): "boot_time": self.mcp.boot_time, } mock_respond.assert_called_with( - request=request, response=expected_response, + request=request, + response=expected_response, ) @@ -345,14 +361,18 @@ def test_render_GET(self, mock_view_metrics, request, mock_respond): resource = www.MetricsResource() resource.render_GET(request) mock_respond.assert_called_with( - request=request, response=mock_view_metrics.return_value, + request=request, + response=mock_view_metrics.return_value, ) class TestTronSite: @mock.patch("tron.api.resource.meter", autospec=True) def test_log_request(self, mock_meter): - site = www.TronSite.create(mock.create_autospec(mcp.MasterControlProgram), "webpath",) + site = www.TronSite.create( + mock.create_autospec(mcp.MasterControlProgram), + "webpath", + ) request = mock.Mock(code=500) site.log(request) assert mock_meter.call_count == 1 diff --git a/tests/assertions.py b/tests/assertions.py index 75b0466fa..3b52570e5 100644 --- a/tests/assertions.py +++ b/tests/assertions.py @@ -12,7 +12,9 @@ def assert_raises(expected_exception_class, callable_obj, *args, **kwargs): except expected_exception_class as e: # we got the expected exception return e - assert_not_reached("No exception was raised (expected %s)" % expected_exception_class,) + assert_not_reached( + "No exception was raised (expected %s)" % expected_exception_class, + ) def assert_length(sequence, expected, msg=None): diff --git a/tests/bin/action_runner_test.py b/tests/bin/action_runner_test.py index e6decb2d0..c67890307 100644 --- a/tests/bin/action_runner_test.py +++ b/tests/bin/action_runner_test.py @@ -23,9 +23,18 @@ def test_get_content(self): ) as fakepid: faketime.return_value = 0 fakepid.return_value = 2 - content = self.status_file.get_content(command=command, proc=proc, run_id=run_id,) + content = self.status_file.get_content( + command=command, + proc=proc, + run_id=run_id, + ) expected = dict( - run_id=run_id, command=command, pid=proc.pid, return_code=proc.returncode, runner_pid=2, timestamp=0, + run_id=run_id, + command=command, + pid=proc.pid, + return_code=proc.returncode, + runner_pid=2, + timestamp=0, ) assert_equal(content, expected) @@ -73,11 +82,18 @@ def test_run_proc(self): self.mock_isdir.return_value = True self.mock_access.return_value = True 
action_runner.run_proc( - self.output_path, self.command, self.run_id, self.proc, + self.output_path, + self.command, + self.run_id, + self.proc, + ) + self.mock_status_file.assert_called_with( + self.output_path + "/" + action_runner.STATUS_FILE, ) - self.mock_status_file.assert_called_with(self.output_path + "/" + action_runner.STATUS_FILE,) self.mock_status_file.return_value.wrap.assert_called_with( - command=self.command, run_id=self.run_id, proc=self.proc, + command=self.command, + run_id=self.run_id, + proc=self.proc, ) self.proc.wait.assert_called_with() @@ -85,7 +101,9 @@ def test_run_proc(self): class TestBuildEnvironment: def test_build_environment(self): with mock.patch( - "tron.bin.action_runner.os.environ", dict(PATH="/usr/bin/nowhere"), autospec=None, + "tron.bin.action_runner.os.environ", + dict(PATH="/usr/bin/nowhere"), + autospec=None, ): env = action_runner.build_environment("MASTER.foo.10.bar") @@ -99,7 +117,9 @@ def test_build_environment(self): def test_build_environment_invalid_run_id(self): with mock.patch( - "tron.bin.action_runner.os.environ", dict(PATH="/usr/bin/nowhere"), autospec=None, + "tron.bin.action_runner.os.environ", + dict(PATH="/usr/bin/nowhere"), + autospec=None, ): env = action_runner.build_environment("asdf") @@ -113,7 +133,9 @@ def test_build_environment_invalid_run_id(self): def test_build_environment_too_long_run_id(self): with mock.patch( - "tron.bin.action_runner.os.environ", dict(PATH="/usr/bin/nowhere"), autospec=None, + "tron.bin.action_runner.os.environ", + dict(PATH="/usr/bin/nowhere"), + autospec=None, ): env = action_runner.build_environment("MASTER.foo.10.bar.baz") diff --git a/tests/bin/action_status_test.py b/tests/bin/action_status_test.py index 670637bb2..4294dcd8c 100644 --- a/tests/bin/action_status_test.py +++ b/tests/bin/action_status_test.py @@ -25,7 +25,9 @@ def setup_status_file(self): @mock.patch("tron.bin.action_status.os.killpg", autospec=True) @mock.patch( - "tron.bin.action_status.os.getpgid", autospec=True, return_value=42, + "tron.bin.action_status.os.getpgid", + autospec=True, + return_value=42, ) def test_send_signal(self, mock_getpgid, mock_kill): action_status.send_signal(signal.SIGKILL, self.status_file) @@ -40,7 +42,9 @@ def test_get_field_retrieves_last_entry(self): "run_id": "MASTER.foo.bar.1234", "command": "echo " + "really_long" * 100, } - self.status_file.write(yaml.safe_dump(additional_status_content, explicit_start=True),) + self.status_file.write( + yaml.safe_dump(additional_status_content, explicit_start=True), + ) self.status_file.flush() self.status_file.seek(0) assert action_status.get_field("return_code", self.status_file) == 0 diff --git a/tests/bin/check_tron_jobs_test.py b/tests/bin/check_tron_jobs_test.py index 10de68a04..2d905eb24 100644 --- a/tests/bin/check_tron_jobs_test.py +++ b/tests/bin/check_tron_jobs_test.py @@ -23,13 +23,23 @@ class TestCheckJobs(TestCase): @patch("tron.bin.check_tron_jobs.cmd_utils", autospec=True) @patch("tron.bin.check_tron_jobs.parse_cli", autospec=True) def test_check_job_result_exception( - self, mock_args, mock_cmd_utils, mock_client, mock_check_job_result, + self, + mock_args, + mock_cmd_utils, + mock_client, + mock_check_job_result, ): type(mock_args.return_value).job = PropertyMock(return_value=None) mock_client.return_value.jobs.return_value = [ - {"name": "job1",}, - {"name": "job2",}, - {"name": "job3",}, + { + "name": "job1", + }, + { + "name": "job2", + }, + { + "name": "job3", + }, ] mock_check_job_result.side_effect = [ KeyError("foo"), @@ -49,20 +59,32 
@@ def test_job_succeeded(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -78,24 +100,42 @@ def test_job_running_and_action_succeeded(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, "runs": [ - {"id": "MASTER.test.2.action2", "state": "running",}, - {"id": "MASTER.test.1.action1", "state": "succeeded",}, + { + "id": "MASTER.test.2.action2", + "state": "running", + }, + { + "id": "MASTER.test.1.action1", + "state": "succeeded", + }, ], # noqa: E122 }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -109,28 +149,41 @@ def test_get_relevant_action_picks_the_first_one_succeeded(self): "id": "MASTER.test.action1", "action_name": "action1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1200), + ), "duration": "0:18:01.475067", }, { "id": "MASTER.test.action2", "action_name": "action2", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "0:08:02.005783", }, { "id": "MASTER.test.action1", "action_name": "action1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time()), + ), "duration": "0:00:01.006305", }, ] actual = check_tron_jobs.get_relevant_action( action_runs=action_runs, last_state=State.SUCCEEDED, - actions_expected_runtime={"action1": 86400.0, "action2": 86400.0, "action3": 86400.0,}, + actions_expected_runtime={ + "action1": 86400.0, + "action2": 86400.0, + "action3": 86400.0, + }, ) assert_equal(actual["id"], "MASTER.test.action1") @@ -143,20 +196,32 @@ def test_job_failed(self): 
{ "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "failed", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -172,20 +237,35 @@ def test_most_recent_end_time_job_failed(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, { "id": "MASTER.test.1", "state": "failed", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 500),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 500), + ), }, ], } @@ -201,26 +281,47 @@ def test_rerun_job_failed(self): { "id": "MASTER.test.4", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.3", "state": "failed", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 100),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 100), + ), }, { "id": "MASTER.test.2", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 500),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 500), + ), }, { "id": "MASTER.test.1", "state": "failed", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + 
time.localtime(time.time() - 1700), + ), }, ], } @@ -236,24 +337,42 @@ def test_job_running_but_action_failed_already(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, "runs": [ - {"id": "MASTER.test.2.action2", "state": "running",}, - {"id": "MASTER.test.1.action1", "state": "failed",}, + { + "id": "MASTER.test.2.action2", + "state": "running", + }, + { + "id": "MASTER.test.1.action1", + "state": "failed", + }, ], # noqa: E122 }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -264,7 +383,12 @@ def test_job_running_but_action_failed_already(self): def test_get_relevant_action_picks_the_one_that_failed(self): action_runs = [ { - "node": {"username": "batch", "hostname": "localhost", "name": "localhost", "port": 22,}, + "node": { + "username": "batch", + "hostname": "localhost", + "name": "localhost", + "port": 22, + }, "raw_command": "/bin/false", "requirements": [], "run_num": "582", @@ -281,7 +405,12 @@ def test_get_relevant_action_picks_the_one_that_failed(self): "job_name": "MASTER.kwatest", }, { - "node": {"username": "batch", "hostname": "localhost", "name": "localhost", "port": 22,}, + "node": { + "username": "batch", + "hostname": "localhost", + "name": "localhost", + "port": 22, + }, "raw_command": "/bin/true", "requirements": [], "run_num": "582", @@ -299,7 +428,9 @@ def test_get_relevant_action_picks_the_one_that_failed(self): }, ] actual = check_tron_jobs.get_relevant_action( - action_runs=action_runs, last_state=State.FAILED, actions_expected_runtime={}, + action_runs=action_runs, + last_state=State.FAILED, + actions_expected_runtime={}, ) assert_equal(actual["state"], "failed") @@ -312,13 +443,19 @@ def test_job_next_run_starting_no_overlap_is_stuck(self): { "id": "MASTER.test.2", "state": "queued", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1200), + ), "end_time": None, }, ], @@ -336,20 +473,32 @@ def test_job_next_run_starting_overlap_allowed_not_stuck(self): { "id": "MASTER.test.3", "state": "queued", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + 
time.localtime(time.time() - 1200), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -367,20 +516,32 @@ def test_job_next_run_starting_no_queueing_not_stuck(self): { "id": "MASTER.test.3", "state": "cancelled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1200), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -398,7 +559,10 @@ def test_job_running_job_exceeds_expected_runtime(self): { "id": "MASTER.test.100", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, "start_time": None, "duration": "", @@ -406,8 +570,14 @@ def test_job_running_job_exceeds_expected_runtime(self): { "id": "MASTER.test.99", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, "duration": "0:10:01.883601", }, @@ -427,7 +597,10 @@ def test_job_waiting_job_exceeds_expected_runtime_already_started(self): { "id": "MASTER.test.100", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, "start_time": None, "duration": "", @@ -435,8 +608,14 @@ def test_job_waiting_job_exceeds_expected_runtime_already_started(self): { "id": "MASTER.test.99", "state": "waiting", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, "duration": "0:10:01.883601", }, @@ -450,19 +629,28 @@ def test_job_running_action_exceeds_expected_runtime(self): job_runs = { "status": "running", "next_run": None, - "actions_expected_runtime": {"action1": 720.0, "action2": 480.0,}, + "actions_expected_runtime": { + 
"action1": 720.0, + "action2": 480.0, + }, "runs": [ dict( id="MASTER.test.3", state="scheduled", - run_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + run_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), end_time=None, duration="", ), dict( id="MASTER.test.2", state="running", - run_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + run_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), end_time=None, duration="0:10:01.883601", runs=[ @@ -470,14 +658,20 @@ def test_job_running_action_exceeds_expected_runtime(self): id="MASTER.test.2.action2", state="running", action_name="action2", - start_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + start_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), duration="0:10:01.883601", ), dict( id="MASTER.test.2.action1", state="running", action_name="action1", - start_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + start_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), duration="0:10:01.885401", ), ], @@ -485,8 +679,14 @@ def test_job_running_action_exceeds_expected_runtime(self): dict( id="MASTER.test.1", state="succeeded", - run_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - end_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + run_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + end_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), duration="0:15:00.453601", ), ], @@ -499,12 +699,18 @@ def test_job_running_action_exceeds_expected_runtime_and_other_action_failed(sel job_runs = { "status": "running", "next_run": None, - "actions_expected_runtime": {"action1": 720.0, "action2": 480.0,}, + "actions_expected_runtime": { + "action1": 720.0, + "action2": 480.0, + }, "runs": [ dict( id="MASTER.test.1", state="running", - run_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + run_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), end_time=None, duration="0:10:01.883601", runs=[ @@ -512,14 +718,20 @@ def test_job_running_action_exceeds_expected_runtime_and_other_action_failed(sel id="MASTER.test.1.action2", state="failed", action_name="action2", - start_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + start_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), duration="0:10:01.883601", ), dict( id="MASTER.test.1.action1", state="running", action_name="action1", - start_time=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + start_time=time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), duration="0:10:01.885401", ), ], @@ -538,13 +750,19 @@ def test_job_stuck_when_runtime_not_sorted(self): { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time()), + ), "end_time": None, }, ], @@ -558,26 +776,39 @@ def 
test_get_relevant_action_pick_the_one_stuck(self): { "id": "MASTER.test.1.action3", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1200), + ), "duration": "0:18:01.475067", }, { "id": "MASTER.test.1.action2", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1100),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1100), + ), "duration": "0:18:40.005783", }, { "id": "MASTER.test.1.action1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1000),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1000), + ), "duration": "0:00:01.006305", }, ] actual = check_tron_jobs.get_relevant_action( action_runs=action_runs, last_state=State.STUCK, - actions_expected_runtime={"action1": 86400.0, "action2": 86400.0, "action3": 86400.0,}, + actions_expected_runtime={ + "action1": 86400.0, + "action2": 86400.0, + "action3": 86400.0, + }, ) assert_equal(actual["id"], "MASTER.test.1.action2") @@ -586,19 +817,28 @@ def test_get_relevant_action_pick_the_one_exceeds_expected_runtime(self): { "id": "MASTER.test.1.action3", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "0:10:00.006305", }, { "id": "MASTER.test.1.action2", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "0:10:00.006383", }, { "id": "MASTER.test.1.action1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "0:10:00.006331", }, ] @@ -608,31 +848,44 @@ def test_get_relevant_action_pick_the_one_exceeds_expected_runtime(self): "action1": 900.0, } actual = check_tron_jobs.get_relevant_action( - action_runs=action_runs, last_state=State.STUCK, actions_expected_runtime=actions_expected_runtime, + action_runs=action_runs, + last_state=State.STUCK, + actions_expected_runtime=actions_expected_runtime, ) assert_equal(actual["id"], "MASTER.test.1.action3") - def test_get_relevant_action_pick_the_one_exceeds_expected_runtime_with_long_duration(self,): + def test_get_relevant_action_pick_the_one_exceeds_expected_runtime_with_long_duration( + self, + ): action_runs = [ { "id": "MASTER.test.1.action3", "action_name": "action3", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "1 day, 0:10:00.006305", }, { "id": "MASTER.test.1.action2", "action_name": "action2", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "2 days, 0:10:00.006383", }, { "id": "MASTER.test.1.action1", "action_name": "action1", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": 
time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "duration": "1 day, 0:10:00.006331", }, ] @@ -642,12 +895,14 @@ def test_get_relevant_action_pick_the_one_exceeds_expected_runtime_with_long_dur "action1": 100000.0, } actual = check_tron_jobs.get_relevant_action( - action_runs=action_runs, last_state=State.STUCK, actions_expected_runtime=actions_expected_runtime, + action_runs=action_runs, + last_state=State.STUCK, + actions_expected_runtime=actions_expected_runtime, ) assert_equal(actual["id"], "MASTER.test.1.action2") def test_no_job_scheduled_or_queuing(self): - """ If the past 2 runs succeeded but no future job is scheuled, + """If the past 2 runs succeeded but no future job is scheuled, we should consider the job to have suceeded. """ job_runs = { @@ -657,14 +912,26 @@ def test_no_job_scheduled_or_queuing(self): { "id": "MASTER.test.2", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 300),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 300), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 900),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 900), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1200), + ), }, ], "monitoring": {}, @@ -682,7 +949,10 @@ def test_job_no_runs_to_check(self): { "id": "MASTER.test.1", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 1200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 1200), + ), "end_time": None, }, ], @@ -710,20 +980,32 @@ def test_job_unknown(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "unknown", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1200), + ), "end_time": None, }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -739,24 +1021,42 @@ def test_job_running_but_action_unknown_already(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "running", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, "runs": 
[ - {"id": "MASTER.test.2.action2", "state": "running",}, - {"id": "MASTER.test.1.action1", "state": "unknown",}, + { + "id": "MASTER.test.2.action2", + "state": "running", + }, + { + "id": "MASTER.test.1.action1", + "state": "unknown", + }, ], # noqa: E122 }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -772,24 +1072,42 @@ def test_job_waiting_but_action_unknown_already(self): { "id": "MASTER.test.3", "state": "scheduled", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "end_time": None, }, { "id": "MASTER.test.2", "state": "waiting", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), "end_time": None, "runs": [ - {"id": "MASTER.test.2.action2", "state": "waiting",}, - {"id": "MASTER.test.1.action1", "state": "unknown",}, + { + "id": "MASTER.test.2.action2", + "state": "waiting", + }, + { + "id": "MASTER.test.1.action1", + "state": "unknown", + }, ], # noqa: E122 }, { "id": "MASTER.test.1", "state": "succeeded", - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), - "end_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1700),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), + "end_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1700), + ), }, ], } @@ -801,23 +1119,35 @@ def test_job_waiting_but_action_unknown_already(self): def test_guess_realert_every(self): job_runs = { "status": "running", - "next_run": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "next_run": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "runs": [ { "id": "MASTER.test.3", "state": "scheduled", "start_time": None, - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), }, { "id": "MASTER.test.2", "state": "failed", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), }, { "id": "MASTER.test.1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), }, ], # noqa: E122 } @@ -827,20 +1157,36 @@ def test_guess_realert_every(self): def test_guess_realert_every_no_action_run_starts(self): job_runs = { "status": "running", - "next_run": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), + "next_run": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), "runs": [ - {"id": "MASTER.test.3", "state": "scheduled", "start_time": None,}, + { + "id": "MASTER.test.3", + "state": "scheduled", + "start_time": None, + }, { "id": "MASTER.test.2", "state": "failed", 
"start_time": None, - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 200),), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 200), + ), }, { "id": "MASTER.test.1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), - "run_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), + "run_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), }, ], # noqa: E122 } @@ -852,16 +1198,26 @@ def test_guess_realert_every_queue_job(self): "status": "running", "next_run": None, "runs": [ - {"id": "MASTER.test.3", "state": "queued", "start_time": None,}, + { + "id": "MASTER.test.3", + "state": "queued", + "start_time": None, + }, { "id": "MASTER.test.2", "state": "running", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 600),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 600), + ), }, { "id": "MASTER.test.1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 1800),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 1800), + ), }, ], } @@ -871,18 +1227,31 @@ def test_guess_realert_every_queue_job(self): def test_guess_realert_every_frequent_run(self): job_runs = { "status": "running", - "next_run": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 10),), + "next_run": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 10), + ), "runs": [ - {"id": "MASTER.test.3", "state": "scheduled", "start_time": None,}, + { + "id": "MASTER.test.3", + "state": "scheduled", + "start_time": None, + }, { "id": "MASTER.test.2", "state": "failed", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 10),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 10), + ), }, { "id": "MASTER.test.1", "state": "succeeded", - "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 20),), + "start_time": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() - 20), + ), }, ], # noqa: E122 } @@ -892,8 +1261,17 @@ def test_guess_realert_every_frequent_run(self): def test_guess_realert_every_first_time_job(self): job_runs = { "status": "enabled", - "next_run": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 600),), - "runs": [{"id": "MASTER.test.1", "state": "scheduled", "start_time": None,},], + "next_run": time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(time.time() + 600), + ), + "runs": [ + { + "id": "MASTER.test.1", + "state": "scheduled", + "start_time": None, + }, + ], } realert_every = check_tron_jobs.guess_realert_every(job_runs) assert_equal(realert_every, -1) @@ -1039,13 +1417,18 @@ def test_sort_runs_by_interval_day_old_empty_buckets(self): assert len(run_buckets) == 7 @patch( - "tron.bin.check_tron_jobs.guess_realert_every", mock.Mock(return_value=1), autospec=None, + "tron.bin.check_tron_jobs.guess_realert_every", + mock.Mock(return_value=1), + autospec=None, ) @patch("tron.bin.check_tron_jobs.Client", autospec=True) @patch("tron.bin.check_tron_jobs.compute_check_result_for_job_runs", autospec=True) @patch("tron.bin.check_tron_jobs.get_object_type_from_identifier", autospec=True) def test_compute_check_result_for_job_not_precious( - self, 
mock_get_obj_type, mock_check_job_runs, mock_client, + self, + mock_get_obj_type, + mock_check_job_runs, + mock_client, ): client = mock_client("fake_server") client.cluster_name = "fake_cluster" @@ -1056,18 +1439,27 @@ def test_compute_check_result_for_job_not_precious( "status": "fake_status", } - results = check_tron_jobs.compute_check_result_for_job(client, self.job, url_index={},) + results = check_tron_jobs.compute_check_result_for_job( + client, + self.job, + url_index={}, + ) # make sure all job runs for a job are included by not incl count arg assert client.job.call_args_list == [ - mock.call(mock_get_obj_type.return_value.url, include_action_runs=True,), + mock.call( + mock_get_obj_type.return_value.url, + include_action_runs=True, + ), ] assert len(results) == 1 assert results[0]["name"] == "check_tron_job.fake_job" assert mock_check_job_runs.call_count == 1 @patch( - "tron.bin.check_tron_jobs.guess_realert_every", mock.Mock(return_value=1), autospec=None, + "tron.bin.check_tron_jobs.guess_realert_every", + mock.Mock(return_value=1), + autospec=None, ) @patch("tron.bin.check_tron_jobs.Client", autospec=True) def test_compute_check_result_for_job_disabled(self, mock_client): @@ -1075,21 +1467,30 @@ def test_compute_check_result_for_job_disabled(self, mock_client): client.cluster_name = "fake_cluster" self.job["status"] = "disabled" - results = check_tron_jobs.compute_check_result_for_job(client, self.job, url_index={},) + results = check_tron_jobs.compute_check_result_for_job( + client, + self.job, + url_index={}, + ) assert len(results) == 1 assert results[0]["status"] == 0 assert results[0]["output"] == "OK: fake_job is disabled and won't be checked." @patch( - "tron.bin.check_tron_jobs.guess_realert_every", mock.Mock(return_value=1), autospec=None, + "tron.bin.check_tron_jobs.guess_realert_every", + mock.Mock(return_value=1), + autospec=None, ) @patch("time.time", mock.Mock(return_value=1539460800.0), autospec=None) @patch("tron.bin.check_tron_jobs.Client", autospec=True) @patch("tron.bin.check_tron_jobs.compute_check_result_for_job_runs", autospec=True) @patch("tron.bin.check_tron_jobs.get_object_type_from_identifier", autospec=True) def test_compute_check_result_for_job_enabled( - self, mock_get_obj_type, mock_check_job_runs, mock_client, + self, + mock_get_obj_type, + mock_check_job_runs, + mock_client, ): client = mock_client("fake_server") client.cluster_name = "fake_cluster" @@ -1101,14 +1502,21 @@ def test_compute_check_result_for_job_enabled( "status": "fake_status", } - results = check_tron_jobs.compute_check_result_for_job(client, self.job, url_index={},) + results = check_tron_jobs.compute_check_result_for_job( + client, + self.job, + url_index={}, + ) # Test that hide_stderr is passed to check_job_runs assert mock_check_job_runs.call_args_list[0][1]["hide_stderr"] is True # make sure all job runs for a job are included by not incl count arg assert client.job.call_args_list == [ - mock.call(mock_get_obj_type.return_value.url, include_action_runs=True,), + mock.call( + mock_get_obj_type.return_value.url, + include_action_runs=True, + ), ] assert len(results) == 4 assert {res["name"] for res in results} == { diff --git a/tests/bin/get_tron_metrics_test.py b/tests/bin/get_tron_metrics_test.py index 1cdbfe8b3..2a04b8b75 100644 --- a/tests/bin/get_tron_metrics_test.py +++ b/tests/bin/get_tron_metrics_test.py @@ -11,7 +11,11 @@ def test_send_data_metric(): process.communicate = mock.Mock(return_value=(b"fake_output", b"fake_error")) cmd_str = "meteorite data -v fake_name 
fake_metric_type fake_value " "-d fake_dim_key:fake_dim_value" - with mock.patch("subprocess.Popen", mock.Mock(return_value=process), autospec=None,) as mock_popen: + with mock.patch( + "subprocess.Popen", + mock.Mock(return_value=process), + autospec=None, + ) as mock_popen: get_tron_metrics.send_data_metric( name="fake_name", metric_type="fake_metric_type", @@ -21,7 +25,11 @@ def test_send_data_metric(): ) assert mock_popen.call_count == 1 - assert mock_popen.call_args == mock.call(cmd_str.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE,) + assert mock_popen.call_args == mock.call( + cmd_str.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) def test_send_data_metric_dry_run(): @@ -45,7 +53,11 @@ def test_send_counter(mock_send_data_metric): assert mock_send_data_metric.call_count == 1 assert mock_send_data_metric.call_args == mock.call( - name="fake_name", metric_type="counter", value="fake_count", dimensions={}, dry_run=False, + name="fake_name", + metric_type="counter", + value="fake_count", + dimensions={}, + dry_run=False, ) @@ -57,7 +69,11 @@ def test_send_gauge(mock_send_data_metric): assert mock_send_data_metric.call_count == 1 assert mock_send_data_metric.call_args == mock.call( - name="fake_name", metric_type="gauge", value="fake_value", dimensions={}, dry_run=False, + name="fake_name", + metric_type="gauge", + value="fake_value", + dimensions={}, + dry_run=False, ) @@ -71,13 +87,24 @@ def test_send_meter(mock_send_counter): @mock.patch("tron.bin.get_tron_metrics.send_gauge", autospec=True) def test_send_histogram(mock_send_gauge): - kwargs = dict(p50="fake_p50", p75="fake_p75", p95="fake_p95", p99="fake_p99",) - p50_kwargs = dict(**kwargs, value="fake_p50",) + kwargs = dict( + p50="fake_p50", + p75="fake_p75", + p95="fake_p95", + p99="fake_p99", + ) + p50_kwargs = dict( + **kwargs, + value="fake_p50", + ) get_tron_metrics.send_histogram("fake_name", **kwargs) assert mock_send_gauge.call_count == len(kwargs) - assert mock_send_gauge.call_args_list[0] == mock.call("fake_name.p50", **p50_kwargs,) + assert mock_send_gauge.call_args_list[0] == mock.call( + "fake_name.p50", + **p50_kwargs, + ) @mock.patch("tron.bin.get_tron_metrics.send_meter", autospec=True) @@ -97,14 +124,18 @@ def test_send_metrics(cluster): metrics = dict(counter=[dict(name="fake_name")]) with mock.patch( - "tron.bin.get_tron_metrics._METRIC_SENDERS", dict(counter=mock_send_counter), autospec=None, + "tron.bin.get_tron_metrics._METRIC_SENDERS", + dict(counter=mock_send_counter), + autospec=None, ): get_tron_metrics.send_metrics(metrics, cluster=cluster, dry_run=True) assert mock_send_counter.call_count == 1 if cluster: assert mock_send_counter.call_args == mock.call( - "fake_name", dry_run=True, dimensions={"tron_cluster": "fake_cluster"}, + "fake_name", + dry_run=True, + dimensions={"tron_cluster": "fake_cluster"}, ) else: assert mock_send_counter.call_args == mock.call("fake_name", dry_run=True) diff --git a/tests/bin/recover_batch_test.py b/tests/bin/recover_batch_test.py index a9fb60b3f..7d22332cf 100644 --- a/tests/bin/recover_batch_test.py +++ b/tests/bin/recover_batch_test.py @@ -18,7 +18,11 @@ def mock_file(): @mock.patch.object(recover_batch, "reactor") @mock.patch("tron.bin.recover_batch.get_exit_code", autospec=True) @pytest.mark.parametrize( - "exit_code,error_msg,should_stop", [(1, "failed", True), (None, None, False),], + "exit_code,error_msg,should_stop", + [ + (1, "failed", True), + (None, None, False), + ], ) def test_notify(mock_get_exit_code, mock_reactor, exit_code, 
error_msg, should_stop): mock_get_exit_code.return_value = exit_code, error_msg @@ -38,7 +42,12 @@ def test_notify(mock_get_exit_code, mock_reactor, exit_code, error_msg, should_s @pytest.mark.parametrize( "line,exit_code,is_running,error_msg", [ - ({"return_code": 0, "runner_pid": 12345}, 0, False, None,), # action runner finishes successfully + ( + {"return_code": 0, "runner_pid": 12345}, + 0, + False, + None, + ), # action runner finishes successfully ( # action runner is killed {"return_code": -9, "runner_pid": 12345}, 9, @@ -51,12 +60,27 @@ def test_notify(mock_get_exit_code, mock_reactor, exit_code, error_msg, should_s False, "Action runner pid 12345 no longer running. Assuming an exit of 1.", ), - ({"runner_pid": 12345}, None, True, None,), # No return code but action_runner pid is running - ({}, None, Exception, None,), # No return code or PID from the file + ( + {"runner_pid": 12345}, + None, + True, + None, + ), # No return code but action_runner pid is running + ( + {}, + None, + Exception, + None, + ), # No return code or PID from the file ], ) def test_get_exit_code( - mock_read_last_yaml_entries, mock_pid_running, line, exit_code, is_running, error_msg, + mock_read_last_yaml_entries, + mock_pid_running, + line, + exit_code, + is_running, + error_msg, ): fake_path = "/file/path" mock_read_last_yaml_entries.return_value = line @@ -92,7 +116,12 @@ def test_read_last_yaml_roundtrip(mock_file): @mock.patch("tron.bin.recover_batch.StatusFileWatcher", autospec=True) @pytest.mark.parametrize("existing_code,watcher_code", [(None, 1), (123, None)]) def test_run( - mock_watcher, mock_get_exit_code, mock_queue, mock_reactor, existing_code, watcher_code, + mock_watcher, + mock_get_exit_code, + mock_queue, + mock_reactor, + existing_code, + watcher_code, ): mock_get_exit_code.return_value = (existing_code, "") mock_queue.return_value.get.return_value = (watcher_code, "") diff --git a/tests/command_context_test.py b/tests/command_context_test.py index 95710ae08..31a7c9f36 100644 --- a/tests/command_context_test.py +++ b/tests/command_context_test.py @@ -34,7 +34,9 @@ def test_build_filled_context_no_objects(self): assert not output.next def test_build_filled_context_single(self): - output = command_context.build_filled_context(command_context.JobContext,) + output = command_context.build_filled_context( + command_context.JobContext, + ) assert isinstance(output.base, command_context.JobContext) assert not output.next @@ -80,8 +82,13 @@ class Obj: class ChainedDictContextTestCase(SimpleContextTestCaseBase): @setup def build_context(self): - self.next_context = command_context.CommandContext(dict(foo="bar", next_foo="next_bar"),) - self.context = command_context.CommandContext(dict(), self.next_context,) + self.next_context = command_context.CommandContext( + dict(foo="bar", next_foo="next_bar"), + ) + self.context = command_context.CommandContext( + dict(), + self.next_context, + ) def test_chain_get(self): assert_equal(self.context["next_foo"], "next_bar") @@ -90,8 +97,13 @@ def test_chain_get(self): class ChainedDictOverrideContextTestCase(SimpleContextTestCaseBase): @setup def build_context(self): - self.next_context = command_context.CommandContext(dict(foo="your mom", next_foo="next_bar"),) - self.context = command_context.CommandContext(dict(foo="bar"), self.next_context,) + self.next_context = command_context.CommandContext( + dict(foo="your mom", next_foo="next_bar"), + ) + self.context = command_context.CommandContext( + dict(foo="bar"), + self.next_context, + ) def 
test_chain_get(self): assert_equal(self.context["next_foo"], "next_bar") @@ -106,7 +118,9 @@ class MyObject(TestCase): obj = MyObject() obj.foo = "bar" - self.next_context = command_context.CommandContext(dict(foo="your mom", next_foo="next_bar"),) + self.next_context = command_context.CommandContext( + dict(foo="your mom", next_foo="next_bar"), + ) self.context = command_context.CommandContext(obj, self.next_context) def test_chain_get(self): @@ -118,8 +132,15 @@ class TestJobContext(TestCase): def setup_job(self): self.last_success = mock.Mock(run_time=datetime.datetime(2012, 3, 14)) mock_scheduler = mock.create_autospec(scheduler.GeneralScheduler) - run_collection = mock.create_autospec(JobRunCollection, last_success=self.last_success,) - self.job = job.Job("MASTER.jobname", mock_scheduler, run_collection=run_collection,) + run_collection = mock.create_autospec( + JobRunCollection, + last_success=self.last_success, + ) + self.job = job.Job( + "MASTER.jobname", + mock_scheduler, + run_collection=run_collection, + ) self.context = command_context.JobContext(self.job) def test_name(self): @@ -185,7 +206,11 @@ class TestActionRunContext(TestCase): @setup def build_context(self): mock_node = mock.create_autospec(node.Node, hostname="something") - self.action_run = mock.create_autospec(actionrun.ActionRun, action_name="something", node=mock_node,) + self.action_run = mock.create_autospec( + actionrun.ActionRun, + action_name="something", + node=mock_node, + ) self.context = command_context.ActionRunContext(self.action_run) def test_actionname(self): diff --git a/tests/commands/backfill_test.py b/tests/commands/backfill_test.py index 809b6bb1d..9388141c5 100644 --- a/tests/commands/backfill_test.py +++ b/tests/commands/backfill_test.py @@ -1,6 +1,6 @@ import datetime +from unittest import mock -import mock import pytest from tron.commands import backfill @@ -44,7 +44,9 @@ def fake_backfill_run(mock_client): tron_client = mock_client.return_value tron_client.url_base = "http://localhost" yield backfill.BackfillRun( - tron_client, client.TronObjectIdentifier("JOB", "/a_job"), TEST_DATETIME_1, + tron_client, + client.TronObjectIdentifier("JOB", "/a_job"), + TEST_DATETIME_1, ) @@ -107,7 +109,12 @@ async def change_run_state(): ], ) def test_backfill_run_cancel( - mock_client_request, fake_backfill_run, event_loop, run_id, response, expected, + mock_client_request, + fake_backfill_run, + event_loop, + run_id, + response, + expected, ): fake_backfill_run.run_id = run_id mock_client_request.return_value = response @@ -118,19 +125,26 @@ def test_backfill_run_cancel( def test_run_backfill_for_date_range_job_dne(mock_get_obj_type, event_loop): mock_get_obj_type.side_effect = ValueError with pytest.raises(ValueError): - event_loop.run_until_complete(backfill.run_backfill_for_date_range("a_server", "a_job", []),) + event_loop.run_until_complete( + backfill.run_backfill_for_date_range("a_server", "a_job", []), + ) @mock.patch.object(client, "get_object_type_from_identifier", autospec=True) def test_run_backfill_for_date_range_not_a_job(mock_get_obj_type, event_loop): mock_get_obj_type.return_value = client.TronObjectIdentifier("JOB_RUN", "a_url") with pytest.raises(ValueError): - event_loop.run_until_complete(backfill.run_backfill_for_date_range("a_server", "a_job", []),) + event_loop.run_until_complete( + backfill.run_backfill_for_date_range("a_server", "a_job", []), + ) @pytest.mark.parametrize( "ignore_errors,expected", - [(True, {"succeeded", "failed", "unknown"}), (False, {"succeeded", "failed", "not 
started"}),], + [ + (True, {"succeeded", "failed", "unknown"}), + (False, {"succeeded", "failed", "not started"}), + ], ) @mock.patch.object(client, "get_object_type_from_identifier", autospec=True) def test_run_backfill_for_date_range_normal(mock_get_obj_type, event_loop, ignore_errors, expected): @@ -144,7 +158,13 @@ async def fake_run_until_completion(self): mock_get_obj_type.return_value = client.TronObjectIdentifier("JOB", "a_url") backfill_runs = event_loop.run_until_complete( - backfill.run_backfill_for_date_range("a_server", "a_job", dates, max_parallel=2, ignore_errors=ignore_errors,) + backfill.run_backfill_for_date_range( + "a_server", + "a_job", + dates, + max_parallel=2, + ignore_errors=ignore_errors, + ) ) assert {br.run_state for br in backfill_runs} == expected diff --git a/tests/commands/client_test.py b/tests/commands/client_test.py index f6ecf4608..485fcc11d 100644 --- a/tests/commands/client_test.py +++ b/tests/commands/client_test.py @@ -18,7 +18,8 @@ def build_file_mock(content): return mock.Mock( - read=mock.Mock(return_value=content), headers=mock.Mock(get_content_charset=mock.Mock(return_value="utf-8")), + read=mock.Mock(return_value=content), + headers=mock.Mock(get_content_charset=mock.Mock(return_value="utf-8")), ) @@ -29,7 +30,10 @@ def setup_options(self): @setup_teardown def patch_urllib(self): - patcher = mock.patch("tron.commands.client.urllib.request.urlopen", autospec=True,) + patcher = mock.patch( + "tron.commands.client.urllib.request.urlopen", + autospec=True, + ) with patcher as self.mock_urlopen: yield @@ -91,13 +95,24 @@ def setup_client(self): @setup_teardown def patch_request(self): - with mock.patch("tron.commands.client.request", autospec=True,) as self.mock_request: + with mock.patch( + "tron.commands.client.request", + autospec=True, + ) as self.mock_request: yield def test_request_error(self): - error_response = Response(error="404", msg="Not Found", content="big kahuna error",) + error_response = Response( + error="404", + msg="Not Found", + content="big kahuna error", + ) client.request = mock.Mock(return_value=error_response) - exception = assert_raises(client.RequestError, self.client.request, "/jobs",) + exception = assert_raises( + client.RequestError, + self.client.request, + "/jobs", + ) assert str(exception) == error_response.content @@ -128,7 +143,9 @@ def test_config_post(self): def test_config_get_default(self): self.client.config("config_name") - self.client.request.assert_called_with("/api/config?name=config_name",) + self.client.request.assert_called_with( + "/api/config?name=config_name", + ) def test_http_get(self): self.client.http_get("/api/jobs", {"include": 1}) @@ -136,15 +153,21 @@ def test_http_get(self): def test_action_runs(self): self.client.action_runs("/api/jobs/name/0/act", num_lines=40) - self.client.request.assert_called_with("/api/jobs/name/0/act?include_stderr=1&include_stdout=1&num_lines=40",) + self.client.request.assert_called_with( + "/api/jobs/name/0/act?include_stderr=1&include_stdout=1&num_lines=40", + ) def test_job_runs(self): self.client.job_runs("/api/jobs/name/0") - self.client.request.assert_called_with("/api/jobs/name/0?include_action_graph=0&include_action_runs=1",) + self.client.request.assert_called_with( + "/api/jobs/name/0?include_action_graph=0&include_action_runs=1", + ) def test_job(self): self.client.job("/api/jobs/name", count=20) - self.client.request.assert_called_with("/api/jobs/name?include_action_runs=0&num_runs=20",) + self.client.request.assert_called_with( + 
"/api/jobs/name?include_action_runs=0&num_runs=20", + ) def test_jobs(self): self.client.jobs() @@ -156,7 +179,10 @@ def test_jobs(self): class TestUserAttribution(TestCase): def test_default_user_agent(self): url = "http://localhost:8089/" - with mock.patch("tron.commands.client.os.environ", autospec=True,) as mock_environ: + with mock.patch( + "tron.commands.client.os.environ", + autospec=True, + ) as mock_environ: mock_environ.get.return_value = "testuser" default_client = client.Client(url, user_attribution=False) # we do not add user attribution by default @@ -164,7 +190,10 @@ def test_default_user_agent(self): def test_attributed_user_agent(self): url = "http://localhost:8089/" - with mock.patch("tron.commands.client.os.environ", autospec=True,) as mock_environ: + with mock.patch( + "tron.commands.client.os.environ", + autospec=True, + ) as mock_environ: mock_environ.get.return_value = "testuser" default_client = client.Client(url, user_attribution=True) # we do not add user attribution by default @@ -187,7 +216,11 @@ def setup_client(self): self.options = mock.Mock() self.index = { "namespaces": ["OTHER", "MASTER"], - "jobs": {"MASTER.namea": "", "MASTER.nameb": "", "OTHER.nameg": "",}, + "jobs": { + "MASTER.namea": "", + "MASTER.nameb": "", + "OTHER.nameg": "", + }, } def test_get_url_from_identifier_job_no_namespace(self): @@ -196,17 +229,26 @@ def test_get_url_from_identifier_job_no_namespace(self): assert_equal(identifier.type, TronObjectType.job) def test_get_url_from_identifier_job(self): - identifier = get_object_type_from_identifier(self.index, "MASTER.namea",) + identifier = get_object_type_from_identifier( + self.index, + "MASTER.namea", + ) assert_equal(identifier.url, "/api/jobs/MASTER.namea") assert_equal(identifier.type, TronObjectType.job) def test_get_url_from_identifier_job_run(self): - identifier = get_object_type_from_identifier(self.index, "MASTER.nameb.7",) + identifier = get_object_type_from_identifier( + self.index, + "MASTER.nameb.7", + ) assert_equal(identifier.url, "/api/jobs/MASTER.nameb/7") assert_equal(identifier.type, TronObjectType.job_run) def test_get_url_from_identifier_action_run(self): - identifier = get_object_type_from_identifier(self.index, "MASTER.nameb.7.run",) + identifier = get_object_type_from_identifier( + self.index, + "MASTER.nameb.7.run", + ) assert_equal(identifier.url, "/api/jobs/MASTER.nameb/7/run") assert_equal(identifier.type, TronObjectType.action_run) @@ -216,7 +258,12 @@ def test_get_url_from_identifier_job_no_namespace_not_master(self): assert_equal(identifier.type, TronObjectType.job) def test_get_url_from_identifier_no_match(self): - exc = assert_raises(ValueError, get_object_type_from_identifier, self.index, "MASTER.namec",) + exc = assert_raises( + ValueError, + get_object_type_from_identifier, + self.index, + "MASTER.namec", + ) assert_in("namec", str(exc)) diff --git a/tests/commands/cmd_utils_test.py b/tests/commands/cmd_utils_test.py index d40c29042..ac9fffaee 100644 --- a/tests/commands/cmd_utils_test.py +++ b/tests/commands/cmd_utils_test.py @@ -42,7 +42,11 @@ def test_filter_jobs_actions_runs_with_nothing(self): prefix = "" expected = ["M.foo", "M.bar"] assert_equal( - cmd_utils.filter_jobs_actions_runs(prefix, inputs,), expected, + cmd_utils.filter_jobs_actions_runs( + prefix, + inputs, + ), + expected, ) def test_filter_jobs_actions_runs_with_almost_a_job(self): @@ -56,7 +60,11 @@ def test_filter_jobs_actions_runs_with_almost_a_job(self): prefix = "M.f" expected = ["M.foo"] assert_equal( - 
cmd_utils.filter_jobs_actions_runs(prefix, inputs,), expected, + cmd_utils.filter_jobs_actions_runs( + prefix, + inputs, + ), + expected, ) def test_filter_jobs_actions_runs_with_a_job_run(self): @@ -71,7 +79,11 @@ def test_filter_jobs_actions_runs_with_a_job_run(self): prefix = "M.foo." expected = ["M.foo.1", "M.foo.2"] assert_equal( - cmd_utils.filter_jobs_actions_runs(prefix, inputs,), expected, + cmd_utils.filter_jobs_actions_runs( + prefix, + inputs, + ), + expected, ) def test_filter_jobs_actions_runs_with_a_job_run_and_id(self): @@ -85,7 +97,11 @@ def test_filter_jobs_actions_runs_with_a_job_run_and_id(self): prefix = "M.foo.1" expected = ["M.foo.1", "M.foo.1.action1"] assert_equal( - cmd_utils.filter_jobs_actions_runs(prefix, inputs,), expected, + cmd_utils.filter_jobs_actions_runs( + prefix, + inputs, + ), + expected, ) @@ -97,9 +113,14 @@ def test_build_option_parser(self): usage = "Something" epilog = "Something" argparse.ArgumentParser = mock.Mock() - parser = cmd_utils.build_option_parser(usage=usage, epilog=epilog,) + parser = cmd_utils.build_option_parser( + usage=usage, + epilog=epilog, + ) argparse.ArgumentParser.assert_called_with( - usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog, + usage=usage, + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=epilog, ) assert parser.add_argument.call_count == 5 @@ -125,10 +146,16 @@ def test_suggest_possibilities_none(self): def test_suggest_possibilities_many(self): expected = "FOOO, FOOBAR" - actual = cmd_utils.suggest_possibilities(word="FOO", possibilities=["FOOO", "FOOBAR"],) + actual = cmd_utils.suggest_possibilities( + word="FOO", + possibilities=["FOOO", "FOOBAR"], + ) assert_in(expected, actual) def test_suggest_possibilities_one(self): expected = "FOOBAR?" 
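# Illustrative sketch (not part of cmd_utils_test.py): the hunks in this file are
# mechanical reformatting driven by black's "magic trailing comma" rule. A call
# written with a trailing comma is exploded to one argument per line; without the
# trailing comma it is kept on a single line when it fits. The two calls below are
# identical at runtime, only the layout that recent black releases emit differs.
sorted([3, 1, 2], reverse=True)  # no trailing comma: stays on one line
sorted(
    [3, 1, 2],
    reverse=True,
)  # trailing comma present: one argument per line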
- actual = cmd_utils.suggest_possibilities(word="FOO", possibilities=["FOOBAR", "BAZ"],) + actual = cmd_utils.suggest_possibilities( + word="FOO", + possibilities=["FOOBAR", "BAZ"], + ) assert_in(expected, actual) diff --git a/tests/commands/display_test.py b/tests/commands/display_test.py index 002135b83..97cb53769 100644 --- a/tests/commands/display_test.py +++ b/tests/commands/display_test.py @@ -49,8 +49,18 @@ class TestDisplayJobs(TestCase): @setup def setup_data(self): self.data = [ - dict(name="important_things", status="running", scheduler=mock.MagicMock(), last_success=None,), - dict(name="other_thing", status="enabled", scheduler=mock.MagicMock(), last_success="2012-01-23 10:23:23",), + dict( + name="important_things", + status="running", + scheduler=mock.MagicMock(), + last_success=None, + ), + dict( + name="other_thing", + status="enabled", + scheduler=mock.MagicMock(), + last_success="2012-01-23 10:23:23", + ), ] def do_format(self): @@ -69,7 +79,10 @@ def setup_data(self): self.data = { "id": "something.23", "state": "UNKWN", - "node": {"hostname": "something", "username": "a",}, + "node": { + "hostname": "something", + "username": "a", + }, "run_time": "sometime", "start_time": "sometime", "end_time": "sometime", diff --git a/tests/commands/retry_test.py b/tests/commands/retry_test.py index b9bef8647..e601d4dcf 100644 --- a/tests/commands/retry_test.py +++ b/tests/commands/retry_test.py @@ -1,6 +1,6 @@ import random +from unittest import mock -import mock import pytest from tron.commands import client @@ -86,7 +86,8 @@ def test_retry_action_init_ok(fake_retry_action): assert fake_retry_action.retry_params == dict(command="retry", use_latest_command=1) assert fake_retry_action.full_action_name == "a_fake_job.0.a_fake_action" fake_retry_action.tron_client.action_runs.assert_called_once_with( - "/a_fake_job/0/a_fake_action", num_lines=0, + "/a_fake_job/0/a_fake_action", + num_lines=0, ) assert fake_retry_action.action_name == "a_fake_action" assert fake_retry_action.action_run_id.url == "/a_fake_job/0/a_fake_action" @@ -100,7 +101,8 @@ def test_check_trigger_statuses(fake_retry_action, event_loop): expected = dict(a_fake_trigger_0=True, a_fake_trigger_1=False) assert expected == event_loop.run_until_complete(fake_retry_action.check_trigger_statuses()) assert fake_retry_action.tron_client.action_runs.call_args_list[1] == mock.call( # 0th call is in init - "/a_fake_job/0/a_fake_action", num_lines=0, + "/a_fake_job/0/a_fake_action", + num_lines=0, ) @@ -139,7 +141,11 @@ def test_wait_for_deps_all_deps_done(fake_retry_action, event_loop): "a_fake_trigger_0 (done), a_fake_trigger_1 (done)", ] fake_retry_action.tron_client.action_runs.side_effect = [ - dict(action_name="a_fake_action", requirements=["required_action_0", "required_action_1"], triggered_by=r,) + dict( + action_name="a_fake_action", + requirements=["required_action_0", "required_action_1"], + triggered_by=r, + ) for r in triggered_by_results ] diff --git a/tests/config/config_parse_test.py b/tests/config/config_parse_test.py index 2553d04b0..99f2b4368 100644 --- a/tests/config/config_parse_test.py +++ b/tests/config/config_parse_test.py @@ -36,7 +36,10 @@ ssh_options=dict(agent=False, identities=["tests/test_id_rsa"]), time_zone="EST", output_stream_dir="/tmp", - nodes=[dict(name="node0", hostname="node0"), dict(name="node1", hostname="node1"),], + nodes=[ + dict(name="node0", hostname="node0"), + dict(name="node1", hostname="node1"), + ], node_pools=[dict(name="NodePool", nodes=["node0", "node1"])], ) @@ -55,7 +58,14 
@@ def make_ssh_options(): def make_mock_schedule(): - return ConfigDailyScheduler(days=set(), hour=0, minute=0, second=0, original="00:00:00 ", jitter=None,) + return ConfigDailyScheduler( + days=set(), + hour=0, + minute=0, + second=0, + original="00:00:00 ", + jitter=None, + ) def make_command_context(): @@ -67,14 +77,27 @@ def make_command_context(): def make_nodes(): return { - "node0": schema.ConfigNode(name="node0", username="foo", hostname="node0", port=22,), - "node1": schema.ConfigNode(name="node1", username="foo", hostname="node1", port=22,), + "node0": schema.ConfigNode( + name="node0", + username="foo", + hostname="node0", + port=22, + ), + "node1": schema.ConfigNode( + name="node1", + username="foo", + hostname="node1", + port=22, + ), } def make_node_pools(): return { - "NodePool": schema.ConfigNodePool(nodes=("node0", "node1"), name="NodePool",), + "NodePool": schema.ConfigNodePool( + nodes=("node0", "node1"), + name="NodePool", + ), } @@ -122,7 +145,12 @@ def make_job(**kwargs): kwargs.setdefault( "schedule", schedule_parse.ConfigDailyScheduler( - days=set(), hour=16, minute=30, second=0, original="16:30:00 ", jitter=None, + days=set(), + hour=16, + minute=30, + second=0, + original="16:30:00 ", + jitter=None, ), ) kwargs.setdefault("actions", {"action": make_action()}) @@ -141,16 +169,29 @@ def make_job(**kwargs): def make_master_jobs(): return { "MASTER.test_job0": make_job( - name="MASTER.test_job0", schedule=make_mock_schedule(), expected_runtime=datetime.timedelta(1), + name="MASTER.test_job0", + schedule=make_mock_schedule(), + expected_runtime=datetime.timedelta(1), ), "MASTER.test_job1": make_job( name="MASTER.test_job1", schedule=schedule_parse.ConfigDailyScheduler( - days={1, 3, 5}, hour=0, minute=30, second=0, original="00:30:00 MWF", jitter=None, + days={1, 3, 5}, + hour=0, + minute=30, + second=0, + original="00:30:00 MWF", + jitter=None, ), actions={ - "action": make_action(requires=("action1",), expected_runtime=datetime.timedelta(0, 7200),), - "action1": make_action(name="action1", expected_runtime=datetime.timedelta(0, 7200),), + "action": make_action( + requires=("action1",), + expected_runtime=datetime.timedelta(0, 7200), + ), + "action1": make_action( + name="action1", + expected_runtime=datetime.timedelta(0, 7200), + ), }, time_zone=pytz.timezone("Pacific/Auckland"), expected_runtime=datetime.timedelta(1), @@ -160,7 +201,12 @@ def make_master_jobs(): "MASTER.test_job2": make_job( name="MASTER.test_job2", node="node1", - actions={"action2_0": make_action(name="action2_0", command="test_command2.0",),}, + actions={ + "action2_0": make_action( + name="action2_0", + command="test_command2.0", + ), + }, time_zone=pytz.timezone("Pacific/Auckland"), expected_runtime=datetime.timedelta(1), cleanup_action=None, @@ -172,7 +218,11 @@ def make_master_jobs(): actions={ "action": make_action(), "action1": make_action(name="action1"), - "action2": make_action(name="action2", requires=("action", "action1"), node="node0",), + "action2": make_action( + name="action2", + requires=("action", "action1"), + node="node0", + ), }, cleanup_action=None, expected_runtime=datetime.timedelta(1), @@ -181,7 +231,12 @@ def make_master_jobs(): name="MASTER.test_job4", node="NodePool", schedule=schedule_parse.ConfigDailyScheduler( - original="00:00:00 ", hour=0, minute=0, second=0, days=set(), jitter=None, + original="00:00:00 ", + hour=0, + minute=0, + second=0, + days=set(), + jitter=None, ), all_nodes=True, enabled=False, @@ -192,7 +247,12 @@ def make_master_jobs(): 
name="MASTER.test_job_mesos", node="NodePool", schedule=schedule_parse.ConfigDailyScheduler( - original="00:00:00 ", hour=0, minute=0, second=0, days=set(), jitter=None, + original="00:00:00 ", + hour=0, + minute=0, + second=0, + days=set(), + jitter=None, ), actions={ "action_mesos": make_action( @@ -212,7 +272,12 @@ def make_master_jobs(): name="MASTER.test_job_k8s", node="NodePool", schedule=schedule_parse.ConfigDailyScheduler( - original="00:00:00 ", hour=0, minute=0, second=0, days=set(), jitter=None, + original="00:00:00 ", + hour=0, + minute=0, + second=0, + days=set(), + jitter=None, ), actions={ "action_k8s": make_action( @@ -238,7 +303,13 @@ def make_master_jobs(): ), ), node_selectors={"yelp.com/pool": "default"}, - node_affinities=(ConfigNodeAffinity(key="instance_type", operator="In", value=("a1.1xlarge",),),), + node_affinities=( + ConfigNodeAffinity( + key="instance_type", + operator="In", + value=("a1.1xlarge",), + ), + ), ), }, cleanup_action=None, @@ -296,8 +367,17 @@ class ConfigTestCase(TestCase): allow_overlap=True, time_zone="Pacific/Auckland", actions=[ - dict(name="action", command="command", requires=["action1"], expected_runtime="2h",), - dict(name="action1", command="command", expected_runtime="2h",), + dict( + name="action", + command="command", + requires=["action1"], + expected_runtime="2h", + ), + dict( + name="action1", + command="command", + expected_runtime="2h", + ), ], ), dict( @@ -315,7 +395,11 @@ class ConfigTestCase(TestCase): actions=dict( action=dict(command="command"), action1=dict(command="command"), - action2=dict(node="node0", command="command", requires=["action", "action1"],), + action2=dict( + node="node0", + command="command", + requires=["action", "action1"], + ), ), ), dict( @@ -362,7 +446,9 @@ class ConfigTestCase(TestCase): secret_name="secret1", container_path="/b/c", default_mode="0644", - items=[dict(key="secret1", path="abcd", mode="777"),], + items=[ + dict(key="secret1", path="abcd", mode="777"), + ], ), ], cap_add=["KILL"], @@ -376,7 +462,12 @@ class ConfigTestCase(TestCase): ) config = dict( - command_context=dict(batch_dir="/tron/batch/test/foo", python="/usr/bin/python",), **BASE_CONFIG, **JOBS_CONFIG, + command_context=dict( + batch_dir="/tron/batch/test/foo", + python="/usr/bin/python", + ), + **BASE_CONFIG, + **JOBS_CONFIG, ) @mock.patch.dict("tron.config.config_parse.ValidateNode.defaults") @@ -447,7 +538,18 @@ def test_attributes_with_master_context(self): }, ) master_config = dict( - nodes=[dict(name="node0", hostname="node0",),], node_pools=[dict(name="nodepool0", nodes=["node0"],),], + nodes=[ + dict( + name="node0", + hostname="node0", + ), + ], + node_pools=[ + dict( + name="nodepool0", + nodes=["node0"], + ), + ], ) test_config = validate_fragment( "test_namespace", @@ -468,7 +570,14 @@ def test_attributes_with_master_context(self): assert_equal(test_config, expected) def test_invalid_job_node_with_master_context(self): - master_config = dict(nodes=[dict(name="node0", hostname="node0",),],) + master_config = dict( + nodes=[ + dict( + name="node0", + hostname="node0", + ), + ], + ) test_config = dict( jobs=[ dict( @@ -482,12 +591,29 @@ def test_invalid_job_node_with_master_context(self): ], ) expected_message = "Unknown node name node1 at test_namespace.NamedConfigFragment.jobs.Job.test_job.node" - exception = assert_raises(ConfigError, validate_fragment, "test_namespace", test_config, master_config,) + exception = assert_raises( + ConfigError, + validate_fragment, + "test_namespace", + test_config, + master_config, + 
) assert_in(expected_message, str(exception)) def test_invalid_action_node_with_master_context(self): master_config = dict( - nodes=[dict(name="node0", hostname="node0",),], node_pools=[dict(name="nodepool0", nodes=["node0"],),], + nodes=[ + dict( + name="node0", + hostname="node0", + ), + ], + node_pools=[ + dict( + name="nodepool0", + nodes=["node0"], + ), + ], ) test_config = dict( jobs=[ @@ -503,25 +629,52 @@ def test_invalid_action_node_with_master_context(self): ) expected_message = "Unknown node name nodepool1 at test_namespace.NamedConfigFragment.jobs.Job.test_job.actions.Action.action.node" - exception = assert_raises(ConfigError, validate_fragment, "test_namespace", test_config, master_config,) + exception = assert_raises( + ConfigError, + validate_fragment, + "test_namespace", + test_config, + master_config, + ) assert_in(expected_message, str(exception)) class TestJobConfig(TestCase): def test_no_actions(self): - test_config = dict(jobs=[dict(name="test_job0", node="node0", schedule="daily 00:30:00 "),], **BASE_CONFIG,) + test_config = dict( + jobs=[ + dict(name="test_job0", node="node0", schedule="daily 00:30:00 "), + ], + **BASE_CONFIG, + ) expected_message = "Job test_job0 is missing options: actions" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_message, str(exception)) def test_empty_actions(self): test_config = dict( - jobs=[dict(name="test_job0", node="node0", schedule="daily 00:30:00 ", actions=None,),], **BASE_CONFIG, + jobs=[ + dict( + name="test_job0", + node="node0", + schedule="daily 00:30:00 ", + actions=None, + ), + ], + **BASE_CONFIG, ) expected_message = "Value at config.jobs.Job.test_job0.actions" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_message, str(exception)) def test_dupe_names(self): @@ -531,14 +684,21 @@ def test_dupe_names(self): name="test_job0", node="node0", schedule="daily 00:30:00", - actions=[dict(name="action", command="cmd"), dict(name="action", command="cmd"),], + actions=[ + dict(name="action", command="cmd"), + dict(name="action", command="cmd"), + ], ), ], **BASE_CONFIG, ) expected = "Duplicate name action at config.jobs.Job.test_job0.actions" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected, str(exception)) def test_bad_requires(self): @@ -554,14 +714,24 @@ def test_bad_requires(self): name="test_job1", node="node0", schedule="daily 00:30:00", - actions=[dict(name="action1", command="cmd", requires=["action"],),], + actions=[ + dict( + name="action1", + command="cmd", + requires=["action"], + ), + ], ), ], **BASE_CONFIG, ) expected_message = "jobs.MASTER.test_job1.action1 has a dependency " '"action" that is not in the same job!' 
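# Illustrative sketch (hypothetical stand-ins, not part of config_parse_test.py):
# the testify-style assert_raises(...) used in these tests returns the raised
# exception, which is why each call is followed by assert_in(..., str(exception)).
# The pytest.raises context manager used further down in this same module is the
# equivalent idiom:
import pytest

def _always_invalid():
    # stand-in for a validator call such as valid_config(test_config)
    raise ValueError("has a dependency that is not in the same job!")

with pytest.raises(ValueError) as exc_info:
    _always_invalid()
assert "not in the same job" in str(exc_info.value)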
- exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_message, str(exception)) def test_circular_dependency(self): @@ -572,8 +742,16 @@ def test_circular_dependency(self): node="node0", schedule="daily 00:30:00", actions=[ - dict(name="action1", command="cmd", requires=["action2"],), - dict(name="action2", command="cmd", requires=["action1"],), + dict( + name="action1", + command="cmd", + requires=["action2"], + ), + dict( + name="action2", + command="cmd", + requires=["action1"], + ), ], ), ], @@ -581,7 +759,11 @@ def test_circular_dependency(self): ) expect = "Circular dependency in job.MASTER.test_job0: action1 -> action2" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expect, str(exception)) def test_circular_dependency_multiaction(self): @@ -592,11 +774,31 @@ def test_circular_dependency_multiaction(self): node="node0", schedule="daily 00:30:00", actions=[ - dict(name="action1", command="cmd", requires=["action2"],), - dict(name="action2", command="cmd", requires=["action3"],), - dict(name="action3", command="cmd", requires=["action4"],), - dict(name="action4", command="cmd", requires=["action5"],), - dict(name="action5", command="cmd", requires=["action3"],), + dict( + name="action1", + command="cmd", + requires=["action2"], + ), + dict( + name="action2", + command="cmd", + requires=["action3"], + ), + dict( + name="action3", + command="cmd", + requires=["action4"], + ), + dict( + name="action4", + command="cmd", + requires=["action5"], + ), + dict( + name="action5", + command="cmd", + requires=["action3"], + ), ], ), ], @@ -604,7 +806,11 @@ def test_circular_dependency_multiaction(self): ) expect = "Circular dependency in job.MASTER.test_job0: action3 -> action4 -> action5" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expect, str(exception)) def test_config_cleanup_name_collision(self): @@ -614,13 +820,19 @@ def test_config_cleanup_name_collision(self): name="test_job0", node="node0", schedule="daily 00:30:00", - actions=[dict(name=CLEANUP_ACTION_NAME, command="cmd"),], + actions=[ + dict(name=CLEANUP_ACTION_NAME, command="cmd"), + ], ), ], **BASE_CONFIG, ) expected_message = "config.jobs.Job.test_job0.actions.Action.cleanup.name" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_message, str(exception)) def test_config_cleanup_action_name(self): @@ -630,7 +842,9 @@ def test_config_cleanup_action_name(self): name="test_job0", node="node0", schedule="daily 00:30:00", - actions=[dict(name="action", command="cmd"),], + actions=[ + dict(name="action", command="cmd"), + ], cleanup_action=dict(name="gerald", command="cmd"), ), ], @@ -638,7 +852,11 @@ def test_config_cleanup_action_name(self): ) expected_msg = "Cleanup actions cannot have custom names" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_msg, str(exception)) def test_config_cleanup_requires(self): @@ -648,7 +866,9 @@ def test_config_cleanup_requires(self): name="test_job0", node="node0", schedule="daily 00:30:00", - actions=[dict(name="action", command="cmd"),], + 
actions=[ + dict(name="action", command="cmd"), + ], cleanup_action=dict(command="cmd", requires=["action"]), ), ], @@ -656,14 +876,33 @@ def test_config_cleanup_requires(self): ) expected_msg = "Unknown keys in CleanupAction : requires" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_equal(expected_msg, str(exception)) def test_validate_job_no_actions(self): - job_config = dict(name="job_name", node="localhost", schedule="daily 00:30:00", actions=[],) - config_context = config_utils.ConfigContext("config", ["localhost"], None, None,) + job_config = dict( + name="job_name", + node="localhost", + schedule="daily 00:30:00", + actions=[], + ) + config_context = config_utils.ConfigContext( + "config", + ["localhost"], + None, + None, + ) expected_msg = "Required non-empty list at config.Job.job_name.actions" - exception = assert_raises(ConfigError, valid_job, job_config, config_context,) + exception = assert_raises( + ConfigError, + valid_job, + job_config, + config_context, + ) assert_in(expected_msg, str(exception)) @@ -678,7 +917,9 @@ def test_missing_secret_name(self): def test_validate_job_extra_secret_env(self): secret_env = dict( - secret_name="tron-secret-k8s-name-no--secret--name", key="no_secret_name", extra_key="unknown", + secret_name="tron-secret-k8s-name-no--secret--name", + key="no_secret_name", + extra_key="unknown", ) with pytest.raises(ConfigError) as missing_exc: config_parse.valid_secret_source(secret_env, NullConfigContext) @@ -686,7 +927,10 @@ def test_validate_job_extra_secret_env(self): assert "Unknown keys in SecretSource : extra_key" in str(missing_exc.value) def test_valid_job_secret_env_success(self): - secret_env = dict(secret_name="tron-secret-k8s-name-no--secret--name", key="no_secret_name",) + secret_env = dict( + secret_name="tron-secret-k8s-name-no--secret--name", + key="no_secret_name", + ) expected_env = schema.ConfigSecretSource(**secret_env) @@ -696,14 +940,20 @@ def test_valid_job_secret_env_success(self): class TestNodeConfig(TestCase): def test_validate_node_pool(self): - config_node_pool = valid_node_pool(dict(name="theName", nodes=["node1", "node2"]),) + config_node_pool = valid_node_pool( + dict(name="theName", nodes=["node1", "node2"]), + ) assert_equal(config_node_pool.name, "theName") assert_equal(len(config_node_pool.nodes), 2) def test_overlap_node_and_node_pools(self): tron_config = dict( - nodes=[dict(name="sameName", hostname="localhost"),], - node_pools=[dict(name="sameName", nodes=["sameNode"]),], + nodes=[ + dict(name="sameName", hostname="localhost"), + ], + node_pools=[ + dict(name="sameName", nodes=["sameNode"]), + ], ) expected_msg = "Node and NodePool names must be unique sameName" exception = assert_raises(ConfigError, valid_config, tron_config) @@ -723,13 +973,23 @@ def test_invalid_node_name(self): ) expected_msg = "Unknown node name unknown_node at config.jobs.Job.test_job0.node" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_equal(expected_msg, str(exception)) def test_invalid_nested_node_pools(self): test_config = dict( - nodes=[dict(name="node0", hostname="node0"), dict(name="node1", hostname="node1"),], - node_pools=[dict(name="pool0", nodes=["node1"]), dict(name="pool1", nodes=["node0", "pool0"]),], + nodes=[ + dict(name="node0", hostname="node0"), + dict(name="node1", hostname="node1"), + ], + node_pools=[ + 
dict(name="pool0", nodes=["node1"]), + dict(name="pool1", nodes=["node0", "pool0"]), + ], jobs=[ dict( name="test_job0", @@ -741,13 +1001,23 @@ def test_invalid_nested_node_pools(self): ) expected_msg = "NodePool pool1 contains other NodePools: pool0" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_msg, str(exception)) def test_invalid_node_pool_config(self): test_config = dict( - nodes=[dict(name="node0", hostname="node0"), dict(name="node1", hostname="node1"),], - node_pools=[dict(name="pool0", hostname=["node1"]), dict(name="pool1", nodes=["node0", "pool0"]),], + nodes=[ + dict(name="node0", hostname="node0"), + dict(name="node1", hostname="node1"), + ], + node_pools=[ + dict(name="pool0", hostname=["node1"]), + dict(name="pool1", nodes=["node0", "pool0"]), + ], jobs=[ dict( name="test_job0", @@ -759,13 +1029,22 @@ def test_invalid_node_pool_config(self): ) expected_msg = "NodePool pool0 is missing options" - exception = assert_raises(ConfigError, valid_config, test_config,) + exception = assert_raises( + ConfigError, + valid_config, + test_config, + ) assert_in(expected_msg, str(exception)) def test_invalid_named_update(self): test_config = dict(bozray=None) expected_message = "Unknown keys in NamedConfigFragment : bozray" - exception = assert_raises(ConfigError, validate_fragment, "foo", test_config,) + exception = assert_raises( + ConfigError, + validate_fragment, + "foo", + test_config, + ) assert_in(expected_message, str(exception)) @@ -779,7 +1058,11 @@ def test_valid_jobs_success(self): schedule="daily", expected_runtime="20m", actions=[ - dict(name="action", command="command", expected_runtime="20m",), + dict( + name="action", + command="command", + expected_runtime="20m", + ), dict( name="action_mesos", command="command", @@ -787,11 +1070,26 @@ def test_valid_jobs_success(self): cpus=4, mem=300, disk=600, - constraints=[dict(attribute="pool", operator="LIKE", value="default",),], + constraints=[ + dict( + attribute="pool", + operator="LIKE", + value="default", + ), + ], docker_image="my_container:latest", - docker_parameters=[dict(key="label", value="labelA"), dict(key="label", value="labelB"),], + docker_parameters=[ + dict(key="label", value="labelA"), + dict(key="label", value="labelB"), + ], env=dict(USER="batch"), - extra_volumes=[dict(container_path="/tmp", host_path="/home/tmp", mode="RO",),], + extra_volumes=[ + dict( + container_path="/tmp", + host_path="/home/tmp", + mode="RO", + ), + ], ), dict( name="test_trigger_attrs", @@ -811,48 +1109,85 @@ def test_valid_jobs_success(self): name="MASTER.test_job0", schedule=make_mock_schedule(), actions={ - "action": make_action(expected_runtime=datetime.timedelta(0, 1200),), + "action": make_action( + expected_runtime=datetime.timedelta(0, 1200), + ), "action_mesos": make_action( name="action_mesos", executor=schema.ExecutorTypes.mesos.value, cpus=4.0, mem=300.0, disk=600.0, - constraints=(schema.ConfigConstraint(attribute="pool", operator="LIKE", value="default",),), + constraints=( + schema.ConfigConstraint( + attribute="pool", + operator="LIKE", + value="default", + ), + ), docker_image="my_container:latest", docker_parameters=( - schema.ConfigParameter(key="label", value="labelA",), - schema.ConfigParameter(key="label", value="labelB",), + schema.ConfigParameter( + key="label", + value="labelA", + ), + schema.ConfigParameter( + key="label", + value="labelB", + ), ), env={"USER": "batch"}, extra_volumes=( 
schema.ConfigVolume( - container_path="/tmp", host_path="/home/tmp", mode=schema.VolumeModes.RO.value, + container_path="/tmp", + host_path="/home/tmp", + mode=schema.VolumeModes.RO.value, ), ), expected_runtime=datetime.timedelta(hours=24), ), "test_trigger_attrs": make_action( - name="test_trigger_attrs", command="foo", triggered_by=("foo.bar",), trigger_downstreams=True, + name="test_trigger_attrs", + command="foo", + triggered_by=("foo.bar",), + trigger_downstreams=True, ), }, expected_runtime=datetime.timedelta(0, 1200), ), } - context = config_utils.ConfigContext("config", ["node0"], None, MASTER_NAMESPACE,) + context = config_utils.ConfigContext( + "config", + ["node0"], + None, + MASTER_NAMESPACE, + ) config_parse.validate_jobs(test_config, context) assert expected_jobs == test_config["jobs"] class TestValidMesosAction(TestCase): def test_missing_docker_image(self): - config = dict(name="test_missing", command="echo hello", executor="mesos", cpus=0.2, mem=150, disk=450,) + config = dict( + name="test_missing", + command="echo hello", + executor="mesos", + cpus=0.2, + mem=150, + disk=450, + ) with pytest.raises(ConfigError): config_parse.valid_action(config, NullConfigContext) def test_cleanup_missing_docker_image(self): - config = dict(command="echo hello", executor="mesos", cpus=0.2, mem=150, disk=450,) + config = dict( + command="echo hello", + executor="mesos", + cpus=0.2, + mem=150, + disk=450, + ) with pytest.raises(ConfigError): config_parse.valid_action(config, NullConfigContext) @@ -864,7 +1199,10 @@ def test_valid_cleanup_action_name_pass(self): def test_valid_cleanup_action_name_fail(self): assert_raises( - ConfigError, valid_cleanup_action_name, "other", NullConfigContext, + ConfigError, + valid_cleanup_action_name, + "other", + NullConfigContext, ) @@ -882,7 +1220,12 @@ def test_valid_dir(self): assert_equal(self.dir, path) def test_missing_dir(self): - exception = assert_raises(ConfigError, valid_output_stream_dir, "bogus-dir", NullConfigContext,) + exception = assert_raises( + ConfigError, + valid_output_stream_dir, + "bogus-dir", + NullConfigContext, + ) assert_in("is not a directory", str(exception)) # TODO: docker tests run as root so everything is writeable @@ -913,17 +1256,32 @@ def test_validator_passes(self): def test_validator_unknown_variable_error(self): template = "The {one} thing I {seven} is {unknown}" - exception = assert_raises(ConfigError, self.validator, template, NullConfigContext,) + exception = assert_raises( + ConfigError, + self.validator, + template, + NullConfigContext, + ) assert_in("Unknown context variable", str(exception)) def test_validator_passes_with_context(self): template = "The {one} thing I {seven} is {mars}" - context = config_utils.ConfigContext(None, None, {"mars": "ok"}, None,) + context = config_utils.ConfigContext( + None, + None, + {"mars": "ok"}, + None, + ) assert self.validator(template, context) == template def test_validator_valid_string_without_no_percent_escape(self): template = "The {one} {seven} thing is {mars} --year %Y" - context = config_utils.ConfigContext(path=None, nodes=None, command_context={"mars": "ok"}, namespace=None,) + context = config_utils.ConfigContext( + path=None, + nodes=None, + command_context={"mars": "ok"}, + namespace=None, + ) assert self.validator(template, context) @@ -973,7 +1331,9 @@ def test_create(self): def test_create_missing_master(self): config_mapping = {"other": mock.Mock()} assert_raises( - ConfigError, config_parse.ConfigContainer.create, config_mapping, + ConfigError, + 
config_parse.ConfigContainer.create, + config_mapping, ) def test_get_job_names(self): @@ -1018,13 +1378,19 @@ def test_post_validation_failed(self): if "SSH_AUTH_SOCK" in os.environ: del os.environ["SSH_AUTH_SOCK"] assert_raises( - ConfigError, config_parse.valid_ssh_options.validate, self.config, self.context, + ConfigError, + config_parse.valid_ssh_options.validate, + self.config, + self.context, ) @mock.patch.dict("tron.config.config_parse.os.environ") def test_post_validation_success(self): os.environ["SSH_AUTH_SOCK"] = "something" - config = config_parse.valid_ssh_options.validate(self.config, self.context,) + config = config_parse.valid_ssh_options.validate( + self.config, + self.context, + ) assert_equal(config.agent, True) @@ -1035,12 +1401,22 @@ def setup_context(self): self.private_file = tempfile.NamedTemporaryFile() def test_valid_identity_file_missing_private_key(self): - exception = assert_raises(ConfigError, config_parse.valid_identity_file, "/file/not/exist", self.context,) + exception = assert_raises( + ConfigError, + config_parse.valid_identity_file, + "/file/not/exist", + self.context, + ) assert_in("Private key file", str(exception)) def test_valid_identity_files_missing_public_key(self): filename = self.private_file.name - exception = assert_raises(ConfigError, config_parse.valid_identity_file, filename, self.context,) + exception = assert_raises( + ConfigError, + config_parse.valid_identity_file, + filename, + self.context, + ) assert_in("Public key file", str(exception)) def test_valid_identity_files_valid(self): @@ -1067,17 +1443,28 @@ def setup_context(self): self.known_hosts_file = tempfile.NamedTemporaryFile() def test_valid_known_hosts_file_exists(self): - filename = config_parse.valid_known_hosts_file(self.known_hosts_file.name, self.context,) + filename = config_parse.valid_known_hosts_file( + self.known_hosts_file.name, + self.context, + ) assert_equal(filename, self.known_hosts_file.name) def test_valid_known_hosts_file_missing(self): - exception = assert_raises(ConfigError, config_parse.valid_known_hosts_file, "/bogus/path", self.context,) + exception = assert_raises( + ConfigError, + config_parse.valid_known_hosts_file, + "/bogus/path", + self.context, + ) assert_in("Known hosts file /bogus/path", str(exception)) def test_valid_known_hosts_file_missing_partial_context(self): context = config_utils.PartialConfigContext expected = "/bogus/does/not/exist" - filename = config_parse.valid_known_hosts_file(expected, context,) + filename = config_parse.valid_known_hosts_file( + expected, + context, + ) assert_equal(filename, expected) @@ -1093,7 +1480,10 @@ def test_missing_container_path(self): "mode": "RO", } assert_raises( - ConfigError, config_parse.valid_volume.validate, config, self.context, + ConfigError, + config_parse.valid_volume.validate, + config, + self.context, ) def test_missing_host_path(self): @@ -1103,7 +1493,10 @@ def test_missing_host_path(self): "mode": "RO", } assert_raises( - ConfigError, config_parse.valid_volume.validate, config, self.context, + ConfigError, + config_parse.valid_volume.validate, + config, + self.context, ) def test_invalid_mode(self): @@ -1113,7 +1506,10 @@ def test_invalid_mode(self): "mode": "RA", } assert_raises( - ConfigError, config_parse.valid_volume.validate, config, self.context, + ConfigError, + config_parse.valid_volume.validate, + config, + self.context, ) def test_valid(self): @@ -1123,14 +1519,23 @@ def test_valid(self): "mode": schema.VolumeModes.RO.value, } assert_equal( - schema.ConfigVolume(**config), 
config_parse.valid_volume.validate(config, self.context), + schema.ConfigVolume(**config), + config_parse.valid_volume.validate(config, self.context), ) def test_mesos_default_volumes(self): mesos_options = {"master_address": "mesos_master"} mesos_options["default_volumes"] = [ - {"container_path": "/nail/srv", "host_path": "/tmp", "mode": "RO",}, - {"container_path": "/nail/srv", "host_path": "/tmp", "mode": "invalid",}, + { + "container_path": "/nail/srv", + "host_path": "/tmp", + "mode": "RO", + }, + { + "container_path": "/nail/srv", + "host_path": "/tmp", + "mode": "invalid", + }, ] with pytest.raises(ConfigError): @@ -1138,13 +1543,24 @@ def test_mesos_default_volumes(self): # After we fix the error, expect error to go away. mesos_options["default_volumes"][1]["mode"] = "RW" - assert config_parse.valid_mesos_options.validate(mesos_options, self.context,) + assert config_parse.valid_mesos_options.validate( + mesos_options, + self.context, + ) def test_k8s_default_volumes(self): k8s_options = {"kubeconfig_path": "some_path"} k8s_options["default_volumes"] = [ - {"container_path": "/nail/srv", "host_path": "/tmp", "mode": "RO",}, - {"container_path": "/nail/srv", "host_path": "/tmp", "mode": "invalid",}, + { + "container_path": "/nail/srv", + "host_path": "/tmp", + "mode": "RO", + }, + { + "container_path": "/nail/srv", + "host_path": "/tmp", + "mode": "invalid", + }, ] with pytest.raises(ConfigError): @@ -1152,7 +1568,10 @@ def test_k8s_default_volumes(self): # After we fix the error, expect error to go away. k8s_options["default_volumes"][1]["mode"] = "RW" - assert config_parse.valid_kubernetes_options.validate(k8s_options, self.context,) + assert config_parse.valid_kubernetes_options.validate( + k8s_options, + self.context, + ) class TestValidPermissionMode: @@ -1175,8 +1594,14 @@ class TestValidSecretVolumeItem: "config", [ {"path": "abc"}, - {"key": "abc",}, - {"key": "abc", "path": "abc", "extra_key": None,}, + { + "key": "abc", + }, + { + "key": "abc", + "path": "abc", + "extra_key": None, + }, {"key": "abc", "path": "abc", "mode": "a"}, ], ) @@ -1185,7 +1610,8 @@ def test_invalid(self, config): config_parse.valid_secret_volume_item(config, NullConfigContext) @pytest.mark.parametrize( - "config", [{"key": "abc", "path": "abc"}, {"key": "abc", "path": "abc", "mode": "777"}], + "config", + [{"key": "abc", "path": "abc"}, {"key": "abc", "path": "abc", "mode": "777"}], ) def test_valid_job_secret_volume_success(self, config): config_parse.valid_secret_volume_item(config, NullConfigContext) @@ -1200,15 +1626,23 @@ class TestValidSecretVolume: secret_name="secret1", container_path="/b/c", default_mode="0644", - items=[dict(key="secret1", path="abcd", mode="7778"),], + items=[ + dict(key="secret1", path="abcd", mode="7778"), + ], ), dict( secret_volume_name="abc", container_path="/b/c", default_mode="0644", - items=[dict(key="secret1", path="abcd", mode="7777"),], + items=[ + dict(key="secret1", path="abcd", mode="7777"), + ], + ), + dict( + secret_volume_name="abc", + secret_name="secret1", + container_path=123, ), - dict(secret_volume_name="abc", secret_name="secret1", container_path=123,), dict( secret_volume_name="abc", secret_name="secret1", @@ -1226,7 +1660,9 @@ def test_wrong_item_key(self): secret_volume_name="abc", secret_name="secret1", container_path="/b/c", - items=[dict(key="secret2", path="abc"),], + items=[ + dict(key="secret2", path="abc"), + ], ) with pytest.raises(ConfigError): config_parse.valid_secret_volume(config, NullConfigContext) @@ -1239,10 +1675,21 @@ def 
test_wrong_item_key(self): secret_name="secret1", container_path="/b/c", default_mode="0644", - items=[dict(key="secret1", path="abc"),], + items=[ + dict(key="secret1", path="abc"), + ], + ), + dict( + secret_volume_name="abc", + secret_name="secret1", + container_path="/b/c", + items=[], + ), + dict( + secret_volume_name="abc", + secret_name="secret1", + container_path="/b/c", ), - dict(secret_volume_name="abc", secret_name="secret1", container_path="/b/c", items=[],), - dict(secret_volume_name="abc", secret_name="secret1", container_path="/b/c",), ], ) def test_valid(self, config): @@ -1255,7 +1702,13 @@ def context(self): return config_utils.NullConfigContext @pytest.mark.parametrize( - "url", ["http://blah.com", "http://blah.com/", "blah.com", "blah.com/",], + "url", + [ + "http://blah.com", + "http://blah.com/", + "blah.com", + "blah.com/", + ], ) def test_valid(self, url, context): normalized = "http://blah.com" @@ -1263,7 +1716,14 @@ def test_valid(self, url, context): assert result == normalized @pytest.mark.parametrize( - "url", ["https://blah.com", "http://blah.com/something", "blah.com/other", "http://", "blah.com?a=1",], + "url", + [ + "https://blah.com", + "http://blah.com/something", + "blah.com/other", + "http://", + "blah.com?a=1", + ], ) def test_invalid(self, url, context): with pytest.raises(ConfigError): diff --git a/tests/config/config_utils_test.py b/tests/config/config_utils_test.py index 1c01d8ae7..48a9b3a87 100644 --- a/tests/config/config_utils_test.py +++ b/tests/config/config_utils_test.py @@ -73,9 +73,15 @@ def test_validate(self): assert_equal(self.validator("b", self.context), "b") def test_invalid(self): - exception = assert_raises(ConfigError, self.validator, "c", self.context,) + exception = assert_raises( + ConfigError, + self.validator, + "c", + self.context, + ) assert_in( - "Value at is not in %s: " % str(set(self.enum)), str(exception), + "Value at is not in %s: " % str(set(self.enum)), + str(exception), ) @@ -98,7 +104,10 @@ def test_valid_time_with_seconds(self): def test_valid_time_invalid(self): assert_raises( - ConfigError, config_utils.valid_time, "14:32:12:34", self.context, + ConfigError, + config_utils.valid_time, + "14:32:12:34", + self.context, ) assert_raises(ConfigError, config_utils.valid_time, None, self.context) @@ -109,27 +118,43 @@ def setup_config(self): self.context = config_utils.NullConfigContext def test_valid_time_delta_invalid(self): - exception = assert_raises(ConfigError, config_utils.valid_time_delta, "no time", self.context,) + exception = assert_raises( + ConfigError, + config_utils.valid_time_delta, + "no time", + self.context, + ) assert_in("not a valid time delta: no time", str(exception)) def test_valid_time_delta_valid_seconds(self): for jitter in [" 82s ", "82 s", "82 sec", "82seconds "]: delta = datetime.timedelta(seconds=82) assert_equal( - delta, config_utils.valid_time_delta(jitter, self.context,), + delta, + config_utils.valid_time_delta( + jitter, + self.context, + ), ) def test_valid_time_delta_valid_minutes(self): for jitter in ["10m", "10 m", "10 min", " 10minutes"]: delta = datetime.timedelta(seconds=600) assert_equal( - delta, config_utils.valid_time_delta(jitter, self.context,), + delta, + config_utils.valid_time_delta( + jitter, + self.context, + ), ) def test_valid_time_delta_invalid_unit(self): for jitter in ["1 year", "3 mo", "3 months"]: assert_raises( - ConfigError, config_utils.valid_time_delta, jitter, self.context, + ConfigError, + config_utils.valid_time_delta, + jitter, + self.context, ) @@ 
-137,7 +162,12 @@ class TestConfigContext(TestCase): def test_build_config_context(self): path, nodes, namespace = "path", {1, 2, 3}, "namespace" command_context = mock.MagicMock() - parent_context = config_utils.ConfigContext(path, nodes, command_context, namespace,) + parent_context = config_utils.ConfigContext( + path, + nodes, + command_context, + namespace, + ) child = parent_context.build_child_context("child") assert_equal(child.path, "%s.child" % path) @@ -147,7 +177,11 @@ def test_build_config_context(self): assert not child.partial -StubConfigObject = schema.config_object_factory("StubConfigObject", ["req1", "req2"], ["opt1", "opt2"],) +StubConfigObject = schema.config_object_factory( + "StubConfigObject", + ["req1", "req2"], + ["opt1", "opt2"], +) class StubValidator(config_utils.Validator): @@ -161,7 +195,12 @@ def setup_validator(self): def test_validate_with_none(self): expected_msg = "A StubObject is required" - exception = assert_raises(ConfigError, self.validator.validate, None, config_utils.NullConfigContext,) + exception = assert_raises( + ConfigError, + self.validator.validate, + None, + config_utils.NullConfigContext, + ) assert_in(expected_msg, str(exception)) def test_validate_optional_with_none(self): diff --git a/tests/config/manager_test.py b/tests/config/manager_test.py index 14c078d78..c0074eca6 100644 --- a/tests/config/manager_test.py +++ b/tests/config/manager_test.py @@ -120,7 +120,11 @@ def test_build_file_path_with_invalid_chars(self): assert_equal(path, os.path.join(self.temp_dir, "_etc_passwd.yaml")) path = self.manager.build_file_path("../../etc/passwd") assert_equal( - path, os.path.join(self.temp_dir, "______etc_passwd.yaml",), + path, + os.path.join( + self.temp_dir, + "______etc_passwd.yaml", + ), ) def test_read_raw_config(self): @@ -141,7 +145,9 @@ def test_write_config(self): self.manifest.get_file_name.assert_called_with(name) assert not self.manifest.add.call_count self.manager.validate_with_fragment.assert_called_with( - name, self.content, should_validate_missing_dependency=False, + name, + self.content, + should_validate_missing_dependency=False, ) def test_write_config_new_name(self): @@ -171,10 +177,12 @@ def test_delete_missing_namespace(self, mock_remove): assert_equal(mock_remove.call_count, 0) @mock.patch( - "tron.config.manager.JobGraph", autospec=True, + "tron.config.manager.JobGraph", + autospec=True, ) @mock.patch( - "tron.config.manager.config_parse.ConfigContainer", autospec=True, + "tron.config.manager.config_parse.ConfigContainer", + autospec=True, ) def test_validate_with_fragment(self, mock_config_container, mock_job_graph): name = "the_name" @@ -186,12 +194,14 @@ def test_validate_with_fragment(self, mock_config_container, mock_job_graph): expected_mapping[name] = self.content mock_config_container.create.assert_called_with(expected_mapping) mock_job_graph.assert_called_once_with( - mock_config_container.create.return_value, should_validate_missing_dependency=True, + mock_config_container.create.return_value, + should_validate_missing_dependency=True, ) @mock.patch("tron.config.manager.read", autospec=True) @mock.patch( - "tron.config.manager.config_parse.ConfigContainer", autospec=True, + "tron.config.manager.config_parse.ConfigContainer", + autospec=True, ) def test_load(self, mock_config_container, mock_read): content_items = self.content.items() diff --git a/tests/config/schedule_parse_test.py b/tests/config/schedule_parse_test.py index 98e5ce587..63ec5459b 100644 --- a/tests/config/schedule_parse_test.py +++ 
b/tests/config/schedule_parse_test.py @@ -33,14 +33,19 @@ def test_pad_negative_size(self): class TestScheduleConfigFromString(TestCase): @mock.patch( - "tron.config.schedule_parse.parse_groc_expression", autospec=True, + "tron.config.schedule_parse.parse_groc_expression", + autospec=True, ) def test_groc_config(self, mock_parse_groc): schedule = "every Mon,Wed at 12:00" context = config_utils.NullConfigContext config = schedule_parse.schedule_config_from_string(schedule, context) assert_equal(config, mock_parse_groc.return_value) - generic_config = schedule_parse.ConfigGenericSchedule("groc daily", schedule, None,) + generic_config = schedule_parse.ConfigGenericSchedule( + "groc daily", + schedule, + None, + ) mock_parse_groc.assert_called_with(generic_config, context) @@ -56,12 +61,20 @@ def assert_validation(self, schedule, expected, mock_schedulers): def test_cron_from_dict(self): schedule = {"type": "cron", "value": "* * * * *"} - config = schedule_parse.ConfigGenericSchedule("cron", schedule["value"], datetime.timedelta(),) + config = schedule_parse.ConfigGenericSchedule( + "cron", + schedule["value"], + datetime.timedelta(), + ) self.assert_validation(schedule, config) def test_cron_from_dict_with_jitter(self): schedule = {"type": "cron", "value": "* * * * *", "jitter": "5 min"} - config = schedule_parse.ConfigGenericSchedule("cron", schedule["value"], datetime.timedelta(minutes=5),) + config = schedule_parse.ConfigGenericSchedule( + "cron", + schedule["value"], + datetime.timedelta(minutes=5), + ) self.assert_validation(schedule, config) diff --git a/tests/core/action_test.py b/tests/core/action_test.py index 1345d52b5..d0124919d 100644 --- a/tests/core/action_test.py +++ b/tests/core/action_test.py @@ -21,9 +21,20 @@ def test_from_config_full(self, disk): cpus=1, mem=100, disk=disk, # default: 1024.0 - constraints=[ConfigConstraint(attribute="pool", operator="LIKE", value="default",),], + constraints=[ + ConfigConstraint( + attribute="pool", + operator="LIKE", + value="default", + ), + ], docker_image="fake-docker.com:400/image", - docker_parameters=[ConfigParameter(key="test", value=123,),], + docker_parameters=[ + ConfigParameter( + key="test", + value=123, + ), + ], env={"TESTING": "true"}, secret_env={"TEST_SECRET": ConfigSecretSource(secret_name="tron-secret-svc-sec--A", key="sec_A")}, secret_volumes=[ @@ -35,7 +46,13 @@ def test_from_config_full(self, disk): items=[ConfigSecretVolumeItem(key="key", path="path", mode="0755")], ), ], - extra_volumes=[ConfigVolume(host_path="/tmp", container_path="/nail/tmp", mode="RO",),], + extra_volumes=[ + ConfigVolume( + host_path="/tmp", + container_path="/nail/tmp", + mode="RO", + ), + ], trigger_downstreams=True, triggered_by=["foo.bar"], ) @@ -61,7 +78,12 @@ def test_from_config_full(self, disk): assert command_config.extra_volumes == {("/nail/tmp", "/tmp", "RO")} def test_from_config_none_values(self): - config = ConfigAction(name="ted", command="do something", node="first", executor="ssh",) + config = ConfigAction( + name="ted", + command="do something", + node="first", + executor="ssh", + ) new_action = Action.from_config(config) assert new_action.name == config.name assert new_action.executor == config.executor diff --git a/tests/core/actiongraph_test.py b/tests/core/actiongraph_test.py index 99519bcd6..7efed3a53 100644 --- a/tests/core/actiongraph_test.py +++ b/tests/core/actiongraph_test.py @@ -45,7 +45,10 @@ def test_get_dependencies(self): assert self.action_graph.get_dependencies("base_one") == [] assert 
self.action_graph.get_dependencies("base_one", include_triggers=True)[0].name == "MASTER.otherjob.first" assert sorted(d.name for d in self.action_graph.get_dependencies("dep_multi")) == sorted( - ["dep_one_one", "base_two",] + [ + "dep_one_one", + "base_two", + ] ) def test_names(self): @@ -56,7 +59,8 @@ def test_names(self): def test__getitem__(self): assert_equal( - self.action_graph["base_one"], self.action_map["base_one"], + self.action_graph["base_one"], + self.action_map["base_one"], ) def test__getitem__miss(self): diff --git a/tests/core/actionrun_test.py b/tests/core/actionrun_test.py index 7e4ccbe76..1110679e6 100644 --- a/tests/core/actionrun_test.py +++ b/tests/core/actionrun_test.py @@ -42,7 +42,10 @@ def output_path(): @pytest.fixture def mock_current_time(): - with mock.patch("tron.core.actionrun.timeutils.current_time", autospec=True,) as mock_current_time: + with mock.patch( + "tron.core.actionrun.timeutils.current_time", + autospec=True, + ) as mock_current_time: yield mock_current_time @@ -77,13 +80,23 @@ def setup_action_runs(self): a2.name = "act2" actions = [a1, a2] self.action_graph = actiongraph.ActionGraph( - {a.name: a for a in actions}, {"act1": set(), "act2": set()}, {"act1": set(), "act2": set()}, + {a.name: a for a in actions}, + {"act1": set(), "act2": set()}, + {"act1": set(), "act2": set()}, ) mock_node = mock.create_autospec(node.Node) - self.job_run = jobrun.JobRun("jobname", 7, self.run_time, mock_node, action_graph=self.action_graph,) + self.job_run = jobrun.JobRun( + "jobname", + 7, + self.run_time, + mock_node, + action_graph=self.action_graph, + ) - self.action_runner = mock.create_autospec(actioncommand.SubprocessActionRunnerFactory,) + self.action_runner = mock.create_autospec( + actioncommand.SubprocessActionRunnerFactory, + ) @pytest.fixture def state_data(self): @@ -101,7 +114,10 @@ def state_data(self): } def test_build_action_run_collection(self): - collection = ActionRunFactory.build_action_run_collection(self.job_run, self.action_runner,) + collection = ActionRunFactory.build_action_run_collection( + self.job_run, + self.action_runner, + ) assert collection.action_graph == self.action_graph assert "act1" in collection.run_map assert "act2" in collection.run_map @@ -128,10 +144,15 @@ def test_action_run_collection_from_state(self, state_data): ), ], "node_name": "anode", - "action_runner": {"status_path": "/tmp/foo", "exec_path": "/bin/foo",}, + "action_runner": { + "status_path": "/tmp/foo", + "exec_path": "/bin/foo", + }, } collection = ActionRunFactory.action_run_collection_from_state( - self.job_run, state_data, cleanup_action_state_data, + self.job_run, + state_data, + cleanup_action_state_data, ) assert collection.action_graph == self.action_graph @@ -142,10 +163,16 @@ def test_action_run_collection_from_state(self, state_data): def test_build_run_for_action(self): expected_command = "doit" action = MagicMock( - node_pool=None, is_cleanup=False, command_config=ActionCommandConfig(command=expected_command), + node_pool=None, + is_cleanup=False, + command_config=ActionCommandConfig(command=expected_command), ) action.name = "theaction" - action_run = ActionRunFactory.build_run_for_action(self.job_run, action, self.action_runner,) + action_run = ActionRunFactory.build_run_for_action( + self.job_run, + action, + self.action_runner, + ) assert action_run.job_run_id == self.job_run.id assert action_run.node == self.job_run.node @@ -156,10 +183,16 @@ def test_build_run_for_action(self): def test_build_run_for_action_with_node(self): 
expected_command = "doit" action = MagicMock( - node_pool=None, is_cleanup=True, command_config=ActionCommandConfig(command=expected_command), + node_pool=None, + is_cleanup=True, + command_config=ActionCommandConfig(command=expected_command), ) action.node_pool = mock.create_autospec(node.NodePool) - action_run = ActionRunFactory.build_run_for_action(self.job_run, action, self.action_runner,) + action_run = ActionRunFactory.build_run_for_action( + self.job_run, + action, + self.action_runner, + ) assert action_run.job_run_id == self.job_run.id assert action_run.node == action.node_pool.next() @@ -168,8 +201,16 @@ def test_build_run_for_action_with_node(self): assert action_run.command == expected_command def test_build_run_for_ssh_action(self): - action = MagicMock(name="theaction", command="doit", executor=ExecutorTypes.ssh.value,) - action_run = ActionRunFactory.build_run_for_action(self.job_run, action, self.action_runner,) + action = MagicMock( + name="theaction", + command="doit", + executor=ExecutorTypes.ssh.value, + ) + action_run = ActionRunFactory.build_run_for_action( + self.job_run, + action, + self.action_runner, + ) assert action_run.__class__ == SSHActionRun def test_build_run_for_mesos_action(self): @@ -179,14 +220,30 @@ def test_build_run_for_mesos_action(self): disk=600, constraints=[["pool", "LIKE", "default"]], docker_image="fake-docker.com:400/image", - docker_parameters=[{"key": "test", "value": 123,}], + docker_parameters=[ + { + "key": "test", + "value": 123, + } + ], env={"TESTING": "true"}, - extra_volumes=[{"path": "/tmp",}], + extra_volumes=[ + { + "path": "/tmp", + } + ], ) action = MagicMock( - name="theaction", command="doit", executor=ExecutorTypes.mesos.value, command_config=command_config, + name="theaction", + command="doit", + executor=ExecutorTypes.mesos.value, + command_config=command_config, + ) + action_run = ActionRunFactory.build_run_for_action( + self.job_run, + action, + self.action_runner, ) - action_run = ActionRunFactory.build_run_for_action(self.job_run, action, self.action_runner,) assert action_run.__class__ == MesosActionRun assert action_run.command_config.cpus == command_config.cpus assert action_run.command_config.mem == command_config.mem @@ -198,7 +255,10 @@ def test_build_run_for_mesos_action(self): assert action_run.command_config.extra_volumes == command_config.extra_volumes def test_action_run_from_state_ssh(self, state_data): - action_run = ActionRunFactory.action_run_from_state(self.job_run, state_data,) + action_run = ActionRunFactory.action_run_from_state( + self.job_run, + state_data, + ) assert action_run.job_run_id == state_data["job_run_id"] assert not action_run.is_cleanup @@ -206,7 +266,10 @@ def test_action_run_from_state_ssh(self, state_data): def test_action_run_from_state_mesos(self, state_data): state_data["executor"] = ExecutorTypes.mesos.value - action_run = ActionRunFactory.action_run_from_state(self.job_run, state_data,) + action_run = ActionRunFactory.action_run_from_state( + self.job_run, + state_data, + ) assert action_run.job_run_id == state_data["job_run_id"] action_name = state_data["action_name"] @@ -217,7 +280,10 @@ def test_action_run_from_state_mesos(self, state_data): def test_action_run_from_state_kubernetes(self, state_data): state_data["executor"] = ExecutorTypes.kubernetes.value - action_run = ActionRunFactory.action_run_from_state(self.job_run, state_data,) + action_run = ActionRunFactory.action_run_from_state( + self.job_run, + state_data, + ) assert action_run.job_run_id == 
state_data["job_run_id"] action_name = state_data["action_name"] @@ -228,7 +294,10 @@ def test_action_run_from_state_kubernetes(self, state_data): def test_action_run_from_state_spark(self, state_data): state_data["executor"] = ExecutorTypes.spark.value - action_run = ActionRunFactory.action_run_from_state(self.job_run, state_data,) + action_run = ActionRunFactory.action_run_from_state( + self.job_run, + state_data, + ) assert action_run.job_run_id == state_data["job_run_id"] action_name = state_data["action_name"] @@ -544,7 +613,9 @@ def test_trigger_timeout_default(self): day = datetime.timedelta(days=1) tomorrow = today + day action_run = ActionRunFactory.build_run_for_action( - mock.Mock(run_time=today), mock.Mock(trigger_timeout=None), mock.Mock(), + mock.Mock(run_time=today), + mock.Mock(trigger_timeout=None), + mock.Mock(), ) assert action_run.trigger_timeout_timestamp == tomorrow.timestamp() @@ -553,7 +624,9 @@ def test_trigger_timeout_custom(self): hour = datetime.timedelta(hours=1) target = today + hour action_run = ActionRunFactory.build_run_for_action( - mock.Mock(run_time=today), mock.Mock(trigger_timeout=hour), mock.Mock(), + mock.Mock(run_time=today), + mock.Mock(trigger_timeout=hour), + mock.Mock(), ) assert action_run.trigger_timeout_timestamp == target.timestamp() @@ -615,7 +688,8 @@ def test_setup_subscriptions_timeout_in_future(self, reactor, mock_current_time) self.action_run.trigger_timeout_timestamp = now.timestamp() + 10 self.action_run.setup_subscriptions() reactor.callLater.assert_called_once_with( - 10.0, self.action_run.trigger_timeout_reached, + 10.0, + self.action_run.trigger_timeout_reached, ) @mock.patch("tron.core.actionrun.reactor", autospec=True) @@ -625,7 +699,8 @@ def test_setup_subscriptions_timeout_in_past(self, reactor, mock_current_time): self.action_run.trigger_timeout_timestamp = now.timestamp() - 10 self.action_run.setup_subscriptions() reactor.callLater.assert_called_once_with( - 1, self.action_run.trigger_timeout_reached, + 1, + self.action_run.trigger_timeout_reached, ) @mock.patch("tron.core.actionrun.EventBus", autospec=True) @@ -663,7 +738,9 @@ def test_trigger_notify_clears_trigger_timeout(self): class TestSSHActionRun: @pytest.fixture(autouse=True) def setup_action_run(self, output_path): - self.action_runner = mock.create_autospec(actioncommand.NoActionRunnerFactory,) + self.action_runner = mock.create_autospec( + actioncommand.NoActionRunnerFactory, + ) self.command = "do command {actionname}" self.action_run = SSHActionRun( job_run_id="job_name.5", @@ -694,22 +771,32 @@ def test_build_action_command(self, mock_filehandler): assert action_command == self.action_run.action_command assert action_command == self.action_runner.create.return_value self.action_runner.create.assert_called_with( - self.action_run.id, attempt.rendered_command, serializer, + self.action_run.id, + attempt.rendered_command, + serializer, + ) + mock_filehandler.OutputStreamSerializer.assert_called_with( + self.action_run.output_path, ) - mock_filehandler.OutputStreamSerializer.assert_called_with(self.action_run.output_path,) self.action_run.watch.assert_called_with(action_command) def test_handler_running(self): attempt = self.action_run.create_attempt() self.action_run.build_action_command(attempt) self.action_run.machine.transition("start") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.RUNNING,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.RUNNING, + ) assert self.action_run.is_running def 
test_handler_failstart(self): attempt = self.action_run.create_attempt() self.action_run.build_action_command(attempt) - assert self.action_run.handler(self.action_run.action_command, ActionCommand.FAILSTART,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.FAILSTART, + ) assert self.action_run.is_failed def test_handler_exiting_fail(self): @@ -717,7 +804,10 @@ def test_handler_exiting_fail(self): self.action_run.build_action_command(attempt) self.action_run.action_command.exit_status = -1 self.action_run.machine.transition("start") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.EXITING,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.EXITING, + ) assert self.action_run.is_failed assert self.action_run.exit_status == -1 @@ -727,15 +817,24 @@ def test_handler_exiting_success(self): self.action_run.action_command.exit_status = 0 self.action_run.machine.transition("start") self.action_run.machine.transition("started") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.EXITING,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.EXITING, + ) assert self.action_run.is_succeeded assert self.action_run.exit_status == 0 def test_handler_exiting_failunknown(self): - self.action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + self.action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) self.action_run.machine.transition("start") self.action_run.machine.transition("started") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.EXITING,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.EXITING, + ) assert self.action_run.is_unknown assert self.action_run.exit_status is None assert self.action_run.end_time is not None @@ -743,7 +842,13 @@ def test_handler_exiting_failunknown(self): def test_handler_unhandled(self): attempt = self.action_run.create_attempt() self.action_run.build_action_command(attempt) - assert self.action_run.handler(self.action_run.action_command, ActionCommand.PENDING,) is None + assert ( + self.action_run.handler( + self.action_run.action_command, + ActionCommand.PENDING, + ) + is None + ) assert self.action_run.is_scheduled def test_recover_no_action_runner(self): @@ -754,7 +859,10 @@ def test_recover_no_action_runner(self): class TestSSHActionRunRecover: @pytest.fixture(autouse=True) def setup_action_run(self, output_path): - self.action_runner = SubprocessActionRunnerFactory(status_path="/tmp/foo", exec_path="/bin/foo",) + self.action_runner = SubprocessActionRunnerFactory( + status_path="/tmp/foo", + exec_path="/bin/foo", + ) self.command = "do command {actionname}" self.action_run = SSHActionRun( job_run_id="job_name.5", @@ -794,10 +902,16 @@ def test_recover_action_runner(self): @mock.patch("tron.core.actionrun.reactor", autospec=True) def test_handler_exiting_failunknown(self, mock_reactor): - self.action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + self.action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) self.action_run.machine.transition("start") self.action_run.machine.transition("started") - delay_deferred = self.action_run.handler(self.action_run.action_command, ActionCommand.EXITING,) + delay_deferred = self.action_run.handler( + 
self.action_run.action_command, + ActionCommand.EXITING, + ) assert delay_deferred == mock_reactor.callLater.return_value assert self.action_run.is_running assert self.action_run.exit_status is None @@ -822,13 +936,17 @@ def test_handler_exiting_failunknown(self, mock_reactor): @mock.patch("tron.core.actionrun.SSHActionRun.do_recover", autospec=True) @mock.patch("tron.core.actionrun.reactor", autospec=True) def test_handler_exiting_failunknown_max_retries(self, mock_reactor, mock_do_recover): - self.action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + self.action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) self.action_run.machine.transition("start") self.action_run.machine.transition("started") def exit_unknown(*args, **kwargs): self.action_run.handler( - self.action_run.action_command, ActionCommand.EXITING, + self.action_run.action_command, + ActionCommand.EXITING, ) # Each time do_recover is called, end up exiting unknown again @@ -857,10 +975,15 @@ def setup_action_run(self, mock_current_time): self.output_path = ["one", "two"] self.run_node = MagicMock() mock_current_time.return_value = self.now - self.command_config = ActionCommandConfig(command="do {actionname}", cpus=1,) + self.command_config = ActionCommandConfig( + command="do {actionname}", + cpus=1, + ) self.action_config = mock.Mock(command_config=self.command_config) self.action_graph = actiongraph.ActionGraph( - {"theaction": self.action_config}, {"theaction": set()}, {"theaction": set()}, + {"theaction": self.action_config}, + {"theaction": set()}, + {"theaction": set()}, ) @pytest.fixture @@ -902,7 +1025,11 @@ def state_data_old(self): def test_from_state_old(self, state_data_old): state_data = state_data_old action_run = ActionRun.from_state( - state_data, self.parent_context, list(self.output_path), self.run_node, self.action_graph, + state_data, + self.parent_context, + list(self.output_path), + self.run_node, + self.action_graph, ) for key, value in state_data.items(): @@ -920,7 +1047,11 @@ def test_from_state_old_with_mesos_task_id(self, state_data_old): state_data = state_data_old state_data["mesos_task_id"] = "task" action_run = ActionRun.from_state( - state_data, self.parent_context, list(self.output_path), self.run_node, self.action_graph, + state_data, + self.parent_context, + list(self.output_path), + self.run_node, + self.action_graph, ) for key, value in state_data.items(): @@ -936,7 +1067,11 @@ def test_from_state_old_not_started(self, state_data_old): state_data["start_time"] = None state_data["state"] = "scheduled" action_run = ActionRun.from_state( - state_data, self.parent_context, list(self.output_path), self.run_node, self.action_graph, + state_data, + self.parent_context, + list(self.output_path), + self.run_node, + self.action_graph, ) for key, value in state_data.items(): @@ -953,7 +1088,11 @@ def test_from_state_old_rendered_and_exited(self, state_data_old): state_data["rendered_command"] = "do things theaction" state_data["exit_status"] = 0 action_run = ActionRun.from_state( - state_data, self.parent_context, list(self.output_path), self.run_node, self.action_graph, + state_data, + self.parent_context, + list(self.output_path), + self.run_node, + self.action_graph, ) for key, value in state_data.items(): @@ -972,7 +1111,11 @@ def test_from_state_old_retries(self, state_data_old): state_data["exit_status"] = 0 state_data["exit_statuses"] = [1] action_run = ActionRun.from_state( - state_data, 
self.parent_context, list(self.output_path), self.run_node, self.action_graph, + state_data, + self.parent_context, + list(self.output_path), + self.run_node, + self.action_graph, ) for key, value in state_data.items(): @@ -993,43 +1136,74 @@ def test_from_state_old_retries(self, state_data_old): def test_from_state_running(self, state_data): state_data["state"] = "running" action_run = ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) assert action_run.is_unknown def test_from_state_starting(self, state_data): state_data["state"] = "starting" action_run = ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) assert action_run.is_unknown def test_from_state_queued(self, state_data): state_data["state"] = "queued" action_run = ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) assert action_run.is_queued def test_from_state_no_node_name(self, state_data): del state_data["node_name"] action_run = ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) assert action_run.node == self.run_node @mock.patch("tron.core.actionrun.node.NodePoolRepository", autospec=True) def test_from_state_with_node_exists(self, mock_store, state_data): ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) mock_store.get_instance().get_node.assert_called_with( - state_data["node_name"], self.run_node, + state_data["node_name"], + self.run_node, ) def test_from_state_after_rendered_command(self, state_data): action_run = ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) assert action_run.command_config == self.command_config assert len(action_run.attempts) == len(state_data["attempts"]) @@ -1039,7 +1213,12 @@ def test_from_state_after_rendered_command(self, state_data): def test_from_state_action_config_gone(self, state_data): state_data["action_name"] = "old_action" action_run = ActionRun.from_state( - state_data, self.parent_context, self.output_path, self.run_node, self.action_graph, lambda: None, + state_data, + self.parent_context, + self.output_path, + self.run_node, + self.action_graph, + lambda: None, ) assert action_run.command_config.command == "" assert action_run.command == state_data["attempts"][-1]["rendered_command"] @@ -1049,7 +1228,11 @@ class TestActionRunCollection: def _build_run(self, action): mock_node = mock.create_autospec(node.Node) return ActionRun( - "id", action.name, mock_node, command_config=action.command_config, output_path=self.output_path, + "id", + action.name, + mock_node, + 
command_config=action.command_config, + output_path=self.output_path, ) @pytest.fixture(autouse=True) @@ -1058,7 +1241,11 @@ def setup_runs(self, output_path): actions = [] for name in action_names: - m = mock.Mock(name=name, required_actions=[], command_config=ActionCommandConfig(command="old"),) + m = mock.Mock( + name=name, + required_actions=[], + command_config=ActionCommandConfig(command="old"), + ) m.name = name actions.append(m) @@ -1105,7 +1292,11 @@ def test_update_action_config(self): new_action_names = ["new_name", "second_name", "cleanup"] new_actions = [] for name in new_action_names: - action = mock.Mock(name=name, required_actions=[], command_config=ActionCommandConfig(command="new"),) + action = mock.Mock( + name=name, + required_actions=[], + command_config=ActionCommandConfig(command="new"), + ) action.name = name new_actions.append(action) @@ -1180,7 +1371,8 @@ def test_is_done_true_because_blocked(self): assert self.collection.is_done assert self.collection.is_failed self.collection._is_run_blocked.assert_called_with( - self.run_map["second_name"], in_job_only=True, + self.run_map["second_name"], + in_job_only=True, ) def test_is_done_true(self): @@ -1241,7 +1433,13 @@ def test_end_time_not_started(self): class TestActionRunCollectionIsRunBlocked: def _build_run(self, name): mock_node = mock.create_autospec(node.Node) - return ActionRun("id", name, mock_node, self.command_config, output_path=self.output_path,) + return ActionRun( + "id", + name, + mock_node, + self.command_config, + output_path=self.output_path, + ) @pytest.fixture(autouse=True) def setup_collection(self, output_path): @@ -1368,7 +1566,11 @@ def test_submit_command(self, mock_cluster_repo, mock_filehandler): mesos_task_id="last_attempt", ), ] - with mock.patch.object(self.action_run, "watch", autospec=True,) as mock_watch: + with mock.patch.object( + self.action_run, + "watch", + autospec=True, + ) as mock_watch: new_attempt = self.action_run.create_attempt() self.action_run.submit_command(new_attempt) @@ -1390,12 +1592,16 @@ def test_submit_command(self, mock_cluster_repo, mock_filehandler): mock_watch.assert_called_once_with(task) assert self.action_run.last_attempt.mesos_task_id == task.get_mesos_id.return_value - mock_filehandler.OutputStreamSerializer.assert_called_with(self.action_run.output_path,) + mock_filehandler.OutputStreamSerializer.assert_called_with( + self.action_run.output_path, + ) @mock.patch("tron.core.actionrun.filehandler", autospec=True) @mock.patch("tron.core.actionrun.MesosClusterRepository", autospec=True) def test_submit_command_task_none( - self, mock_cluster_repo, mock_filehandler, + self, + mock_cluster_repo, + mock_filehandler, ): # Task is None if Mesos is disabled mock_get_cluster = mock_cluster_repo.get_cluster @@ -1418,7 +1624,11 @@ def test_recover(self, mock_cluster_repo, mock_filehandler): last_attempt.end_time = 1000 last_attempt.exit_status = 0 serializer = mock_filehandler.OutputStreamSerializer.return_value - with mock.patch.object(self.action_run, "watch", autospec=True,) as mock_watch: + with mock.patch.object( + self.action_run, + "watch", + autospec=True, + ) as mock_watch: assert self.action_run.recover() mock_get_cluster = mock_cluster_repo.get_cluster @@ -1442,7 +1652,9 @@ def test_recover(self, mock_cluster_repo, mock_filehandler): assert self.action_run.exit_status is None assert last_attempt.end_time is None assert last_attempt.exit_status is None - mock_filehandler.OutputStreamSerializer.assert_called_with(self.action_run.output_path,) + 
mock_filehandler.OutputStreamSerializer.assert_called_with( + self.action_run.output_path, + ) @mock.patch("tron.core.actionrun.filehandler", autospec=True) @mock.patch("tron.core.actionrun.MesosClusterRepository", autospec=True) @@ -1458,7 +1670,9 @@ def test_recover_done_no_change(self, mock_cluster_repo, mock_filehandler): @mock.patch("tron.core.actionrun.filehandler", autospec=True) @mock.patch("tron.core.actionrun.MesosClusterRepository", autospec=True) def test_recover_no_mesos_task_id( - self, mock_cluster_repo, mock_filehandler, + self, + mock_cluster_repo, + mock_filehandler, ): self.action_run.machine.state = ActionRun.UNKNOWN last_attempt = self.action_run.create_attempt() @@ -1493,7 +1707,9 @@ def test_kill_task(self, mock_cluster_repo): self.action_run.machine.state = ActionRun.RUNNING self.action_run.kill() - mock_get_cluster.return_value.kill.assert_called_once_with(last_attempt.mesos_task_id,) + mock_get_cluster.return_value.kill.assert_called_once_with( + last_attempt.mesos_task_id, + ) @mock.patch("tron.core.actionrun.MesosClusterRepository", autospec=True) def test_kill_task_no_task_id(self, mock_cluster_repo): @@ -1510,7 +1726,9 @@ def test_stop_task(self, mock_cluster_repo): self.action_run.machine.state = ActionRun.RUNNING self.action_run.stop() - mock_get_cluster.return_value.kill.assert_called_once_with(last_attempt.mesos_task_id,) + mock_get_cluster.return_value.kill.assert_called_once_with( + last_attempt.mesos_task_id, + ) @mock.patch("tron.core.actionrun.MesosClusterRepository", autospec=True) def test_stop_task_no_task_id(self, mock_cluster_repo): @@ -1520,30 +1738,48 @@ def test_stop_task_no_task_id(self, mock_cluster_repo): assert error_message == "Error: Can't find task id for the action." def test_handler_exiting_unknown(self): - self.action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + self.action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) self.action_run.machine.transition("start") self.action_run.machine.transition("started") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.EXITING,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.EXITING, + ) assert self.action_run.is_unknown assert self.action_run.exit_status is None assert self.action_run.end_time is not None def test_handler_exiting_unknown_retry(self): - self.action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + self.action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) self.action_run.retries_remaining = 1 self.action_run.start = mock.Mock() self.action_run.machine.transition("start") self.action_run.machine.transition("started") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.EXITING,) + assert self.action_run.handler( + self.action_run.action_command, + ActionCommand.EXITING, + ) assert self.action_run.retries_remaining == 0 assert not self.action_run.is_unknown assert self.action_run.start.call_count == 1 def test_handler_exiting_failstart_failed(self): - self.action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=1,) + self.action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=1, + ) self.action_run.machine.transition("start") - assert self.action_run.handler(self.action_run.action_command, ActionCommand.FAILSTART,) + assert 
self.action_run.handler( + self.action_run.action_command, + ActionCommand.FAILSTART, + ) assert self.action_run.is_failed @@ -1578,30 +1814,48 @@ def mock_k8s_action_run(self): ) def test_k8s_handler_exiting_unknown(self, mock_k8s_action_run): - mock_k8s_action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + mock_k8s_action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) mock_k8s_action_run.machine.transition("start") mock_k8s_action_run.machine.transition("started") - assert mock_k8s_action_run.handler(mock_k8s_action_run.action_command, ActionCommand.EXITING,) + assert mock_k8s_action_run.handler( + mock_k8s_action_run.action_command, + ActionCommand.EXITING, + ) assert mock_k8s_action_run.is_unknown assert mock_k8s_action_run.exit_status is None assert mock_k8s_action_run.end_time is not None def test_handler_exiting_unknown_retry(self, mock_k8s_action_run): - mock_k8s_action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=None,) + mock_k8s_action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=None, + ) mock_k8s_action_run.retries_remaining = 1 mock_k8s_action_run.start = mock.Mock() mock_k8s_action_run.machine.transition("start") mock_k8s_action_run.machine.transition("started") - assert mock_k8s_action_run.handler(mock_k8s_action_run.action_command, ActionCommand.EXITING,) + assert mock_k8s_action_run.handler( + mock_k8s_action_run.action_command, + ActionCommand.EXITING, + ) assert mock_k8s_action_run.retries_remaining == 0 assert not mock_k8s_action_run.is_unknown assert mock_k8s_action_run.start.call_count == 1 def test_handler_exiting_failstart_failed(self, mock_k8s_action_run): - mock_k8s_action_run.action_command = mock.create_autospec(actioncommand.ActionCommand, exit_status=1,) + mock_k8s_action_run.action_command = mock.create_autospec( + actioncommand.ActionCommand, + exit_status=1, + ) mock_k8s_action_run.machine.transition("start") - assert mock_k8s_action_run.handler(mock_k8s_action_run.action_command, ActionCommand.FAILSTART,) + assert mock_k8s_action_run.handler( + mock_k8s_action_run.action_command, + ActionCommand.FAILSTART, + ) assert mock_k8s_action_run.is_failed @mock.patch("tron.core.actionrun.filehandler", autospec=True) @@ -1615,7 +1869,11 @@ def test_recover(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run) last_attempt.end_time = 1000 last_attempt.exit_status = 0 serializer = mock_filehandler.OutputStreamSerializer.return_value - with mock.patch.object(mock_k8s_action_run, "watch", autospec=True,) as mock_watch: + with mock.patch.object( + mock_k8s_action_run, + "watch", + autospec=True, + ) as mock_watch: assert mock_k8s_action_run.recover() mock_get_cluster = mock_cluster_repo.get_cluster @@ -1652,12 +1910,17 @@ def test_recover(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run) assert mock_k8s_action_run.exit_status is None assert last_attempt.end_time is None assert last_attempt.exit_status is None - mock_filehandler.OutputStreamSerializer.assert_called_with(mock_k8s_action_run.output_path,) + mock_filehandler.OutputStreamSerializer.assert_called_with( + mock_k8s_action_run.output_path, + ) @mock.patch("tron.core.actionrun.filehandler", autospec=True) @mock.patch("tron.core.actionrun.MesosClusterRepository", autospec=True) def test_recover_done_no_change( - self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run, + self, + mock_cluster_repo, + 
mock_filehandler, + mock_k8s_action_run, ): mock_k8s_action_run.machine.state = ActionRun.SUCCEEDED last_attempt = mock_k8s_action_run.create_attempt() @@ -1670,7 +1933,10 @@ def test_recover_done_no_change( @mock.patch("tron.core.actionrun.filehandler", autospec=True) @mock.patch("tron.core.actionrun.KubernetesClusterRepository", autospec=True) def test_recover_no_k8s_task_id( - self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run, + self, + mock_cluster_repo, + mock_filehandler, + mock_k8s_action_run, ): print(f"cluster: {type(mock_cluster_repo)} filehand: {type(mock_filehandler)} ar: {type(mock_k8s_action_run)}") mock_k8s_action_run.machine.state = ActionRun.UNKNOWN diff --git a/tests/core/job_collection_test.py b/tests/core/job_collection_test.py index f9f0cb6cf..7ec0500c8 100644 --- a/tests/core/job_collection_test.py +++ b/tests/core/job_collection_test.py @@ -52,13 +52,19 @@ def test_update_from_config_reconfigure_one_namespace(self): job_scheduler.get_job.assert_called_with() def test_move_running_job(self): - with mock.patch("tron.core.job_collection.JobCollection.get_by_name", autospec=None,) as mock_scheduler: + with mock.patch( + "tron.core.job_collection.JobCollection.get_by_name", + autospec=None, + ) as mock_scheduler: mock_scheduler.return_value.get_job.return_value.status = Job.STATUS_RUNNING result = self.collection.move("old.test", "new.test") assert "Job is still running." in result def test_move(self): - with mock.patch("tron.core.job_collection.JobCollection.get_by_name", autospec=None,) as mock_scheduler: + with mock.patch( + "tron.core.job_collection.JobCollection.get_by_name", + autospec=None, + ) as mock_scheduler: mock_scheduler.return_value.get_job.return_value.status = Job.STATUS_ENABLED mock_scheduler.get_name.return_value = "old.test" self.collection.add(mock_scheduler) @@ -69,9 +75,14 @@ def test_update(self): mock_scheduler = mock.create_autospec(JobScheduler) existing_scheduler = mock.create_autospec(JobScheduler) autospec_method( - self.collection.get_by_name, return_value=existing_scheduler, + self.collection.get_by_name, + return_value=existing_scheduler, ) assert self.collection.update(mock_scheduler) - self.collection.get_by_name.assert_called_with(mock_scheduler.get_name(),) - existing_scheduler.update_from_job_scheduler.assert_called_with(mock_scheduler,) + self.collection.get_by_name.assert_called_with( + mock_scheduler.get_name(), + ) + existing_scheduler.update_from_job_scheduler.assert_called_with( + mock_scheduler, + ) existing_scheduler.schedule_reconfigured.assert_called_with() diff --git a/tests/core/job_scheduler_test.py b/tests/core/job_scheduler_test.py index 1fb0d052c..71e7957fc 100644 --- a/tests/core/job_scheduler_test.py +++ b/tests/core/job_scheduler_test.py @@ -19,7 +19,12 @@ def setup_job(self): self.scheduler = mock.Mock() run_collection = mock.Mock(has_pending=False) node_pool = mock.Mock() - self.job = job.Job("jobname", self.scheduler, run_collection=run_collection, node_pool=node_pool,) + self.job = job.Job( + "jobname", + self.scheduler, + run_collection=run_collection, + node_pool=node_pool, + ) self.job_scheduler = JobScheduler(self.job) self.job.runs.get_pending.return_value = False self.scheduler.queue_overlapping = True @@ -57,7 +62,12 @@ def setup_job(self): self.scheduler = mock.Mock() run_collection = mock.Mock() node_pool = mock.Mock() - self.job = job.Job("jobname", self.scheduler, run_collection=run_collection, node_pool=node_pool,) + self.job = job.Job( + "jobname", + self.scheduler, + 
run_collection=run_collection, + node_pool=node_pool, + ) self.job_scheduler = JobScheduler(self.job) self.manual_run = mock.Mock() self.job.build_new_runs = mock.Mock(return_value=[self.manual_run]) @@ -71,11 +81,15 @@ def test_manual_start(self): def test_manual_start_default_with_timezone(self): self.job.time_zone = mock.Mock() - with mock.patch("tron.core.job_scheduler.timeutils.current_time", autospec=True,) as mock_current: + with mock.patch( + "tron.core.job_scheduler.timeutils.current_time", + autospec=True, + ) as mock_current: manual_runs = self.job_scheduler.manual_start() mock_current.assert_called_with(tz=self.job.time_zone) self.job.build_new_runs.assert_called_with( - mock_current.return_value, manual=True, + mock_current.return_value, + manual=True, ) assert_length(manual_runs, 1) self.manual_run.start.assert_called_once_with() @@ -96,13 +110,20 @@ def setup_job(self): self.scheduler.next_run_time.return_value = 0 mock_run = mock.Mock() mock_run.seconds_until_run_time.return_value = 0 - run_collection = mock.Mock(has_pending=False, autospec=True, return_value=[mock_run],) + run_collection = mock.Mock( + has_pending=False, + autospec=True, + return_value=[mock_run], + ) mock_build_new_run = mock.Mock() run_collection.build_new_run.return_value = mock_build_new_run mock_build_new_run.seconds_until_run_time.return_value = 0 node_pool = mock.Mock() self.job = job.Job( - name="jobname", scheduler=self.scheduler, run_collection=run_collection, node_pool=node_pool, + name="jobname", + scheduler=self.scheduler, + run_collection=run_collection, + node_pool=node_pool, ) self.job_scheduler = JobScheduler(self.job) self.original_build_new_runs = self.job.build_new_runs @@ -152,7 +173,10 @@ def test_handle_job_events_no_schedule_on_complete(self, reactor): self.job.runs.get_first_queued = lambda: queued_job_run self.job_scheduler.handle_job_events(self.job, job.Job.NOTIFY_RUN_DONE) reactor.callLater.assert_any_call( - 0, self.job_scheduler.run_job, queued_job_run, run_queued=True, + 0, + self.job_scheduler.run_job, + queued_job_run, + run_queued=True, ) def test_handle_job_events_schedule_on_complete(self): @@ -179,31 +203,46 @@ def get_queued(state): @mock.patch("tron.core.job_scheduler.reactor", autospec=True) def test_run_queue_schedule(self, reactor): - with mock.patch.object(self.job_scheduler, "schedule",) as mock_schedule: + with mock.patch.object( + self.job_scheduler, + "schedule", + ) as mock_schedule: self.job_scheduler.run_job = mock.Mock() self.job.scheduler.schedule_on_complete = False queued_job_run = mock.Mock() self.job.runs.get_first_queued = lambda: queued_job_run self.job_scheduler.run_queue_schedule() reactor.callLater.assert_called_once_with( - 0, self.job_scheduler.run_job, queued_job_run, run_queued=True, + 0, + self.job_scheduler.run_job, + queued_job_run, + run_queued=True, ) mock_schedule.assert_called_once_with() class TestJobSchedulerOther(TestCase): - """ Test other JobScheduler functions """ + """Test other JobScheduler functions""" def _make_job_scheduler(self, job_name, enabled=True): scheduler = mock.Mock() run_collection = mock.Mock() node_pool = mock.Mock() - new_job = job.Job(job_name, scheduler, run_collection=run_collection, node_pool=node_pool, enabled=enabled,) + new_job = job.Job( + job_name, + scheduler, + run_collection=run_collection, + node_pool=node_pool, + enabled=enabled, + ) return new_job, JobScheduler(new_job) @setup def setup_job(self): - self.job, self.job_scheduler = self._make_job_scheduler("jobname", True,) + self.job, 
self.job_scheduler = self._make_job_scheduler( + "jobname", + True, + ) def test_disable(self): self.job.runs.cancel_pending = mock.Mock() @@ -220,7 +259,9 @@ def test_update_from_job_scheduler_disable(self): self.job_scheduler.update_from_job_scheduler(new_job_scheduler) - assert self.job.update_from_job.call_args == mock.call(new_job_scheduler.get_job(),) + assert self.job.update_from_job.call_args == mock.call( + new_job_scheduler.get_job(), + ) assert self.job_scheduler.disable.call_count == 1 def test_update_from_job_scheduler_enable(self): @@ -232,7 +273,9 @@ def test_update_from_job_scheduler_enable(self): self.job_scheduler.update_from_job_scheduler(new_job_scheduler) - assert self.job.update_from_job.call_args == mock.call(new_job_scheduler.get_job(),) + assert self.job.update_from_job.call_args == mock.call( + new_job_scheduler.get_job(), + ) assert self.job_scheduler.enable.call_count == 1 def test_update_from_job_scheduler_no_config_change(self): @@ -244,7 +287,9 @@ def test_update_from_job_scheduler_no_config_change(self): self.job_scheduler.update_from_job_scheduler(new_job_scheduler) - assert self.job.update_from_job.call_args == mock.call(new_job_scheduler.get_job(),) + assert self.job.update_from_job.call_args == mock.call( + new_job_scheduler.get_job(), + ) assert self.job_scheduler.enable.call_count == 0 assert self.job_scheduler.disable.call_count == 0 assert self.job.config_enabled == new_job.config_enabled @@ -257,19 +302,29 @@ def setup_factory(self): self.context = mock.Mock() self.output_stream_dir = mock.Mock() self.time_zone = mock.Mock() - self.action_runner = mock.create_autospec(actioncommand.SubprocessActionRunnerFactory,) + self.action_runner = mock.create_autospec( + actioncommand.SubprocessActionRunnerFactory, + ) self.factory = JobSchedulerFactory( - self.context, self.output_stream_dir, self.time_zone, self.action_runner, mock.Mock(), + self.context, + self.output_stream_dir, + self.time_zone, + self.action_runner, + mock.Mock(), ) def test_build(self): config = mock.Mock() - with mock.patch("tron.core.job_scheduler.Job", autospec=True,) as mock_job: + with mock.patch( + "tron.core.job_scheduler.Job", + autospec=True, + ) as mock_job: job_scheduler = self.factory.build(config) _, kwargs = mock_job.from_config.call_args assert_equal(kwargs["job_config"], config) assert_equal( - job_scheduler.get_job(), mock_job.from_config.return_value, + job_scheduler.get_job(), + mock_job.from_config.return_value, ) assert_equal(kwargs["parent_context"], self.context) assert_equal(kwargs["output_path"].base, self.output_stream_dir) diff --git a/tests/core/job_test.py b/tests/core/job_test.py index 56e0e383e..f1d281c63 100644 --- a/tests/core/job_test.py +++ b/tests/core/job_test.py @@ -20,7 +20,10 @@ @pytest.fixture def mock_node_repo(): - with mock.patch("tron.core.job.node.NodePoolRepository", autospec=True,) as mock_node_repo: + with mock.patch( + "tron.core.job.node.NodePoolRepository", + autospec=True, + ) as mock_node_repo: yield mock_node_repo @@ -53,10 +56,18 @@ def test__init__(self): assert str(self.job.output_path).endswith(self.job.name) def test_from_config(self, mock_node_repo): - action = mock.MagicMock(name="first", command="doit", node=None, requires=[],) + action = mock.MagicMock( + name="first", + command="doit", + node=None, + requires=[], + ) job_config = mock.Mock( node="thenodepool", - monitoring={"team": "foo", "page": True,}, + monitoring={ + "team": "foo", + "page": True, + }, all_nodes=False, queueing=True, enabled=True, @@ -68,7 +79,9 @@ def 
test_from_config(self, mock_node_repo): scheduler = "scheduler_token" parent_context = "parent_context_token" output_path = ["base_path"] - mock_action_runner = mock.create_autospec(actioncommand.SubprocessActionRunnerFactory,) + mock_action_runner = mock.create_autospec( + actioncommand.SubprocessActionRunnerFactory, + ) new_job = job.Job.from_config( job_config, scheduler, @@ -80,14 +93,21 @@ def test_from_config(self, mock_node_repo): assert_equal(new_job.scheduler, scheduler) assert_equal(new_job.context.next, parent_context) - mock_node_repo.get_instance().get_by_name.assert_called_with(job_config.node,) + mock_node_repo.get_instance().get_by_name.assert_called_with( + job_config.node, + ) assert_equal(new_job.enabled, True) assert_equal(new_job.get_monitoring()["team"], "foo") assert new_job.action_graph def test_update_from_job(self): action_runner = mock.Mock() - other_job = job.Job("otherjob", "scheduler", action_runner=action_runner, run_limit=10,) + other_job = job.Job( + "otherjob", + "scheduler", + action_runner=action_runner, + run_limit=10, + ) self.job.update_from_job(other_job) assert_equal(self.job.name, "otherjob") assert_equal(self.job.scheduler, "scheduler") @@ -141,7 +161,12 @@ def test_build_new_runs(self): self.job.node_pool.next.assert_called_with() node = self.job.node_pool.next.return_value assert_call( - self.job.runs.build_new_run, 0, self.job, run_time, node, manual=False, + self.job.runs.build_new_run, + 0, + self.job, + run_time, + node, + manual=False, ) assert_length(runs, 1) self.job.watch.assert_called_with(runs[0]) @@ -157,7 +182,12 @@ def test_build_new_runs_all_nodes(self): for i in range(len(runs)): node = self.job.node_pool.nodes[i] assert_call( - self.job.runs.build_new_run, i, self.job, run_time, node, manual=False, + self.job.runs.build_new_run, + i, + self.job, + run_time, + node, + manual=False, ) calls = [] @@ -173,7 +203,12 @@ def test_build_new_runs_manual(self): node = self.job.node_pool.next.return_value assert_length(runs, 1) assert_call( - self.job.runs.build_new_run, 0, self.job, run_time, node, manual=True, + self.job.runs.build_new_run, + 0, + self.job, + run_time, + node, + manual=True, ) self.job.watch.assert_called_with(runs[0]) @@ -211,8 +246,16 @@ def test__eq__false(self): def test_job_watch_notifies_about_runs(mock_job): # Separate from the above tests because we don't want # watch to be mocked here. 
- new_run = jobrun.JobRun(job_name="test", run_num=1, run_time="some_time", node="node",) - with mock.patch.object(mock_job, "handler",) as mock_handler, mock.patch.object(mock_job, "notify",) as mock_notify: + new_run = jobrun.JobRun( + job_name="test", + run_num=1, + run_time="some_time", + node="node", + ) + with mock.patch.object(mock_job, "handler",) as mock_handler, mock.patch.object( + mock_job, + "notify", + ) as mock_notify: mock_job.watch(new_run) # Make sure that the job is still watching correctly @@ -248,15 +291,18 @@ def test_restore_state_sets_job_runs(self): self.job.get_job_runs_from_state.return_value = mock_runs with mock.patch( - "tron.core.job_scheduler.recovery.launch_recovery_actionruns_for_job_runs", autospec=True, + "tron.core.job_scheduler.recovery.launch_recovery_actionruns_for_job_runs", + autospec=True, ) as mock_launch_recovery: mock_launch_recovery.return_value = mock.Mock(autospec=True) self.job_scheduler.restore_state( - job_state_data, mock_action_runner, + job_state_data, + mock_action_runner, ) assert self.job.runs.runs == collections.deque(mock_runs) mock_launch_recovery.assert_called_once_with( - job_runs=mock_runs, master_action_runner=mock_action_runner, + job_runs=mock_runs, + master_action_runner=mock_action_runner, ) calls = [mock.call(mock_runs[i]) for i in range(0, len(mock_runs))] self.job.watch.assert_has_calls(calls) @@ -288,7 +334,9 @@ def test_schedule_reconfigured(self): assert self.job.runs.remove_pending.call_count == 1 assert self.job_scheduler.create_and_schedule_runs.call_args_list == [ - mock.call(next_run_time="a_run_time",), + mock.call( + next_run_time="a_run_time", + ), ] def test_schedule(self): diff --git a/tests/core/jobgraph_test.py b/tests/core/jobgraph_test.py index 1dc81b40f..f05b0ca0d 100644 --- a/tests/core/jobgraph_test.py +++ b/tests/core/jobgraph_test.py @@ -15,8 +15,15 @@ def _setup_job_graph_config_container(): - action1 = ConfigAction(name="action1", command="do something",) - action2 = ConfigAction(name="action2", command="do something", requires=["action1"],) + action1 = ConfigAction( + name="action1", + command="do something", + ) + action2 = ConfigAction( + name="action2", + command="do something", + requires=["action1"], + ) job1_config = ConfigJob( name="job1", node="default", @@ -26,13 +33,22 @@ def _setup_job_graph_config_container(): ) action3 = ConfigAction( - name="action3", command="do something", triggered_by=["MASTER.job1.action2.shortdate.{shortdate}"], + name="action3", + command="do something", + triggered_by=["MASTER.job1.action2.shortdate.{shortdate}"], ) job2_config = ConfigJob( - name="job1", node="default", schedule=mock.Mock(), actions={"action3": action3}, namespace="other", + name="job1", + node="default", + schedule=mock.Mock(), + actions={"action3": action3}, + namespace="other", ) - action4 = ConfigAction(name="action4", command="do something",) + action4 = ConfigAction( + name="action4", + command="do something", + ) action5 = ConfigAction( name="action5", command="do something", @@ -64,7 +80,8 @@ def test_job_graph_missing_dependency(self): missing_dependency_config_container.get_jobs.return_value.pop("other.job2") with pytest.raises(ValueError) as e: JobGraph( - missing_dependency_config_container, should_validate_missing_dependency=True, + missing_dependency_config_container, + should_validate_missing_dependency=True, ) assert str(e.value) == MISSING_DEPENDENCY_ERR_MSG diff --git a/tests/core/jobrun_test.py b/tests/core/jobrun_test.py index 06bad49e2..ef57fe3c7 100644 --- 
a/tests/core/jobrun_test.py +++ b/tests/core/jobrun_test.py @@ -26,11 +26,18 @@ def build_mock_job(): action_graph = mock.create_autospec(actiongraph.ActionGraph) action_graph.action_map = { - "foo": mock.Mock(triggered_by=[], trigger_timeout=datetime.timedelta(days=1),), + "foo": mock.Mock( + triggered_by=[], + trigger_timeout=datetime.timedelta(days=1), + ), } runner = mock.create_autospec(actioncommand.SubprocessActionRunnerFactory) return mock.create_autospec( - job.Job, action_graph=action_graph, output_path=mock.Mock(), context=mock.Mock(), action_runner=runner, + job.Job, + action_graph=action_graph, + output_path=mock.Mock(), + context=mock.Mock(), + action_runner=runner, ) @@ -50,11 +57,17 @@ def setup_jobrun(self): 7, self.run_time, mock_node, - action_runs=MagicMock(action_runs_with_cleanup=[], get_startable_action_runs=lambda: [],), + action_runs=MagicMock( + action_runs_with_cleanup=[], + get_startable_action_runs=lambda: [], + ), ) autospec_method(self.job_run.watch) autospec_method(self.job_run.notify) - self.action_run = mock.create_autospec(actionrun.ActionRun, is_skipped=False,) + self.action_run = mock.create_autospec( + actionrun.ActionRun, + is_skipped=False, + ) def test__init__(self): assert_equal(self.job_run.job_name, "jobname") @@ -64,7 +77,13 @@ def test__init__(self): def test_for_job(self): run_num = 6 mock_node = mock.create_autospec(node.Node) - run = jobrun.JobRun.for_job(self.job, run_num, self.run_time, mock_node, False,) + run = jobrun.JobRun.for_job( + self.job, + run_num, + self.run_time, + mock_node, + False, + ) assert_equal(run.action_runs.action_graph, self.action_graph) assert_equal(run.job_name, self.job.get_name.return_value) @@ -75,7 +94,13 @@ def test_for_job(self): def test_for_job_manual(self): run_num = 6 mock_node = mock.create_autospec(node.Node) - run = jobrun.JobRun.for_job(self.job, run_num, self.run_time, mock_node, True,) + run = jobrun.JobRun.for_job( + self.job, + run_num, + self.run_time, + mock_node, + True, + ) assert_equal(run.action_runs.action_graph, self.action_graph) assert run.manual @@ -89,7 +114,10 @@ def test_set_action_runs(self): self.job_run._action_runs = None count = 2 action_runs = [mock.create_autospec(actionrun.ActionRun) for _ in range(count)] - run_collection = mock.create_autospec(actionrun.ActionRunCollection, action_runs_with_cleanup=action_runs,) + run_collection = mock.create_autospec( + actionrun.ActionRunCollection, + action_runs_with_cleanup=action_runs, + ) self.job_run._set_action_runs(run_collection) assert_equal(self.job_run.watch.call_count, count) @@ -108,7 +136,9 @@ def test_set_action_runs_none(self): def test_set_action_runs_duplicate(self): run_collection = mock.create_autospec(actionrun.ActionRunCollection) assert_raises( - ValueError, self.job_run._set_action_runs, run_collection, + ValueError, + self.job_run._set_action_runs, + run_collection, ) @mock.patch("tron.core.jobrun.timeutils.current_time", autospec=True) @@ -208,7 +238,9 @@ def test_handler_with_startable(self): self.action_run.is_broken = False self.job_run.handler(self.action_run, mock.Mock()) - self.job_run.notify.assert_called_with(self.job_run.NOTIFY_STATE_CHANGED,) + self.job_run.notify.assert_called_with( + self.job_run.NOTIFY_STATE_CHANGED, + ) startable_run.start.assert_called_with() assert not self.job_run.finalize.mock_calls @@ -330,7 +362,11 @@ def setup_jobrun(self): def test_from_state(self): run = jobrun.JobRun.from_state( - self.state_data, self.action_graph, self.output_path, self.context, self.node_pool, + 
self.state_data, + self.action_graph, + self.output_path, + self.context, + self.node_pool, ) assert_length(run.action_runs.run_map, 1) assert_equal(run.job_name, self.state_data["job_name"]) @@ -342,7 +378,11 @@ def test_from_state(self): def test_from_state_node_no_longer_exists(self): run = jobrun.JobRun.from_state( - self.state_data, self.action_graph, self.output_path, self.context, self.node_pool, + self.state_data, + self.action_graph, + self.output_path, + self.context, + self.node_pool, ) assert_length(run.action_runs.run_map, 1) assert_equal(run.job_name, "thejobname") @@ -391,7 +431,13 @@ def setup_runs(self): self._mock_run(state=actionrun.ActionRun.QUEUED, run_num=5), self._mock_run(state=actionrun.ActionRun.WAITING, run_num=4), self._mock_run(state=actionrun.ActionRun.RUNNING, run_num=3), - ] + [self._mock_run(state=actionrun.ActionRun.SUCCEEDED, run_num=i,) for i in range(2, 0, -1)] + ] + [ + self._mock_run( + state=actionrun.ActionRun.SUCCEEDED, + run_num=i, + ) + for i in range(2, 0, -1) + ] self.run_collection.runs.extend(self.job_runs) self.mock_node = mock.create_autospec(node.Node) @@ -420,7 +466,13 @@ def test_job_runs_from_state(self): output_path = mock.create_autospec(filehandler.OutputPath) context = mock.Mock() node_pool = mock.create_autospec(node.NodePool) - runs = jobrun.job_runs_from_state(state_data, action_graph, output_path, context, node_pool,) + runs = jobrun.job_runs_from_state( + state_data, + action_graph, + output_path, + context, + node_pool, + ) assert len(runs) == 4 assert all([type(job) == jobrun.JobRun for job in runs]) @@ -428,7 +480,11 @@ def test_build_new_run(self): autospec_method(self.run_collection.remove_old_runs) run_time = datetime.datetime(2012, 3, 14, 15, 9, 26) mock_job = build_mock_job() - job_run = self.run_collection.build_new_run(mock_job, run_time, self.mock_node,) + job_run = self.run_collection.build_new_run( + mock_job, + run_time, + self.mock_node, + ) assert_in(job_run, self.run_collection.runs) self.run_collection.remove_old_runs.assert_called_with() assert job_run.run_num == 6 @@ -438,7 +494,12 @@ def test_build_new_run_manual(self): autospec_method(self.run_collection.remove_old_runs) run_time = datetime.datetime(2012, 3, 14, 15, 9, 26) mock_job = build_mock_job() - job_run = self.run_collection.build_new_run(mock_job, run_time, self.mock_node, True,) + job_run = self.run_collection.build_new_run( + mock_job, + run_time, + self.mock_node, + True, + ) assert_in(job_run, self.run_collection.runs) self.run_collection.remove_old_runs.assert_called_with() assert job_run.run_num == 6 @@ -447,7 +508,8 @@ def test_build_new_run_manual(self): def test_cancel_pending(self): pending_runs = [mock.Mock() for _ in range(2)] autospec_method( - self.run_collection.get_pending, return_value=pending_runs, + self.run_collection.get_pending, + return_value=pending_runs, ) self.run_collection.cancel_pending() for pending_run in pending_runs: @@ -502,7 +564,11 @@ def test_get_newest(self): assert_equal(run, self.job_runs[0]) def test_get_newest_exclude_manual(self): - run = self._mock_run(state=actionrun.ActionRun.RUNNING, run_num=5, manual=True,) + run = self._mock_run( + state=actionrun.ActionRun.RUNNING, + run_num=5, + manual=True, + ) self.job_runs.insert(0, run) newest_run = self.run_collection.get_newest(include_manual=False) assert_equal(newest_run, self.job_runs[1]) @@ -513,21 +579,30 @@ def test_get_newest_no_runs(self): def test_pending(self): run_num = self.run_collection.next_run_num() - scheduled_run = 
self._mock_run(run_num=run_num, state=actionrun.ActionRun.SCHEDULED,) + scheduled_run = self._mock_run( + run_num=run_num, + state=actionrun.ActionRun.SCHEDULED, + ) self.run_collection.runs.appendleft(scheduled_run) pending = list(self.run_collection.get_pending()) assert_length(pending, 2) assert_equal(pending, [scheduled_run, self.job_runs[0]]) def test_get_active(self): - starting_run = self._mock_run(run_num=self.run_collection.next_run_num(), state=actionrun.ActionRun.STARTING,) + starting_run = self._mock_run( + run_num=self.run_collection.next_run_num(), + state=actionrun.ActionRun.STARTING, + ) self.run_collection.runs.appendleft(starting_run) active = list(self.run_collection.get_active()) assert_length(active, 3) assert_equal(active, [starting_run, self.job_runs[1], self.job_runs[2]]) def test_get_active_with_node(self): - starting_run = self._mock_run(run_num=self.run_collection.next_run_num(), state=actionrun.ActionRun.STARTING,) + starting_run = self._mock_run( + run_num=self.run_collection.next_run_num(), + state=actionrun.ActionRun.STARTING, + ) starting_run.node = "differentnode" self.run_collection.runs.appendleft(starting_run) active = list(self.run_collection.get_active("anode")) @@ -540,7 +615,10 @@ def test_get_active_none(self): def test_get_first_queued(self): run_num = self.run_collection.next_run_num() - second_queued = self._mock_run(run_num=run_num, state=actionrun.ActionRun.QUEUED,) + second_queued = self._mock_run( + run_num=run_num, + state=actionrun.ActionRun.QUEUED, + ) self.run_collection.runs.appendleft(second_queued) first_queued = self.run_collection.get_first_queued() @@ -604,7 +682,10 @@ class TestJobRunStateTransitions: @pytest.fixture def mock_event_bus(self): - with mock.patch("tron.core.actionrun.EventBus", autospec=True,) as mock_event_bus: + with mock.patch( + "tron.core.actionrun.EventBus", + autospec=True, + ) as mock_event_bus: mock_event_bus.has_event.return_value = True yield mock_event_bus @@ -614,7 +695,11 @@ def job_run(self, tmpdir, mock_event_bus): action_after_foo = action.Action("after_foo", action.ActionCommandConfig("command"), None) action_bar = action.Action("bar", action.ActionCommandConfig("command"), None, triggered_by={"trigger"}) action_graph = actiongraph.ActionGraph( - action_map={"foo": action_foo, "after_foo": action_after_foo, "bar": action_bar,}, + action_map={ + "foo": action_foo, + "after_foo": action_after_foo, + "bar": action_bar, + }, required_actions={"foo": set(), "after_foo": {"foo"}, "bar": set()}, required_triggers={"foo": set(), "after_foo": set(), "bar": {"trigger"}}, ) @@ -624,7 +709,11 @@ def job_run(self, tmpdir, mock_event_bus): action_runner=actioncommand.NoActionRunnerFactory(), ) job_run = jobrun.JobRun.for_job( - mock_job, run_num=1, run_time=datetime.datetime.now(), node=mock.Mock(), manual=False, + mock_job, + run_num=1, + run_time=datetime.datetime.now(), + node=mock.Mock(), + manual=False, ) return job_run diff --git a/tests/core/recovery_test.py b/tests/core/recovery_test.py index d266e024f..ac17ce14f 100644 --- a/tests/core/recovery_test.py +++ b/tests/core/recovery_test.py @@ -90,14 +90,30 @@ def test_filter_action_runs_needing_recovery(self): def test_launch_recovery_actionruns_for_job_runs(self, mock_filter): mock_actions = ( [ - mock.Mock(action_runner=NoActionRunnerFactory(), spec=SSHActionRun,), mock.Mock( - action_runner=SubprocessActionRunnerFactory(status_path="/tmp/foo", exec_path=("/tmp/foo"),), + action_runner=NoActionRunnerFactory(), spec=SSHActionRun, ), + mock.Mock( + 
action_runner=SubprocessActionRunnerFactory( + status_path="/tmp/foo", + exec_path=("/tmp/foo"), + ), + spec=SSHActionRun, + ), + ], + [ + mock.Mock( + action_runner=NoActionRunnerFactory(), + spec=MesosActionRun, + ), + ], + [ + mock.Mock( + action_runner=NoActionRunnerFactory(), + spec=KubernetesActionRun, + ), ], - [mock.Mock(action_runner=NoActionRunnerFactory(), spec=MesosActionRun,),], - [mock.Mock(action_runner=NoActionRunnerFactory(), spec=KubernetesActionRun,),], ) mock_filter.return_value = mock_actions @@ -105,7 +121,8 @@ def test_launch_recovery_actionruns_for_job_runs(self, mock_filter): mock_job_run = mock.Mock() launch_recovery_actionruns_for_job_runs( - [mock_job_run], mock_action_runner, + [mock_job_run], + mock_action_runner, ) ssh_runs = mock_actions[0] for run in ssh_runs: @@ -126,6 +143,7 @@ def test_launch_recovery_actionruns_empty_job_run(self, mock_filter): mock_filter.return_value = ([], [], []) launch_recovery_actionruns_for_job_runs( - [empty_job_run, other_job_run], mock_action_runner, + [empty_job_run, other_job_run], + mock_action_runner, ) mock_filter.assert_called_with(other_job_run._action_runs) diff --git a/tests/kubernetes_test.py b/tests/kubernetes_test.py index 7f03763e6..0b7084d4a 100644 --- a/tests/kubernetes_test.py +++ b/tests/kubernetes_test.py @@ -20,7 +20,9 @@ @pytest.fixture def mock_kubernetes_task(): with mock.patch( - "tron.kubernetes.logging.getLogger", return_value=mock.Mock(handlers=[mock.Mock()]), autospec=None, + "tron.kubernetes.logging.getLogger", + return_value=mock.Mock(handlers=[mock.Mock()]), + autospec=None, ): yield KubernetesTask( action_run_id="mock_service.mock_job.1.mock_action", @@ -33,8 +35,12 @@ def mock_kubernetes_task(): @pytest.fixture def mock_kubernetes_cluster(): with mock.patch("tron.kubernetes.PyDeferredQueue", autospec=True,), mock.patch( - "tron.kubernetes.TaskProcessor", autospec=True, - ), mock.patch("tron.kubernetes.Subscription", autospec=True,) as mock_runner: + "tron.kubernetes.TaskProcessor", + autospec=True, + ), mock.patch( + "tron.kubernetes.Subscription", + autospec=True, + ) as mock_runner: mock_runner.return_value.configure_mock( stopping=False, TASK_CONFIG_INTERFACE=mock.Mock(spec=KubernetesTaskConfig) ) @@ -44,9 +50,11 @@ def mock_kubernetes_cluster(): @pytest.fixture def mock_disabled_kubernetes_cluster(): with mock.patch("tron.kubernetes.PyDeferredQueue", autospec=True,), mock.patch( - "tron.kubernetes.TaskProcessor", autospec=True, + "tron.kubernetes.TaskProcessor", + autospec=True, ), mock.patch( - "tron.kubernetes.Subscription", autospec=True, + "tron.kubernetes.Subscription", + autospec=True, ): yield KubernetesCluster("kube-cluster-a:1234", enabled=False) @@ -95,7 +103,11 @@ def test_handle_event_log_event_info_exception(mock_kubernetes_task): def test_handle_event_exit_early_on_misrouted_event(mock_kubernetes_task): - with mock.patch.object(mock_kubernetes_task, "log_event_info", autospec=True,) as mock_log_event_info: + with mock.patch.object( + mock_kubernetes_task, + "log_event_info", + autospec=True, + ) as mock_log_event_info: mock_kubernetes_task.handle_event( mock_event_factory(task_id="not-the-pods-youre-looking-for", platform_type="finished") ) @@ -278,7 +290,11 @@ def test_handle_event_exit_not_terminated(mock_kubernetes_task): "ready": False, "restartCount": 0, "started": False, - "state": {"running": None, "terminated": None, "waiting": {"reason": "ContainerCreating"},}, + "state": { + "running": None, + "terminated": None, + "waiting": {"reason": "ContainerCreating"}, + }, }, 
], } @@ -424,7 +440,10 @@ def test_handle_event_code_from_state(mock_kubernetes_task): def test_handle_event_lost(mock_kubernetes_task): mock_kubernetes_task.started() mock_kubernetes_task.handle_event( - mock_event_factory(task_id=mock_kubernetes_task.get_kubernetes_id(), platform_type="lost",) + mock_event_factory( + task_id=mock_kubernetes_task.get_kubernetes_id(), + platform_type="lost", + ) ) assert mock_kubernetes_task.is_unknown @@ -669,7 +688,10 @@ def test_set_enabled_enable_already_on(mock_kubernetes_cluster): assert mock_kubernetes_cluster.runner is not None assert mock_kubernetes_cluster.deferred is not None mock_kubernetes_cluster.deferred.addCallback.assert_has_calls( - [mock.call(mock_kubernetes_cluster.process_event), mock.call(mock_kubernetes_cluster.handle_next_event),] + [ + mock.call(mock_kubernetes_cluster.process_event), + mock.call(mock_kubernetes_cluster.handle_next_event), + ] ) @@ -704,14 +726,20 @@ def test_set_enabled_disable(mock_kubernetes_cluster): def test_configure_default_volumes(): # default_volume validation is done at config time, we just need to validate we are setting it with mock.patch("tron.kubernetes.PyDeferredQueue", autospec=True,), mock.patch( - "tron.kubernetes.TaskProcessor", autospec=True, + "tron.kubernetes.TaskProcessor", + autospec=True, ), mock.patch( - "tron.kubernetes.Subscription", autospec=True, + "tron.kubernetes.Subscription", + autospec=True, ): mock_kubernetes_cluster = KubernetesCluster("kube-cluster-a:1234", default_volumes=[]) assert mock_kubernetes_cluster.default_volumes == [] expected_volumes = [ - ConfigVolume(container_path="/tmp", host_path="/host/tmp", mode="RO",), + ConfigVolume( + container_path="/tmp", + host_path="/host/tmp", + mode="RO", + ), ] mock_kubernetes_cluster.configure_tasks(default_volumes=expected_volumes) assert mock_kubernetes_cluster.default_volumes == expected_volumes diff --git a/tests/mcp_reconfigure_test.py b/tests/mcp_reconfigure_test.py index 1f528d47a..b179a28ab 100644 --- a/tests/mcp_reconfigure_test.py +++ b/tests/mcp_reconfigure_test.py @@ -21,22 +21,40 @@ class TestMCPReconfigure(TestCase): os.environ["SSH_AUTH_SOCK"] = "test-socket" pre_config = dict( - ssh_options=dict(agent=True, identities=["tests/test_id_rsa"],), - nodes=[dict(name="node0", hostname="batch0"), dict(name="node1", hostname="batch1"),], + ssh_options=dict( + agent=True, + identities=["tests/test_id_rsa"], + ), + nodes=[ + dict(name="node0", hostname="batch0"), + dict(name="node1", hostname="batch1"), + ], node_pools=[dict(name="nodePool", nodes=["node0", "node1"])], - command_context={"thischanges": "froma",}, + command_context={ + "thischanges": "froma", + }, jobs=[ dict( name="test_unchanged", node="node0", schedule="daily", - actions=[dict(name="action_unchanged", command="command_unchanged",),], + actions=[ + dict( + name="action_unchanged", + command="command_unchanged", + ), + ], ), dict( name="test_remove", node="node1", schedule={"type": "cron", "value": "* * * * *"}, - actions=[dict(name="action_remove", command="command_remove",),], + actions=[ + dict( + name="action_remove", + command="command_remove", + ), + ], cleanup_action=dict(name="cleanup", command="doit"), ), dict( @@ -44,54 +62,97 @@ class TestMCPReconfigure(TestCase): node="nodePool", schedule={"type": "cron", "value": "* * * * *"}, actions=[ - dict(name="action_change", command="command_change",), - dict(name="action_remove2", command="command_remove2", requires=["action_change"],), + dict( + name="action_change", + command="command_change", + ), + 
dict( + name="action_remove2", + command="command_remove2", + requires=["action_change"], + ), ], ), dict( name="test_daily_change", node="node0", schedule="daily", - actions=[dict(name="action_daily_change", command="command",),], + actions=[ + dict( + name="action_daily_change", + command="command", + ), + ], ), dict( name="test_action_added", node="node0", schedule={"type": "cron", "value": "* * * * *"}, - actions=[dict(name="action_first", command="command_do_it"),], + actions=[ + dict(name="action_first", command="command_do_it"), + ], ), ], ) post_config = dict( - ssh_options=dict(agent=True, identities=["tests/test_id_rsa"],), - nodes=[dict(name="node0", hostname="batch0"), dict(name="node1", hostname="batch1"),], + ssh_options=dict( + agent=True, + identities=["tests/test_id_rsa"], + ), + nodes=[ + dict(name="node0", hostname="batch0"), + dict(name="node1", hostname="batch1"), + ], node_pools=[dict(name="nodePool", nodes=["node0", "node1"])], - command_context={"a_variable": "is_constant", "thischanges": "tob",}, + command_context={ + "a_variable": "is_constant", + "thischanges": "tob", + }, jobs=[ dict( name="test_unchanged", node="node0", schedule="daily", - actions=[dict(name="action_unchanged", command="command_unchanged",),], + actions=[ + dict( + name="action_unchanged", + command="command_unchanged", + ), + ], ), dict( name="test_change", node="nodePool", schedule="daily", - actions=[dict(name="action_change", command="command_changed",),], + actions=[ + dict( + name="action_change", + command="command_changed", + ), + ], ), dict( name="test_daily_change", node="node0", schedule="daily", - actions=[dict(name="action_daily_change", command="command_changed",),], + actions=[ + dict( + name="action_daily_change", + command="command_changed", + ), + ], ), dict( name="test_new", node="nodePool", schedule={"type": "cron", "value": "* * * * *"}, - actions=[dict(name="action_new", command="command_new",),], + actions=[ + dict( + name="action_new", + command="command_new", + ), + ], ), dict( name="test_action_added", @@ -140,7 +201,9 @@ def test_job_list(self): self.reconfigure() assert_equal(len(self.mcp.jobs.get_names()), count) - @pytest.mark.skip(reason="This test doesn't currently as run1 is not scheduled.",) + @pytest.mark.skip( + reason="This test doesn't currently pass as run1 is not scheduled.", + ) @suite("integration") def test_job_unchanged(self): assert "MASTER.test_unchanged" in self.mcp.jobs diff --git a/tests/mcp_test.py b/tests/mcp_test.py index f9419c884..ec4431eb6 100644 --- a/tests/mcp_test.py +++ b/tests/mcp_test.py @@ -25,8 +25,13 @@ class TestMasterControlProgram: def setup_mcp(self): self.working_dir = tempfile.mkdtemp() self.config_path = tempfile.mkdtemp() - self.mcp = mcp.MasterControlProgram(self.working_dir, self.config_path,) - self.mcp.state_watcher = mock.create_autospec(statemanager.StateChangeWatcher,) + self.mcp = mcp.MasterControlProgram( + self.working_dir, + self.config_path, + ) + self.mcp.state_watcher = mock.create_autospec( + statemanager.StateChangeWatcher, + ) yield shutil.rmtree(self.config_path) shutil.rmtree(self.working_dir) @@ -44,7 +49,12 @@ def test_reconfigure_namespace(self): self.mcp._load_config.assert_called_with(reconfigure=True, namespace_to_reconfigure="foo") @pytest.mark.parametrize( - "reconfigure,namespace", [(False, None), (True, None), (True, "foo"),], + "reconfigure,namespace", + [ + (False, None), + (True, None), + (True, "foo"), + ], ) def test_load_config(self, reconfigure, namespace): 
autospec_method(self.mcp.apply_config) @@ -52,11 +62,19 @@ def test_load_config(self, reconfigure, namespace): self.mcp._load_config(reconfigure, namespace) self.mcp.state_watcher.disabled.assert_called_with() self.mcp.apply_config.assert_called_with( - self.mcp.config.load.return_value, reconfigure=reconfigure, namespace_to_reconfigure=namespace, + self.mcp.config.load.return_value, + reconfigure=reconfigure, + namespace_to_reconfigure=namespace, ) @pytest.mark.parametrize( - "reconfigure,namespace", [(False, None), (True, None), (True, "foo"), (True, "MASTER"),], + "reconfigure,namespace", + [ + (False, None), + (True, None), + (True, "foo"), + (True, "MASTER"), + ], ) @mock.patch("tron.mcp.KubernetesClusterRepository", autospec=True) @mock.patch("tron.mcp.MesosClusterRepository", autospec=True) @@ -67,14 +85,22 @@ def test_apply_config(self, mock_repo, mock_cluster_repo, mock_k8s_cluster_repo, autospec_method(self.mcp.jobs.update_from_config) autospec_method(self.mcp.build_job_scheduler_factory) self.mcp.apply_config(config_container, reconfigure, namespace) - self.mcp.state_watcher.update_from_config.assert_called_with(master_config.state_persistence,) + self.mcp.state_watcher.update_from_config.assert_called_with( + master_config.state_persistence, + ) assert_equal(self.mcp.context.base, master_config.command_context) mock_repo.update_from_config.assert_called_with( - master_config.nodes, master_config.node_pools, master_config.ssh_options, + master_config.nodes, + master_config.node_pools, + master_config.ssh_options, + ) + mock_cluster_repo.configure.assert_called_with( + master_config.mesos_options, + ) + mock_k8s_cluster_repo.configure.assert_called_with( + master_config.k8s_options, ) - mock_cluster_repo.configure.assert_called_with(master_config.mesos_options,) - mock_k8s_cluster_repo.configure.assert_called_with(master_config.k8s_options,) self.mcp.build_job_scheduler_factory(master_config, mock.Mock()) expected_namespace_to_update = None if namespace == "MASTER" else namespace @@ -85,7 +111,8 @@ def test_apply_config(self, mock_repo, mock_cluster_repo, mock_k8s_cluster_repo, expected_namespace_to_update, ) self.mcp.state_watcher.watch_all.assert_called_once_with( - self.mcp.jobs.update_from_config.return_value, mock.ANY, + self.mcp.jobs.update_from_config.return_value, + mock.ANY, ) def test_update_state_watcher_config_changed(self): @@ -97,9 +124,12 @@ def test_update_state_watcher_config_changed(self): } state_config = mock.Mock() self.mcp.update_state_watcher_config(state_config) - self.mcp.state_watcher.update_from_config.assert_called_with(state_config,) + self.mcp.state_watcher.update_from_config.assert_called_with( + state_config, + ) assert_equal( - self.mcp.state_watcher.save_job.mock_calls, [mock.call(j.job) for j in self.mcp.jobs], + self.mcp.state_watcher.save_job.mock_calls, + [mock.call(j.job) for j in self.mcp.jobs], ) def test_update_state_watcher_config_no_change(self): @@ -115,9 +145,14 @@ class TestMasterControlProgramRestoreState(TestCase): def setup_mcp(self): self.working_dir = tempfile.mkdtemp() self.config_path = tempfile.mkdtemp() - self.mcp = mcp.MasterControlProgram(self.working_dir, self.config_path,) + self.mcp = mcp.MasterControlProgram( + self.working_dir, + self.config_path, + ) self.mcp.jobs = mock.create_autospec(JobCollection) - self.mcp.state_watcher = mock.create_autospec(statemanager.StateChangeWatcher,) + self.mcp.state_watcher = mock.create_autospec( + statemanager.StateChangeWatcher, + ) @teardown def teardown_mcp(self): @@ -137,7 
+172,8 @@ def test_restore_state(self, mock_cluster_repo): self.mcp.restore_state(action_runner) mock_cluster_repo.restore_state.assert_called_with(mesos_state_data) self.mcp.jobs.restore_state.assert_called_with( - job_state_data, action_runner, + job_state_data, + action_runner, ) diff --git a/tests/mesos_test.py b/tests/mesos_test.py index 38f997695..d109bccaa 100644 --- a/tests/mesos_test.py +++ b/tests/mesos_test.py @@ -18,7 +18,11 @@ def mock_cluster(self): def init_cluster(*args, **kwargs): return mock.MagicMock(spec_set=MesosCluster) - with mock.patch("tron.mesos.MesosCluster", side_effect=init_cluster, autospec=True,) as self.cluster_cls: + with mock.patch( + "tron.mesos.MesosCluster", + side_effect=init_cluster, + autospec=True, + ) as self.cluster_cls: yield def test_get_cluster_repeated_mesos_address(self): @@ -48,7 +52,9 @@ def test_configure(self): offer_timeout=1000, ) with mock.patch( - "tron.mesos.get_secret_from_file", autospec=True, return_value="test-secret", + "tron.mesos.get_secret_from_file", + autospec=True, + return_value="test-secret", ): MesosClusterRepository.configure(options) @@ -56,7 +62,9 @@ def test_configure(self): for cluster in clusters: cluster.set_enabled.assert_called_once_with(False) cluster.configure_tasks.assert_called_once_with( - default_volumes=[expected_volume], dockercfg_location="auth", offer_timeout=1000, + default_volumes=[expected_volume], + dockercfg_location="auth", + offer_timeout=1000, ) # Next cluster we get should be initialized with the same settings @@ -76,7 +84,12 @@ def test_configure(self): def mock_task_event( - task_id, platform_type, raw=None, terminal=False, success=False, **kwargs, + task_id, + platform_type, + raw=None, + terminal=False, + success=False, + **kwargs, ): return mock.MagicMock( kind="task", @@ -96,7 +109,9 @@ def setup(self): self.action_run_id = "my_service.job.1.action" self.task_id = "123abcuuid" with mock.patch( - "tron.mesos.logging.getLogger", return_value=mock.Mock(handlers=[mock.Mock()]), autospec=None, + "tron.mesos.logging.getLogger", + return_value=mock.Mock(handlers=[mock.Mock()]), + autospec=None, ): self.task = MesosTask( id=self.action_run_id, @@ -124,55 +139,92 @@ def test_aws_credentials_redacted(self): assert all(["baz" in text[0][0] for text in self.task.log.info.call_args_list]) def test_handle_staging(self): - event = mock_task_event(task_id=self.task_id, platform_type="staging",) + event = mock_task_event( + task_id=self.task_id, + platform_type="staging", + ) self.task.handle_event(event) assert self.task.state == MesosTask.PENDING def test_handle_starting(self): - event = mock_task_event(task_id=self.task_id, platform_type="starting",) + event = mock_task_event( + task_id=self.task_id, + platform_type="starting", + ) self.task.handle_event(event) assert self.task.state == MesosTask.RUNNING def test_handle_running(self): - event = mock_task_event(task_id=self.task_id, platform_type="running",) + event = mock_task_event( + task_id=self.task_id, + platform_type="running", + ) self.task.handle_event(event) assert self.task.state == MesosTask.RUNNING def test_handle_running_for_other_task(self): - event = mock_task_event(task_id="other321", platform_type="running",) + event = mock_task_event( + task_id="other321", + platform_type="running", + ) self.task.handle_event(event) assert self.task.state == MesosTask.PENDING def test_handle_finished(self): self.task.started() - event = mock_task_event(task_id=self.task_id, platform_type="finished", terminal=True, success=True,) + event = 
mock_task_event( + task_id=self.task_id, + platform_type="finished", + terminal=True, + success=True, + ) self.task.handle_event(event) assert self.task.is_complete def test_handle_failed(self): self.task.started() - event = mock_task_event(task_id=self.task_id, platform_type="failed", terminal=True, success=False,) + event = mock_task_event( + task_id=self.task_id, + platform_type="failed", + terminal=True, + success=False, + ) self.task.handle_event(event) assert self.task.is_failed assert self.task.is_done def test_handle_killed(self): self.task.started() - event = mock_task_event(task_id=self.task_id, platform_type="killed", terminal=True, success=False,) + event = mock_task_event( + task_id=self.task_id, + platform_type="killed", + terminal=True, + success=False, + ) self.task.handle_event(event) assert self.task.is_failed assert self.task.is_done def test_handle_lost(self): self.task.started() - event = mock_task_event(task_id=self.task_id, platform_type="lost", terminal=True, success=False,) + event = mock_task_event( + task_id=self.task_id, + platform_type="lost", + terminal=True, + success=False, + ) self.task.handle_event(event) assert self.task.is_unknown assert self.task.is_done def test_handle_error(self): self.task.started() - event = mock_task_event(task_id=self.task_id, platform_type="error", terminal=True, success=False,) + event = mock_task_event( + task_id=self.task_id, + platform_type="error", + terminal=True, + success=False, + ) self.task.handle_event(event) assert self.task.is_failed assert self.task.is_done @@ -192,20 +244,46 @@ def test_handle_terminal_event_offer_timeout(self): assert self.task.is_done def test_handle_success_sequence(self): - self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="staging",),) - self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="starting",),) - self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="running",),) self.task.handle_event( - mock_task_event(task_id=self.task_id, platform_type="finished", terminal=True, success=True,), + mock_task_event( + task_id=self.task_id, + platform_type="staging", + ), + ) + self.task.handle_event( + mock_task_event( + task_id=self.task_id, + platform_type="starting", + ), + ) + self.task.handle_event( + mock_task_event( + task_id=self.task_id, + platform_type="running", + ), + ) + self.task.handle_event( + mock_task_event( + task_id=self.task_id, + platform_type="finished", + terminal=True, + success=True, + ), ) assert self.task.is_complete def test_log_event_error(self): with mock.patch.object(self.task, "log_event_info",) as mock_log_event, mock.patch.object( - self.task.log, "warning", + self.task.log, + "warning", ) as mock_log: mock_log_event.side_effect = Exception - self.task.handle_event(mock_task_event(task_id=self.task_id, platform_type="running",),) + self.task.handle_event( + mock_task_event( + task_id=self.task_id, + platform_type="running", + ), + ) assert mock_log_event.called assert mock_log.called assert self.task.state == MesosTask.RUNNING @@ -226,15 +304,18 @@ class TestMesosCluster(TestCase): @setup_teardown def setup_mocks(self): with mock.patch("tron.mesos.PyDeferredQueue", autospec=True,) as queue_cls, mock.patch( - "tron.mesos.TaskProcessor", autospec=True, + "tron.mesos.TaskProcessor", + autospec=True, ) as processor_cls, mock.patch("tron.mesos.Subscription", autospec=True,) as runner_cls, mock.patch( - "tron.mesos.get_mesos_leader", autospec=True, + "tron.mesos.get_mesos_leader", + autospec=True, ) as 
mock_get_leader: self.mock_queue = queue_cls.return_value self.mock_processor = processor_cls.return_value self.mock_runner_cls = runner_cls self.mock_runner_cls.return_value.configure_mock( - stopping=False, TASK_CONFIG_INTERFACE=mock.Mock(), + stopping=False, + TASK_CONFIG_INTERFACE=mock.Mock(), ) self.mock_get_leader = mock_get_leader yield @@ -255,7 +336,8 @@ def test_init(self, mock_socket): assert_equal(cluster.processor, self.mock_processor) self.mock_get_leader.assert_called_once_with( - "mesos-cluster-a.me", 5000, + "mesos-cluster-a.me", + 5000, ) self.mock_processor.executor_from_config.assert_has_calls( [ @@ -271,18 +353,25 @@ def test_init(self, mock_socket): "failover": True, }, ), - mock.call(provider="logging", provider_config=mock.ANY,), + mock.call( + provider="logging", + provider_config=mock.ANY, + ), ] ) self.mock_runner_cls.assert_called_once_with( - self.mock_processor.executor_from_config.return_value, self.mock_queue, + self.mock_processor.executor_from_config.return_value, + self.mock_queue, ) assert_equal(cluster.runner, self.mock_runner_cls.return_value) get_event_deferred = cluster.deferred assert_equal(get_event_deferred, self.mock_queue.get.return_value) get_event_deferred.addCallback.assert_has_calls( - [mock.call(cluster._process_event), mock.call(cluster.handle_next_event),] + [ + mock.call(cluster._process_event), + mock.call(cluster.handle_next_event), + ] ) def test_init_disabled(self): @@ -310,14 +399,18 @@ def test_set_enabled_on(self): # Basically the same as regular initialization assert_equal(self.mock_processor.executor_from_config.call_count, 2) self.mock_runner_cls.assert_called_once_with( - self.mock_processor.executor_from_config.return_value, self.mock_queue, + self.mock_processor.executor_from_config.return_value, + self.mock_queue, ) assert_equal(cluster.runner, self.mock_runner_cls.return_value) get_event_deferred = cluster.deferred assert_equal(get_event_deferred, self.mock_queue.get.return_value) get_event_deferred.addCallback.assert_has_calls( - [mock.call(cluster._process_event), mock.call(cluster.handle_next_event),] + [ + mock.call(cluster._process_event), + mock.call(cluster.handle_next_event), + ] ) def test_set_enabled_on_already(self): @@ -328,14 +421,27 @@ def test_set_enabled_on_already(self): assert_equal(self.mock_runner_cls.call_count, 1) def test_configure_tasks(self): - cluster = MesosCluster("mesos-cluster-a.me", default_volumes=[], dockercfg_location="first", offer_timeout=60,) + cluster = MesosCluster( + "mesos-cluster-a.me", + default_volumes=[], + dockercfg_location="first", + offer_timeout=60, + ) assert_equal(cluster.default_volumes, []) assert_equal(cluster.dockercfg_location, "first") assert_equal(cluster.offer_timeout, 60) - expected_volumes = [{"container_path": "/tmp", "host_path": "/host", "mode": "RO",}] + expected_volumes = [ + { + "container_path": "/tmp", + "host_path": "/host", + "mode": "RO", + } + ] cluster.configure_tasks( - default_volumes=expected_volumes, dockercfg_location="second", offer_timeout=300, + default_volumes=expected_volumes, + dockercfg_location="second", + offer_timeout=300, ) assert_equal(cluster.default_volumes, expected_volumes) assert_equal(cluster.dockercfg_location, "second") @@ -347,13 +453,17 @@ def test_submit(self): mock_task = mock.MagicMock(get_config=mock.Mock(return_value={"environment": {}})) mock_task.get_mesos_id.return_value = "this_task" with mock.patch( - "tron.mesos.get_clusterman_metrics", return_value=(mock_clusterman_metrics), autospec=True, + 
"tron.mesos.get_clusterman_metrics", + return_value=(mock_clusterman_metrics), + autospec=True, ): cluster.submit(mock_task) assert "this_task" in cluster.tasks assert cluster.tasks["this_task"] == mock_task - cluster.runner.run.assert_called_once_with(mock_task.get_config.return_value,) + cluster.runner.run.assert_called_once_with( + mock_task.get_config.return_value, + ) assert mock_clusterman_metrics.ClustermanMetricsBotoClient.call_count == 0 def test_submit_with_clusterman(self): @@ -372,15 +482,20 @@ def test_submit_with_clusterman(self): ) mock_task.get_mesos_id.return_value = "this_task" with mock.patch( - "tron.mesos.get_clusterman_metrics", return_value=mock_clusterman_metrics, autospec=True, + "tron.mesos.get_clusterman_metrics", + return_value=mock_clusterman_metrics, + autospec=True, ), staticconf.testing.MockConfiguration( - {"clusters": {"fake-cluster": {"aws_region": "fake-region"}}}, namespace="clusterman", + {"clusters": {"fake-cluster": {"aws_region": "fake-region"}}}, + namespace="clusterman", ): cluster.submit(mock_task) assert "this_task" in cluster.tasks assert cluster.tasks["this_task"] == mock_task - cluster.runner.run.assert_called_once_with(mock_task.get_config.return_value,) + cluster.runner.run.assert_called_once_with( + mock_task.get_config.return_value, + ) assert mock_clusterman_metrics.ClustermanMetricsBotoClient.call_count == 1 def test_submit_disabled(self): @@ -388,7 +503,9 @@ def test_submit_disabled(self): mock_task = mock.MagicMock() mock_task.get_mesos_id.return_value = "this_task" with mock.patch( - "tron.mesos.get_clusterman_metrics", return_value=(None, None), autospec=True, + "tron.mesos.get_clusterman_metrics", + return_value=(None, None), + autospec=True, ): cluster.submit(mock_task) @@ -403,7 +520,9 @@ def test_recover(self): assert "this_task" in cluster.tasks assert cluster.tasks["this_task"] == mock_task - cluster.runner.reconcile.assert_called_once_with(mock_task.get_config.return_value,) + cluster.runner.reconcile.assert_called_once_with( + mock_task.get_config.return_value, + ) assert mock_task.started.call_count == 1 def test_recover_disabled(self): @@ -448,7 +567,9 @@ def test_create_task_defaults(self, mock_task): ) assert_equal(task, mock_task.return_value) mock_task.assert_called_once_with( - "action_c", cluster.runner.TASK_CONFIG_INTERFACE.return_value, mock_serializer, + "action_c", + cluster.runner.TASK_CONFIG_INTERFACE.return_value, + mock_serializer, ) @mock.patch("tron.mesos.MesosTask", autospec=True) @@ -475,7 +596,9 @@ def test_create_task_with_task_id(self, mock_task): task_config = cluster.runner.TASK_CONFIG_INTERFACE.return_value task_config.set_task_id.assert_called_once_with(task_id) mock_task.assert_called_once_with( - "action_c", task_config.set_task_id.return_value, mock_serializer, + "action_c", + task_config.set_task_id.return_value, + mock_serializer, ) @mock.patch("tron.mesos.MesosTask", autospec=True) @@ -503,8 +626,16 @@ def test_create_task_with_configuration(self, mock_task): cluster = MesosCluster( "mesos-cluster-a.me", default_volumes=[ - {"container_path": "/tmp", "host_path": "/host", "mode": "RO",}, - {"container_path": "/other", "host_path": "/other", "mode": "RW",}, + { + "container_path": "/tmp", + "host_path": "/host", + "mode": "RO", + }, + { + "container_path": "/other", + "host_path": "/other", + "mode": "RW", + }, ], dockercfg_location="some_place", offer_timeout=202, @@ -521,7 +652,13 @@ def test_create_task_with_configuration(self, mock_task): docker_parameters=[], env={"TESTING": "true"}, # 
This should override the default volume for /tmp - extra_volumes=[{"container_path": "/tmp", "host_path": "/custom", "mode": "RW",},], + extra_volumes=[ + { + "container_path": "/tmp", + "host_path": "/custom", + "mode": "RW", + }, + ], serializer=mock_serializer, ) cluster.runner.TASK_CONFIG_INTERFACE.assert_called_once_with( @@ -535,15 +672,25 @@ def test_create_task_with_configuration(self, mock_task): docker_parameters=[], environment={"TESTING": "true"}, volumes=[ - {"container_path": "/tmp", "host_path": "/custom", "mode": "RW",}, - {"container_path": "/other", "host_path": "/other", "mode": "RW",}, + { + "container_path": "/tmp", + "host_path": "/custom", + "mode": "RW", + }, + { + "container_path": "/other", + "host_path": "/other", + "mode": "RW", + }, ], uris=["some_place"], offer_timeout=202, ) assert_equal(task, mock_task.return_value) mock_task.assert_called_once_with( - "action_c", cluster.runner.TASK_CONFIG_INTERFACE.return_value, mock_serializer, + "action_c", + cluster.runner.TASK_CONFIG_INTERFACE.return_value, + mock_serializer, ) def test_process_event_task(self): @@ -567,7 +714,10 @@ def test_process_event_task_id_invalid(self): assert_equal(mock_task.handle_event.call_count, 0) def test_process_event_control_stop(self): - event = mock.MagicMock(kind="control", message="stop",) + event = mock.MagicMock( + kind="control", + message="stop", + ) cluster = MesosCluster("mesos-cluster-a.me") cluster._process_event(event) assert cluster.runner.stop.call_count == 1 diff --git a/tests/metrics_test.py b/tests/metrics_test.py index 69bbdab53..7ea53b28b 100644 --- a/tests/metrics_test.py +++ b/tests/metrics_test.py @@ -12,9 +12,24 @@ def all_metrics(): def test_get_metric(all_metrics): - timer = metrics.get_metric("timer", "api.requests", {"method": "GET"}, mock.Mock(),) - same_timer = metrics.get_metric("timer", "api.requests", {"method": "GET"}, mock.Mock(),) - other_timer = metrics.get_metric("timer", "api.requests", {"method": "POST"}, mock.Mock(),) + timer = metrics.get_metric( + "timer", + "api.requests", + {"method": "GET"}, + mock.Mock(), + ) + same_timer = metrics.get_metric( + "timer", + "api.requests", + {"method": "GET"}, + mock.Mock(), + ) + other_timer = metrics.get_metric( + "timer", + "api.requests", + {"method": "POST"}, + mock.Mock(), + ) metrics.get_metric("something", "name", None, mock.Mock()) assert timer == same_timer assert other_timer != timer @@ -28,7 +43,10 @@ def test_timer(mock_get_metric): metrics.timer("my_metric", 110) metrics.timer("my_metric", 84) mock_get_metric.assert_called_with( - "timer", "my_metric", None, mock.ANY, + "timer", + "my_metric", + None, + mock.ANY, ) result = metrics.view_timer(test_metric) assert result["count"] == 2 @@ -41,7 +59,10 @@ def test_count(mock_get_metric): metrics.count("my_metric", 13) metrics.count("my_metric", -1) mock_get_metric.assert_called_with( - "counter", "my_metric", None, mock.ANY, + "counter", + "my_metric", + None, + mock.ANY, ) result = metrics.view_counter(test_metric) assert result["count"] == 12 @@ -54,7 +75,10 @@ def test_meter(mock_get_metric): metrics.meter("my_metric") metrics.meter("my_metric") mock_get_metric.assert_called_with( - "meter", "my_metric", None, mock.ANY, + "meter", + "my_metric", + None, + mock.ANY, ) result = metrics.view_meter(test_metric) assert result["count"] == 2 @@ -67,7 +91,10 @@ def test_gauge(mock_get_metric): metrics.gauge("my_metric", 23) metrics.gauge("my_metric", 102) mock_get_metric.assert_called_with( - "gauge", "my_metric", None, mock.ANY, + "gauge", + 
"my_metric", + None, + mock.ANY, ) result = metrics.view_gauge(test_metric) assert result["value"] == 102 @@ -80,7 +107,10 @@ def test_histogram(mock_get_metric): metrics.histogram("my_metric", 2) metrics.histogram("my_metric", 92) mock_get_metric.assert_called_with( - "histogram", "my_metric", None, mock.ANY, + "histogram", + "my_metric", + None, + mock.ANY, ) result = metrics.view_histogram(test_metric) assert result["count"] == 2 diff --git a/tests/mocks.py b/tests/mocks.py index bc09912ad..0416b1cbd 100644 --- a/tests/mocks.py +++ b/tests/mocks.py @@ -55,7 +55,9 @@ class MockJobRun(MagicMock): def __init__(self, *args, **kwargs): kwargs.setdefault("output_path", [tempfile.mkdtemp()]) kwargs.setdefault("action_graph", MockActionGraph()) - action_runs = MockActionRunCollection(action_graph=kwargs["action_graph"],) + action_runs = MockActionRunCollection( + action_graph=kwargs["action_graph"], + ) kwargs.setdefault("action_runs", action_runs) atexit.register(lambda: shutil.rmtree(kwargs["output_path"][0])) super().__init__(*args, **kwargs) diff --git a/tests/node_test.py b/tests/node_test.py index afb03a83f..8b44ce575 100644 --- a/tests/node_test.py +++ b/tests/node_test.py @@ -64,14 +64,18 @@ def test_update_from_config(self): node_pool_config = {"c": mock.Mock(nodes=["a", "b"])} ssh_options = mock.Mock(identities=[], known_hosts_file=None) node.NodePoolRepository.update_from_config( - node_config, node_pool_config, ssh_options, + node_config, + node_pool_config, + ssh_options, ) node_names = [node_config["a"].name, node_config["b"].name] assert_equal( - set(self.repo.pools), set(node_names + [node_pool_config["c"].name]), + set(self.repo.pools), + set(node_names + [node_pool_config["c"].name]), ) assert_equal( - set(self.repo.nodes), set(list(node_names) + list(mock_nodes.keys())), + set(self.repo.nodes), + set(list(node_names) + list(mock_nodes.keys())), ) def test_nodes_by_name(self): @@ -106,7 +110,11 @@ def test_get_public_key_not_found(self): class TestDetermineJitter(TestCase): @setup def setup_node_settings(self): - self.settings = mock.Mock(jitter_load_factor=1, jitter_min_load=4, jitter_max_delay=20,) + self.settings = mock.Mock( + jitter_load_factor=1, + jitter_min_load=4, + jitter_max_delay=20, + ) @setup_teardown def patch_random(self): @@ -130,7 +138,10 @@ def test_jitter_with_max_delay(self): def build_node( - hostname="localhost", username="theuser", name="thename", pub_key=None, + hostname="localhost", + username="theuser", + name="thename", + pub_key=None, ): config = mock.Mock(hostname=hostname, username=username, name=name) ssh_opts = mock.create_autospec(ssh.SSHAuthOptions) @@ -164,11 +175,20 @@ def test_output_logging(self): def test_from_config(self): ssh_options = self.node.conch_options - node_config = mock.Mock(hostname="localhost", username="theuser", name="thename",) + node_config = mock.Mock( + hostname="localhost", + username="theuser", + name="thename", + ) ssh_options.__getitem__.return_value = "something" public_key = mock.Mock() node_settings = mock.Mock() - new_node = node.Node.from_config(node_config, ssh_options, public_key, node_settings,) + new_node = node.Node.from_config( + node_config, + ssh_options, + public_key, + node_settings, + ) assert_equal(new_node.name, node_config.name) assert_equal(new_node.hostname, node_config.hostname) assert_equal(new_node.username, node_config.username) @@ -196,12 +216,18 @@ def test__eq__false_ssh_options_changed(self): assert_not_equal(other_node, self.node) def test_stop_not_tracked(self): - action_command = 
mock.create_autospec(actioncommand.ActionCommand, id=mock.Mock(),) + action_command = mock.create_autospec( + actioncommand.ActionCommand, + id=mock.Mock(), + ) self.node.stop(action_command) def test_stop(self): autospec_method(self.node._fail_run) - action_command = mock.create_autospec(actioncommand.ActionCommand, id=mock.Mock(),) + action_command = mock.create_autospec( + actioncommand.ActionCommand, + id=mock.Mock(), + ) self.node.run_states[action_command.id] = mock.Mock() self.node.stop(action_command) assert_equal(self.node._fail_run.call_count, 1) diff --git a/tests/sandbox.py b/tests/sandbox.py index 2aeb13c43..62b139d8f 100644 --- a/tests/sandbox.py +++ b/tests/sandbox.py @@ -159,7 +159,9 @@ def __getattr__(self, name): def verify_environment(): for env_var in ["SSH_AUTH_SOCK", "PYTHONPATH"]: if not os.environ.get(env_var): - raise TronSandboxException("Missing $%s in test environment." % env_var,) + raise TronSandboxException( + "Missing $%s in test environment." % env_var, + ) class TronSandbox: @@ -241,7 +243,9 @@ def trond(self, *args): wait_on_sandbox(lambda: bool(self.client.home())) def tronfig( - self, config_content=None, name=schema.MASTER_NAMESPACE, + self, + config_content=None, + name=schema.MASTER_NAMESPACE, ): args = ["--server", self.api_uri, name] args += ["-"] if config_content else ["-p"] diff --git a/tests/scheduler_test.py b/tests/scheduler_test.py index 13020a8e3..750bb9c70 100644 --- a/tests/scheduler_test.py +++ b/tests/scheduler_test.py @@ -105,7 +105,12 @@ def test(self): next_run_date = run_time.date() assert_equal(next_run_date, self.now.date()) - earlier_time = datetime.datetime(self.now.year, self.now.month, self.now.day, hour=13,) + earlier_time = datetime.datetime( + self.now.year, + self.now.month, + self.now.day, + hour=13, + ) assert_lte(earlier_time, run_time) @@ -120,7 +125,12 @@ def test(self): tomorrow = self.now.date() + datetime.timedelta(days=1) assert_equal(next_run_date, tomorrow) - earlier_time = datetime.datetime(year=tomorrow.year, month=tomorrow.month, day=tomorrow.day, hour=13,) + earlier_time = datetime.datetime( + year=tomorrow.year, + month=tomorrow.month, + day=tomorrow.day, + hour=13, + ) assert_lte(earlier_time, run_time) @@ -260,11 +270,14 @@ def test_parse_all(self): assert_equal(cfg.months, {3, 4, 9}) assert_equal(cfg.timestr, "00:00") assert_equal( - scheduler_from_config(config_string), scheduler_from_config(config_string), + scheduler_from_config(config_string), + scheduler_from_config(config_string), ) def test_parse_no_weekday(self): - cfg = parse_groc("1st,2nd,3rd,10th day of march,apr,September at 00:00",) + cfg = parse_groc( + "1st,2nd,3rd,10th day of march,apr,September at 00:00", + ) assert_equal(cfg.ordinals, None) assert_equal(cfg.monthdays, {1, 2, 3, 10}) assert_equal(cfg.weekdays, None) @@ -322,7 +335,12 @@ def test_weekly(self): assert_gte(next_run_date, self.now) assert_equal( - calendar.weekday(next_run_date.year, next_run_date.month, next_run_date.day,), 0, + calendar.weekday( + next_run_date.year, + next_run_date.month, + next_run_date.day, + ), + 0, ) def test_weekly_in_month(self): @@ -335,7 +353,12 @@ def test_weekly_in_month(self): assert_equal(next_run_date.hour, 0) assert_equal(next_run_date.minute, 1) assert_equal( - calendar.weekday(next_run_date.year, next_run_date.month, next_run_date.day,), 0, + calendar.weekday( + next_run_date.year, + next_run_date.month, + next_run_date.day, + ), + 0, ) def test_monthly(self): diff --git a/tests/serialize/filehandler_test.py 
b/tests/serialize/filehandler_test.py index 8c695a203..c32b31d00 100644 --- a/tests/serialize/filehandler_test.py +++ b/tests/serialize/filehandler_test.py @@ -204,12 +204,14 @@ def test_update(self): fh_wrapper1 = self.manager.open(self.file1.name) fh_wrapper2 = self.manager.open(self.file2.name) assert_equal( - list(self.manager.cache.keys()), [fh_wrapper1.name, fh_wrapper2.name], + list(self.manager.cache.keys()), + [fh_wrapper1.name, fh_wrapper2.name], ) self.manager.update(fh_wrapper1) assert_equal( - list(self.manager.cache.keys()), [fh_wrapper2.name, fh_wrapper1.name], + list(self.manager.cache.keys()), + [fh_wrapper2.name, fh_wrapper1.name], ) diff --git a/tests/serialize/runstate/dynamodb_state_store_test.py b/tests/serialize/runstate/dynamodb_state_store_test.py index 8a41fa939..44c342f92 100644 --- a/tests/serialize/runstate/dynamodb_state_store_test.py +++ b/tests/serialize/runstate/dynamodb_state_store_test.py @@ -35,7 +35,12 @@ def update_item(item): expression_attribute_names = item.get("ExpressionAttributeNames", {}) expression_attribute_values = item.get("ExpressionAttributeValues", {}) return self.dynamodb_backend.update_item( - name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, + name, + key, + update_expression, + attribute_updates, + expression_attribute_names, + expression_attribute_values, ) transact_items = self.body["TransactItems"] @@ -54,7 +59,9 @@ def update_item(item): @pytest.fixture(autouse=True) def store(): with mock.patch( - "moto.dynamodb2.responses.DynamoHandler.transact_write_items", new=mock_transact_write_items, create=True, + "moto.dynamodb2.responses.DynamoHandler.transact_write_items", + new=mock_transact_write_items, + create=True, ), mock_dynamodb2(): dynamodb = boto3.resource("dynamodb", region_name="us-west-2") table_name = "tmp" @@ -62,14 +69,29 @@ def store(): store.table = dynamodb.create_table( TableName=table_name, KeySchema=[ - {"AttributeName": "key", "KeyType": "HASH",}, # Partition key - {"AttributeName": "index", "KeyType": "RANGE",}, # Sort key + { + "AttributeName": "key", + "KeyType": "HASH", + }, # Partition key + { + "AttributeName": "index", + "KeyType": "RANGE", + }, # Sort key ], AttributeDefinitions=[ - {"AttributeName": "key", "AttributeType": "S",}, - {"AttributeName": "index", "AttributeType": "N",}, + { + "AttributeName": "key", + "AttributeType": "S", + }, + { + "AttributeName": "index", + "AttributeType": "N", + }, ], - ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10,}, + ProvisionedThroughput={ + "ReadCapacityUnits": 10, + "WriteCapacityUnits": 10, + }, ) store.client = boto3.client("dynamodb", region_name="us-west-2") # Has to be yield here for moto to work @@ -90,8 +112,14 @@ def large_object(): class TestDynamoDBStateStore: def test_save(self, store, small_object, large_object): key_value_pairs = [ - (store.build_key("DynamoDBTest", "two"), small_object,), - (store.build_key("DynamoDBTest2", "four"), small_object,), + ( + store.build_key("DynamoDBTest", "two"), + small_object, + ), + ( + store.build_key("DynamoDBTest2", "four"), + small_object, + ), ] store.save(key_value_pairs) store._consume_save_queue() @@ -107,14 +135,23 @@ def test_save(self, store, small_object, large_object): def test_delete_if_val_is_none(self, store, small_object, large_object): key_value_pairs = [ - (store.build_key("DynamoDBTest", "two"), small_object,), - (store.build_key("DynamoDBTest2", "four"), small_object,), + ( + store.build_key("DynamoDBTest", "two"), 
+ small_object, + ), + ( + store.build_key("DynamoDBTest2", "four"), + small_object, + ), ] store.save(key_value_pairs) store._consume_save_queue() delete = [ - (store.build_key("DynamoDBTest", "two"), None,), + ( + store.build_key("DynamoDBTest", "two"), + None, + ), ] store.save(delete) store._consume_save_queue() @@ -130,7 +167,10 @@ def test_delete_if_val_is_none(self, store, small_object, large_object): def test_save_more_than_4KB(self, store, small_object, large_object): key_value_pairs = [ - (store.build_key("DynamoDBTest", "two"), large_object,), + ( + store.build_key("DynamoDBTest", "two"), + large_object, + ), ] store.save(key_value_pairs) store._consume_save_queue() @@ -179,7 +219,8 @@ def test_delete_item(self, store, small_object, large_object): def test_retry_saving(self, store, small_object, large_object): with mock.patch( - "moto.dynamodb2.responses.DynamoHandler.transact_write_items", side_effect=KeyError("foo"), + "moto.dynamodb2.responses.DynamoHandler.transact_write_items", + side_effect=KeyError("foo"), ) as mock_failed_write: keys = [store.build_key("thing", i) for i in range(1)] value = pickle.loads(small_object) @@ -191,9 +232,24 @@ def test_retry_saving(self, store, small_object, large_object): def test_retry_reading(self, store, small_object, large_object): unprocessed_value = { - "Responses": {store.name: [{"index": {"N": "0"}, "key": {"S": "thing 0"},},],}, + "Responses": { + store.name: [ + { + "index": {"N": "0"}, + "key": {"S": "thing 0"}, + }, + ], + }, "UnprocessedKeys": { - store.name: {"ConsistentRead": True, "Keys": [{"index": {"N": "0"}, "key": {"S": "thing 0"},}],}, + store.name: { + "ConsistentRead": True, + "Keys": [ + { + "index": {"N": "0"}, + "key": {"S": "thing 0"}, + } + ], + }, }, "ResponseMetadata": {}, } @@ -201,7 +257,11 @@ def test_retry_reading(self, store, small_object, large_object): value = pickle.loads(small_object) pairs = zip(keys, (value for i in range(len(keys)))) store.save(pairs) - with mock.patch.object(store.client, "batch_get_item", return_value=unprocessed_value,) as mock_failed_read: + with mock.patch.object( + store.client, + "batch_get_item", + return_value=unprocessed_value, + ) as mock_failed_read: try: store.restore(keys) except Exception: diff --git a/tests/serialize/runstate/shelvestore_test.py b/tests/serialize/runstate/shelvestore_test.py index d59e99770..e7d9e818b 100644 --- a/tests/serialize/runstate/shelvestore_test.py +++ b/tests/serialize/runstate/shelvestore_test.py @@ -28,8 +28,18 @@ def test__init__(self): def test_save(self): key_value_pairs = [ - (ShelveKey("one", "two"), {"this": "data",},), - (ShelveKey("three", "four"), {"this": "data2",},), + ( + ShelveKey("one", "two"), + { + "this": "data", + }, + ), + ( + ShelveKey("three", "four"), + { + "this": "data2", + }, + ), ] self.store.save(key_value_pairs) self.store.cleanup() @@ -41,10 +51,23 @@ def test_save(self): def test_delete(self): key_value_pairs = [ - (ShelveKey("one", "two"), {"this": "data",},), - (ShelveKey("three", "four"), {"this": "data2",},), + ( + ShelveKey("one", "two"), + { + "this": "data", + }, + ), + ( + ShelveKey("three", "four"), + { + "this": "data2", + }, + ), # Delete first key - (ShelveKey("one", "two"), None,), + ( + ShelveKey("one", "two"), + None, + ), ] self.store.save(key_value_pairs) self.store.cleanup() diff --git a/tests/serialize/runstate/statemanager_test.py b/tests/serialize/runstate/statemanager_test.py index 3f3083de4..bbcc38212 100644 --- a/tests/serialize/runstate/statemanager_test.py +++ 
b/tests/serialize/runstate/statemanager_test.py @@ -29,7 +29,11 @@ def test_from_config_shelve(self): tmpdir = tempfile.mkdtemp() try: fname = os.path.join(tmpdir, "state") - config = schema.ConfigState(store_type="shelve", name=fname, buffer_size=0,) + config = schema.ConfigState( + store_type="shelve", + name=fname, + buffer_size=0, + ) manager = PersistenceManagerFactory.from_config(config) store = manager._impl assert_equal(store.filename, config.name) @@ -50,7 +54,9 @@ def test_validate_metadata_no_state_data(self): def test_validate_metadata_mismatch(self): metadata = {"version": (200, 1, 1)} assert_raises( - VersionMismatchError, StateMetadata.validate_metadata, metadata, + VersionMismatchError, + StateMetadata.validate_metadata, + metadata, ) @@ -98,15 +104,24 @@ def test_keys_for_items(self): def test_restore(self): job_names = ["one", "two"] with mock.patch.object( - self.manager, "_restore_metadata", autospec=True, + self.manager, + "_restore_metadata", + autospec=True, ) as mock_restore_metadata, mock.patch.object( - self.manager, "_restore_dicts", autospec=True, + self.manager, + "_restore_dicts", + autospec=True, ) as mock_restore_dicts, mock.patch.object( - self.manager, "_restore_runs_for_job", autospect=True, + self.manager, + "_restore_runs_for_job", + autospect=True, ) as mock_restore_runs: mock_restore_dicts.side_effect = [ # _restore_dicts for JOB_STATE - {"one": {"key": "val1"}, "two": {"key": "val2"},}, + { + "one": {"key": "val1"}, + "two": {"key": "val2"}, + }, # _restore_dicts for MESOS_STATE {"frameworks": "clusters"}, ] @@ -128,7 +143,11 @@ def test_restore(self): def test_restore_runs_for_job(self): job_state = {"run_nums": [2, 3], "enabled": True} - with mock.patch.object(self.manager, "_restore_dicts", autospec=True,) as mock_restore_dicts: + with mock.patch.object( + self.manager, + "_restore_dicts", + autospec=True, + ) as mock_restore_dicts: mock_restore_dicts.side_effect = [{"job_a.2": "two"}, {"job_a.3": "three"}] runs = self.manager._restore_runs_for_job("job_a", job_state) @@ -140,7 +159,11 @@ def test_restore_runs_for_job(self): def test_restore_runs_for_job_one_missing(self): job_state = {"run_nums": [2, 3], "enabled": True} - with mock.patch.object(self.manager, "_restore_dicts", autospec=True,) as mock_restore_dicts: + with mock.patch.object( + self.manager, + "_restore_dicts", + autospec=True, + ) as mock_restore_dicts: mock_restore_dicts.side_effect = [{}, {"job_a.3": "three"}] runs = self.manager._restore_runs_for_job("job_a", job_state) @@ -155,13 +178,21 @@ def test_restore_dicts(self): autospec_method(self.manager._keys_for_items) self.manager._keys_for_items.return_value = dict(enumerate(names)) self.store.restore.return_value = { - 0: {"state": "data",}, - 1: {"state": "2data",}, + 0: { + "state": "data", + }, + 1: { + "state": "2data", + }, } state_data = self.manager._restore_dicts("type", names) expected = { - names[0]: {"state": "data",}, - names[1]: {"state": "2data",}, + names[0]: { + "state": "data", + }, + names[1]: { + "state": "2data", + }, } assert_equal(expected, state_data) @@ -174,7 +205,11 @@ def test_save(self): def test_save_failed(self): self.store.save.side_effect = PersistenceStoreError("blah") assert_raises( - PersistenceStoreError, self.manager.save, None, None, None, + PersistenceStoreError, + self.manager.save, + None, + None, + None, ) def test_save_while_disabled(self): @@ -227,7 +262,8 @@ def test_update_from_config_no_change(self): assert not self.watcher.shutdown.mock_calls @mock.patch( - 
"tron.serialize.runstate.statemanager.PersistenceManagerFactory", autospec=True, + "tron.serialize.runstate.statemanager.PersistenceManagerFactory", + autospec=True, ) def test_update_from_config_changed(self, mock_factory): state_config = mock.Mock() @@ -236,7 +272,8 @@ def test_update_from_config_changed(self, mock_factory): assert_equal(self.watcher.config, state_config) self.watcher.shutdown.assert_called_with() assert_equal( - self.watcher.state_manager, mock_factory.from_config.return_value, + self.watcher.state_manager, + mock_factory.from_config.return_value, ) mock_factory.from_config.assert_called_with(state_config) @@ -244,17 +281,22 @@ def test_save_job(self): mock_job = mock.Mock() self.watcher.save_job(mock_job) self.watcher.state_manager.save.assert_called_with( - runstate.JOB_STATE, mock_job.name, mock_job.state_data, + runstate.JOB_STATE, + mock_job.name, + mock_job.state_data, ) @mock.patch( - "tron.serialize.runstate.statemanager.StateMetadata", autospec=None, + "tron.serialize.runstate.statemanager.StateMetadata", + autospec=None, ) def test_save_metadata(self, mock_state_metadata): self.watcher.save_metadata() meta_data = mock_state_metadata.return_value self.watcher.state_manager.save.assert_called_with( - runstate.MCP_STATE, meta_data.name, meta_data.state_data, + runstate.MCP_STATE, + meta_data.name, + meta_data.state_data, ) def test_shutdown(self): @@ -273,17 +315,21 @@ def test_restore(self): def test_handler_mesos_change(self): self.watcher.handler( - observable=MesosClusterRepository, event=None, + observable=MesosClusterRepository, + event=None, ) self.watcher.state_manager.save.assert_called_with( - runstate.MESOS_STATE, MesosClusterRepository.name, MesosClusterRepository.state_data, + runstate.MESOS_STATE, + MesosClusterRepository.name, + MesosClusterRepository.state_data, ) def test_handler_job_state_change(self): mock_job = mock.Mock(spec_set=Job) with mock.patch.object(self.watcher, "save_job") as mock_save_job: self.watcher.handler( - observable=mock_job, event=Job.NOTIFY_STATE_CHANGE, + observable=mock_job, + event=Job.NOTIFY_STATE_CHANGE, ) mock_save_job.assert_called_with(mock_job) @@ -291,18 +337,22 @@ def test_handler_job_new_run(self): mock_job = mock.Mock(spec_set=Job) mock_job_run = mock.Mock(spec_set=JobRun) with mock.patch.object(self.watcher, "save_job",) as mock_save_job, mock.patch.object( - self.watcher, "watch", + self.watcher, + "watch", ) as mock_watch: # Error: No job run in event data, do nothing self.watcher.handler( - observable=mock_job, event=Job.NOTIFY_NEW_RUN, + observable=mock_job, + event=Job.NOTIFY_NEW_RUN, ) assert mock_watch.call_count == 0 assert mock_save_job.call_count == 0 # Correct case self.watcher.handler( - observable=mock_job, event=Job.NOTIFY_NEW_RUN, event_data=mock_job_run, + observable=mock_job, + event=Job.NOTIFY_NEW_RUN, + event_data=mock_job_run, ) mock_watch.assert_called_with(mock_job_run) assert mock_save_job.call_count == 0 @@ -310,19 +360,24 @@ def test_handler_job_new_run(self): def test_handler_job_run_state_change(self): mock_job_run = mock.MagicMock(spec_set=JobRun) self.watcher.handler( - observable=mock_job_run, event=JobRun.NOTIFY_STATE_CHANGED, + observable=mock_job_run, + event=JobRun.NOTIFY_STATE_CHANGED, ) self.watcher.state_manager.save.assert_called_with( - runstate.JOB_RUN_STATE, mock_job_run.name, mock_job_run.state_data, + runstate.JOB_RUN_STATE, + mock_job_run.name, + mock_job_run.state_data, ) def test_handler_job_run_removed(self): mock_job_run = mock.MagicMock(spec_set=JobRun) 
self.watcher.handler( - observable=mock_job_run, event=JobRun.NOTIFY_REMOVED, + observable=mock_job_run, + event=JobRun.NOTIFY_REMOVED, ) self.watcher.state_manager.delete.assert_called_with( - runstate.JOB_RUN_STATE, mock_job_run.name, + runstate.JOB_RUN_STATE, + mock_job_run.name, ) diff --git a/tests/serialize/runstate/yamlstore_test.py b/tests/serialize/runstate/yamlstore_test.py index 307aaa18c..525b4b6a6 100644 --- a/tests/serialize/runstate/yamlstore_test.py +++ b/tests/serialize/runstate/yamlstore_test.py @@ -16,9 +16,15 @@ def setup_store(self): self.filename = os.path.join(tempfile.gettempdir(), "yaml_state") self.store = yamlstore.YamlStateStore(self.filename) self.test_data = { - "one": {"a": 1,}, - "two": {"b": 2,}, - "three": {"c": 3,}, + "one": { + "a": 1, + }, + "two": { + "b": 2, + }, + "three": { + "c": 3, + }, } @teardown diff --git a/tests/ssh_test.py b/tests/ssh_test.py index 5b95d5ce9..04e5fffe2 100644 --- a/tests/ssh_test.py +++ b/tests/ssh_test.py @@ -16,7 +16,11 @@ def setup_transport(self): self.username = "username" self.options = mock.Mock() self.expected_pub_key = mock.Mock() - self.transport = ssh.ClientTransport(self.username, self.options, self.expected_pub_key,) + self.transport = ssh.ClientTransport( + self.username, + self.options, + self.expected_pub_key, + ) def test_verifyHostKey_missing_pub_key(self): self.transport.expected_pub_key = None @@ -64,12 +68,14 @@ def test_from_config_both(self): def test__eq__true(self): config = mock.Mock(agent=True, identities=["one", "two"]) assert_equal( - ssh.SSHAuthOptions.from_config(config), ssh.SSHAuthOptions.from_config(config), + ssh.SSHAuthOptions.from_config(config), + ssh.SSHAuthOptions.from_config(config), ) def test__eq__false(self): config = mock.Mock(agent=True, identities=["one", "two"]) second_config = mock.Mock(agent=True, identities=["two"]) assert_not_equal( - ssh.SSHAuthOptions.from_config(config), ssh.SSHAuthOptions.from_config(second_config), + ssh.SSHAuthOptions.from_config(config), + ssh.SSHAuthOptions.from_config(second_config), ) diff --git a/tests/trond_test.py b/tests/trond_test.py index b54fcadf3..e3c2c18ae 100644 --- a/tests/trond_test.py +++ b/tests/trond_test.py @@ -66,7 +66,8 @@ def test_end_to_end_basic(self): client = self.sandbox.client assert_equal( - self.client.config("MASTER")["config"], SINGLE_ECHO_CONFIG, + self.client.config("MASTER")["config"], + SINGLE_ECHO_CONFIG, ) # reconfigure and confirm results @@ -94,23 +95,30 @@ def wait_on_cleanup(): sandbox.wait_on_sandbox(wait_on_cleanup) echo_action_run = client.action_runs(action_url) - another_action_url = client.get_url("MASTER.echo_job.1.another_echo_action",) + another_action_url = client.get_url( + "MASTER.echo_job.1.another_echo_action", + ) other_act_run = client.action_runs(another_action_url) assert_equal( - echo_action_run["state"], actionrun.ActionRun.SUCCEEDED, + echo_action_run["state"], + actionrun.ActionRun.SUCCEEDED, ) assert_equal(echo_action_run["stdout"], ["Echo!"]) assert_equal( - other_act_run["state"], actionrun.ActionRun.FAILED, + other_act_run["state"], + actionrun.ActionRun.FAILED, ) now = datetime.datetime.now() - stdout = now.strftime("Today is %Y-%m-%d, which is the same as %Y-%m-%d",) + stdout = now.strftime( + "Today is %Y-%m-%d, which is the same as %Y-%m-%d", + ) assert_equal(other_act_run["stdout"], [stdout]) job_runs_url = client.get_url("%s.1" % echo_job_name) assert_equal( - client.job_runs(job_runs_url)["state"], actionrun.ActionRun.FAILED, + client.job_runs(job_runs_url)["state"], + 
actionrun.ActionRun.FAILED, ) def test_node_reconfig(self): @@ -147,7 +155,9 @@ def test_node_reconfig(self): job_url = self.client.get_url("MASTER.a_job.0") sandbox.wait_on_state( - self.client.job_runs, job_url, actionrun.ActionRun.SUCCEEDED, + self.client.job_runs, + job_url, + actionrun.ActionRun.SUCCEEDED, ) self.sandbox.tronfig(second_config) @@ -184,17 +194,21 @@ def test_tronctl_with_job(self): cleanup_url = self.client.get_url("MASTER.echo_job.1.cleanup") sandbox.wait_on_state( - self.client.action_runs, cleanup_url, actionrun.ActionRun.SUCCEEDED, + self.client.action_runs, + cleanup_url, + actionrun.ActionRun.SUCCEEDED, ) action_run_url = self.client.get_url("MASTER.echo_job.1.echo_action") assert_equal( - self.client.action_runs(action_run_url)["state"], actionrun.ActionRun.SUCCEEDED, + self.client.action_runs(action_run_url)["state"], + actionrun.ActionRun.SUCCEEDED, ) job_run_url = self.client.get_url("MASTER.echo_job.1") assert_equal( - self.client.job_runs(job_run_url)["state"], actionrun.ActionRun.SUCCEEDED, + self.client.job_runs(job_run_url)["state"], + actionrun.ActionRun.SUCCEEDED, ) assert_equal(self.client.job(job_url)["status"], "enabled") @@ -239,14 +253,20 @@ def test_cleanup_on_failure(self): action_run_url = self.client.get_url("MASTER.failjob.0.failaction") sandbox.wait_on_state( - self.client.action_runs, action_run_url, actionrun.ActionRun.FAILED, + self.client.action_runs, + action_run_url, + actionrun.ActionRun.FAILED, ) action_run_url = self.client.get_url("MASTER.failjob.1.cleanup") sandbox.wait_on_state( - self.client.action_runs, action_run_url, actionrun.ActionRun.SUCCEEDED, + self.client.action_runs, + action_run_url, + actionrun.ActionRun.SUCCEEDED, ) - job_runs = self.client.job(self.client.get_url("MASTER.failjob"),)["runs"] + job_runs = self.client.job( + self.client.get_url("MASTER.failjob"), + )["runs"] assert_gt(len(job_runs), 1) def test_skip_failed_actions(self): @@ -266,7 +286,10 @@ def test_skip_failed_actions(self): ) self.start_with_config(config) action_run_url = self.client.get_url("MASTER.multi_step_job.0.broken") - waiter = sandbox.build_waiter_func(self.client.action_runs, action_run_url,) + waiter = sandbox.build_waiter_func( + self.client.action_runs, + action_run_url, + ) waiter(actionrun.ActionRun.FAILED) self.sandbox.tronctl("skip", "MASTER.multi_step_job.0.broken") @@ -274,12 +297,16 @@ def test_skip_failed_actions(self): action_run_url = self.client.get_url("MASTER.multi_step_job.0.works") sandbox.wait_on_state( - self.client.action_runs, action_run_url, actionrun.ActionRun.SUCCEEDED, + self.client.action_runs, + action_run_url, + actionrun.ActionRun.SUCCEEDED, ) job_run_url = self.client.get_url("MASTER.multi_step_job.0") sandbox.wait_on_state( - self.client.job_runs, job_run_url, actionrun.ActionRun.SUCCEEDED, + self.client.job_runs, + job_run_url, + actionrun.ActionRun.SUCCEEDED, ) def test_failure_on_multi_step_job_doesnt_wedge_tron(self): @@ -370,7 +397,9 @@ def wait_on_job_schedule(): sandbox.wait_on_sandbox(wait_on_job_schedule) sandbox.wait_on_state( - self.client.job, job_run_url, actionrun.ActionRun.CANCELLED, + self.client.job, + job_run_url, + actionrun.ActionRun.CANCELLED, ) action_run_states = [action_run["state"] for action_run in self.client.job_runs(job_run_url)["runs"]] @@ -393,18 +422,23 @@ def test_trond_restart_job_with_run_history(self): action_run_url = self.client.get_url("MASTER.fast_job.0.single_act") sandbox.wait_on_state( - self.client.action_runs, action_run_url, actionrun.ActionRun.RUNNING, + 
self.client.action_runs, + action_run_url, + actionrun.ActionRun.RUNNING, ) self.restart_trond() assert_equal( - self.client.job_runs(action_run_url)["state"], actionrun.ActionRun.UNKNOWN, + self.client.job_runs(action_run_url)["state"], + actionrun.ActionRun.UNKNOWN, ) next_run_url = self.client.get_url("MASTER.fast_job.-1.single_act") sandbox.wait_on_state( - self.client.action_runs, next_run_url, actionrun.ActionRun.RUNNING, + self.client.action_runs, + next_run_url, + actionrun.ActionRun.RUNNING, ) def test_trond_restart_job_running_with_dependencies(self): @@ -431,17 +465,23 @@ def test_trond_restart_job_running_with_dependencies(self): action_run_url = self.client.get_url("MASTER.complex_job.1.first_act") sandbox.wait_on_state( - self.client.action_runs, action_run_url, actionrun.ActionRun.RUNNING, + self.client.action_runs, + action_run_url, + actionrun.ActionRun.RUNNING, ) self.restart_trond() assert_equal( - self.client.job_runs(action_run_url)["state"], actionrun.ActionRun.UNKNOWN, + self.client.job_runs(action_run_url)["state"], + actionrun.ActionRun.UNKNOWN, ) for followup_action_run in ("following_act", "last_act"): - url = self.client.get_url(f"{job_name}.1.{followup_action_run}",) + url = self.client.get_url( + f"{job_name}.1.{followup_action_run}", + ) assert_equal( - self.client.action_runs(url)["state"], actionrun.ActionRun.QUEUED, + self.client.action_runs(url)["state"], + actionrun.ActionRun.QUEUED, ) diff --git a/tests/trondaemon_test.py b/tests/trondaemon_test.py index bd0056dda..80097faaa 100644 --- a/tests/trondaemon_test.py +++ b/tests/trondaemon_test.py @@ -29,13 +29,18 @@ def test_init(self): daemon = TronDaemon.__new__(TronDaemon) # skip __init__ options = mock.Mock() - with mock.patch("tron.utils.flock", autospec=True,) as mock_flock: + with mock.patch( + "tron.utils.flock", + autospec=True, + ) as mock_flock: daemon.__init__(options) assert mock_flock.call_count == 0 def test_run_uses_context(self): with mock.patch("tron.trondaemon.setup_logging", mock.Mock(), autospec=None,), mock.patch( - "tron.trondaemon.no_daemon_context", mock.Mock(), autospec=None, + "tron.trondaemon.no_daemon_context", + mock.Mock(), + autospec=None, ) as ndc: ndc.return_value = mock.MagicMock() ndc.return_value.__enter__.side_effect = RuntimeError() @@ -50,7 +55,10 @@ def test_run_manhole_new_manhole(self): with open(self.trond.manhole_sock, "w+"): pass - with mock.patch("twisted.internet.reactor.listenUNIX", autospec=True,) as mock_listenUNIX: + with mock.patch( + "twisted.internet.reactor.listenUNIX", + autospec=True, + ) as mock_listenUNIX: self.trond._run_manhole() assert mock_listenUNIX.call_count == 1 diff --git a/tests/utils/collections_test.py b/tests/utils/collections_test.py index b7d9e4ce7..564c6758b 100644 --- a/tests/utils/collections_test.py +++ b/tests/utils/collections_test.py @@ -68,5 +68,6 @@ def test_replace(self): item = mock.Mock() self.collection.replace(item) self.collection.add.assert_called_with( - item, self.collection.remove_item, + item, + self.collection.remove_item, ) diff --git a/tests/utils/proxy_test.py b/tests/utils/proxy_test.py index d5858f0a4..aaf7244ff 100644 --- a/tests/utils/proxy_test.py +++ b/tests/utils/proxy_test.py @@ -39,7 +39,11 @@ def setup_proxy(self): self.target_list = [DummyTarget(1), DummyTarget(2), DummyTarget(0)] self.proxy = CollectionProxy( lambda: self.target_list, - [("foo", any, True), ("not_foo", all, False), ("equals", lambda a: list(a), True),], + [ + ("foo", any, True), + ("not_foo", all, False), + ("equals", lambda a: 
list(a), True), + ], ) self.dummy = DummyObject(self.proxy) @@ -58,7 +62,8 @@ def test_perform_with_params(self): assert_equal(self.proxy.perform("equals")(2), [False, True, False]) sometimes = ["sometimes"] * 3 assert_equal( - self.proxy.perform("equals")(3, sometimes=True), sometimes, + self.proxy.perform("equals")(3, sometimes=True), + sometimes, ) diff --git a/tests/utils/scribereader_test.py b/tests/utils/scribereader_test.py index 0e89a0943..65e0f4863 100644 --- a/tests/utils/scribereader_test.py +++ b/tests/utils/scribereader_test.py @@ -18,19 +18,27 @@ def test_read_log_stream_for_action_run_min_date_and_max_date_today(): min_date = datetime.datetime.now() max_date = datetime.datetime.now() + datetime.timedelta(hours=1) with mock.patch( - "tron.utils.scribereader.get_scribereader_host_and_port", autospec=True, return_value=("host", 1234), + "tron.utils.scribereader.get_scribereader_host_and_port", + autospec=True, + return_value=("host", 1234), ), mock.patch( - "tron.utils.scribereader.scribereader.get_stream_reader", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_reader", + autospec=True, ) as mock_stream_reader, mock.patch( - "tron.utils.scribereader.scribereader.get_stream_tailer", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_tailer", + autospec=True, ) as mock_stream_tailer, mock.patch( - "tron.utils.scribereader.get_superregion", autospec=True, return_value="fake", + "tron.utils.scribereader.get_superregion", + autospec=True, + return_value="fake", ), mock.patch( - "tron.config.static_config.build_configuration_watcher", autospec=True, + "tron.config.static_config.build_configuration_watcher", + autospec=True, ), mock.patch( "staticconf.read", autospec=True, return_value=1000 ), mock.patch( - "tron.config.static_config.load_yaml_file", autospec=True, + "tron.config.static_config.load_yaml_file", + autospec=True, ): # in this case, we shouldn't even try to check the reader, so lets set an exception # to make sure we didn't try @@ -71,7 +79,10 @@ def test_read_log_stream_for_action_run_min_date_and_max_date_today(): mock_stream_reader.assert_not_called() mock_stream_tailer.assert_called_once_with( - stream_name="stream_paasta_app_output_namespace_job__action", lines=-1, tailing_host="host", tailing_port=1234, + stream_name="stream_paasta_app_output_namespace_job__action", + lines=-1, + tailing_host="host", + tailing_port=1234, ) assert output == ["line 1", "line 2"] @@ -83,19 +94,27 @@ def test_read_log_stream_for_action_run_min_date_and_max_date_different_days(): min_date = datetime.datetime.now() - datetime.timedelta(days=5) max_date = datetime.datetime.now() with mock.patch( - "tron.utils.scribereader.get_scribereader_host_and_port", autospec=True, return_value=("host", 1234), + "tron.utils.scribereader.get_scribereader_host_and_port", + autospec=True, + return_value=("host", 1234), ), mock.patch( - "tron.utils.scribereader.scribereader.get_stream_reader", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_reader", + autospec=True, ) as mock_stream_reader, mock.patch( - "tron.utils.scribereader.scribereader.get_stream_tailer", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_tailer", + autospec=True, ) as mock_stream_tailer, mock.patch( - "tron.utils.scribereader.get_superregion", autospec=True, return_value="fake", + "tron.utils.scribereader.get_superregion", + autospec=True, + return_value="fake", ), mock.patch( - "tron.config.static_config.build_configuration_watcher", autospec=True, + 
"tron.config.static_config.build_configuration_watcher", + autospec=True, ), mock.patch( "staticconf.read", autospec=True, return_value=1000 ), mock.patch( - "tron.config.static_config.load_yaml_file", autospec=True, + "tron.config.static_config.load_yaml_file", + autospec=True, ): # we should check the reader for data from a previous day mock_stream_reader.return_value.__enter__.return_value = iter( @@ -158,7 +177,10 @@ def test_read_log_stream_for_action_run_min_date_and_max_date_different_days(): reader_port=1234, ) mock_stream_tailer.assert_called_once_with( - stream_name="stream_paasta_app_output_namespace_job__action", lines=-1, tailing_host="host", tailing_port=1234, + stream_name="stream_paasta_app_output_namespace_job__action", + lines=-1, + tailing_host="host", + tailing_port=1234, ) assert output == ["line 0", "line 1", "line 2"] @@ -170,19 +192,27 @@ def test_read_log_stream_for_action_run_min_date_and_max_date_in_past(): min_date = datetime.datetime.now() - datetime.timedelta(days=5) max_date = datetime.datetime.now() - datetime.timedelta(days=4) with mock.patch( - "tron.utils.scribereader.get_scribereader_host_and_port", autospec=True, return_value=("host", 1234), + "tron.utils.scribereader.get_scribereader_host_and_port", + autospec=True, + return_value=("host", 1234), ), mock.patch( - "tron.utils.scribereader.scribereader.get_stream_reader", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_reader", + autospec=True, ) as mock_stream_reader, mock.patch( - "tron.utils.scribereader.scribereader.get_stream_tailer", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_tailer", + autospec=True, ) as mock_stream_tailer, mock.patch( - "tron.utils.scribereader.get_superregion", autospec=True, return_value="fake", + "tron.utils.scribereader.get_superregion", + autospec=True, + return_value="fake", ), mock.patch( - "tron.config.static_config.build_configuration_watcher", autospec=True, + "tron.config.static_config.build_configuration_watcher", + autospec=True, ), mock.patch( "staticconf.read", autospec=True, return_value=1000 ), mock.patch( - "tron.config.static_config.load_yaml_file", autospec=True, + "tron.config.static_config.load_yaml_file", + autospec=True, ): # all the data we want is from the past, so we should only check the reader mock_stream_reader.return_value.__enter__.return_value = iter( @@ -228,19 +258,27 @@ def test_read_log_stream_for_action_run_min_date_and_max_date_for_long_output(): # in tron.yaml in srv-configs max_lines = 1000 with mock.patch( - "tron.utils.scribereader.get_scribereader_host_and_port", autospec=True, return_value=("host", 1234), + "tron.utils.scribereader.get_scribereader_host_and_port", + autospec=True, + return_value=("host", 1234), ), mock.patch( - "tron.utils.scribereader.scribereader.get_stream_reader", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_reader", + autospec=True, ) as mock_stream_reader, mock.patch( - "tron.utils.scribereader.scribereader.get_stream_tailer", autospec=True, + "tron.utils.scribereader.scribereader.get_stream_tailer", + autospec=True, ) as mock_stream_tailer, mock.patch( - "tron.utils.scribereader.get_superregion", autospec=True, return_value="fake", + "tron.utils.scribereader.get_superregion", + autospec=True, + return_value="fake", ), mock.patch( - "tron.config.static_config.build_configuration_watcher", autospec=True, + "tron.config.static_config.build_configuration_watcher", + autospec=True, ), mock.patch( "staticconf.read", autospec=True, return_value=1000 ), 
mock.patch( - "tron.config.static_config.load_yaml_file", autospec=True, + "tron.config.static_config.load_yaml_file", + autospec=True, ): with open("./tests/utils/shortOutputTest.txt") as f: diff --git a/tests/utils/state_test.py b/tests/utils/state_test.py index 6d618c6c0..42a521804 100644 --- a/tests/utils/state_test.py +++ b/tests/utils/state_test.py @@ -39,7 +39,9 @@ def build_machine(self): # If they are listening, we should talk # If they are ignoring us we should get angry self.machine = state.Machine( - "listening", listening=dict(listening="talking"), talking=dict(ignoring="angry", talking="listening"), + "listening", + listening=dict(listening="talking"), + talking=dict(ignoring="angry", talking="listening"), ) def test_transition_many(self): diff --git a/tests/utils/timeutils_test.py b/tests/utils/timeutils_test.py index f5ade8d08..03d823f11 100644 --- a/tests/utils/timeutils_test.py +++ b/tests/utils/timeutils_test.py @@ -24,75 +24,124 @@ def make_dates(self): def check_delta(self, start, target, years=0, months=0, days=0): assert_equal( - start + macro_timedelta(start, years=years, months=months, days=days,), target, + start + + macro_timedelta( + start, + years=years, + months=months, + days=days, + ), + target, ) def test_days(self): self.check_delta( - self.start_nonleap, datetime.datetime(year=2011, month=1, day=11), days=10, + self.start_nonleap, + datetime.datetime(year=2011, month=1, day=11), + days=10, ) self.check_delta( - self.end_nonleap, datetime.datetime(year=2012, month=1, day=10), days=10, + self.end_nonleap, + datetime.datetime(year=2012, month=1, day=10), + days=10, ) self.check_delta( - self.start_leap, datetime.datetime(year=2012, month=1, day=11), days=10, + self.start_leap, + datetime.datetime(year=2012, month=1, day=11), + days=10, ) self.check_delta( - self.end_leap, datetime.datetime(year=2013, month=1, day=10), days=10, + self.end_leap, + datetime.datetime(year=2013, month=1, day=10), + days=10, ) self.check_delta( - self.begin_feb_nonleap, datetime.datetime(year=2011, month=3, day=1), days=28, + self.begin_feb_nonleap, + datetime.datetime(year=2011, month=3, day=1), + days=28, ) self.check_delta( - self.begin_feb_leap, datetime.datetime(year=2012, month=3, day=1), days=29, + self.begin_feb_leap, + datetime.datetime(year=2012, month=3, day=1), + days=29, ) def test_months(self): self.check_delta( - self.start_nonleap, datetime.datetime(year=2011, month=11, day=1), months=10, + self.start_nonleap, + datetime.datetime(year=2011, month=11, day=1), + months=10, ) self.check_delta( - self.end_nonleap, datetime.datetime(year=2012, month=10, day=31), months=10, + self.end_nonleap, + datetime.datetime(year=2012, month=10, day=31), + months=10, ) self.check_delta( - self.start_leap, datetime.datetime(year=2012, month=11, day=1), months=10, + self.start_leap, + datetime.datetime(year=2012, month=11, day=1), + months=10, ) self.check_delta( - self.end_leap, datetime.datetime(year=2013, month=10, day=31), months=10, + self.end_leap, + datetime.datetime(year=2013, month=10, day=31), + months=10, ) self.check_delta( - self.begin_feb_nonleap, datetime.datetime(year=2011, month=12, day=1), months=10, + self.begin_feb_nonleap, + datetime.datetime(year=2011, month=12, day=1), + months=10, ) self.check_delta( - self.begin_feb_leap, datetime.datetime(year=2012, month=12, day=1), months=10, + self.begin_feb_leap, + datetime.datetime(year=2012, month=12, day=1), + months=10, ) def test_years(self): self.check_delta( - self.start_nonleap, datetime.datetime(year=2015, 
month=1, day=1), years=4, + self.start_nonleap, + datetime.datetime(year=2015, month=1, day=1), + years=4, ) self.check_delta( - self.end_nonleap, datetime.datetime(year=2015, month=12, day=31), years=4, + self.end_nonleap, + datetime.datetime(year=2015, month=12, day=31), + years=4, ) self.check_delta( - self.start_leap, datetime.datetime(year=2016, month=1, day=1), years=4, + self.start_leap, + datetime.datetime(year=2016, month=1, day=1), + years=4, ) self.check_delta( - self.end_leap, datetime.datetime(year=2016, month=12, day=31), years=4, + self.end_leap, + datetime.datetime(year=2016, month=12, day=31), + years=4, ) self.check_delta( - self.begin_feb_nonleap, datetime.datetime(year=2015, month=2, day=1), years=4, + self.begin_feb_nonleap, + datetime.datetime(year=2015, month=2, day=1), + years=4, ) self.check_delta( - self.begin_feb_leap, datetime.datetime(year=2016, month=2, day=1), years=4, + self.begin_feb_leap, + datetime.datetime(year=2016, month=2, day=1), + years=4, ) def test_start_date_with_timezone(self): pacific_tz = pytz.timezone("US/Pacific") - start_date = pacific_tz.localize(datetime.datetime(year=2018, month=1, day=3, hour=13),) - expected_end = pacific_tz.localize(datetime.datetime(year=2018, month=1, day=1, hour=13),) + start_date = pacific_tz.localize( + datetime.datetime(year=2018, month=1, day=3, hour=13), + ) + expected_end = pacific_tz.localize( + datetime.datetime(year=2018, month=1, day=1, hour=13), + ) self.check_delta( - start_date, expected_end, days=-2, + start_date, + expected_end, + days=-2, ) @@ -104,7 +153,8 @@ def setup_times(self): def test_duration(self): assert_equal( - duration(self.earliest, self.latest), datetime.timedelta(0, 60 * 20), + duration(self.earliest, self.latest), + datetime.timedelta(0, 60 * 20), ) def test_duration_no_end(self): diff --git a/tests/utils/trontimespec_test.py b/tests/utils/trontimespec_test.py index a89b82440..2ae2ad94d 100644 --- a/tests/utils/trontimespec_test.py +++ b/tests/utils/trontimespec_test.py @@ -31,7 +31,9 @@ def test_get_match_months(self): self._cmp((2012, 12, 22), (2013, 1, 1)) def test_get_match_monthdays(self): - self.time_spec = trontimespec.TimeSpecification(monthdays=[10, 3, 3, 10],) + self.time_spec = trontimespec.TimeSpecification( + monthdays=[10, 3, 3, 10], + ) self._cmp((2012, 3, 14), (2012, 4, 3)) self._cmp((2012, 3, 1), (2012, 3, 3)) @@ -68,7 +70,10 @@ def test_next_day_weekdays(self): assert_equal(list(gen), [2, 5, 9, 12, 16, 19, 23, 26, 30]) def test_next_day_weekdays_with_ordinals(self): - time_spec = trontimespec.TimeSpecification(weekdays=[1, 5], ordinals=[1, 3],) + time_spec = trontimespec.TimeSpecification( + weekdays=[1, 5], + ordinals=[1, 3], + ) gen = time_spec.next_day(14, 2012, 3) assert_equal(list(gen), [16, 19]) @@ -98,7 +103,10 @@ def test_next_time_hours(self): assert_equal(time, datetime.time(4, 0)) def test_next_time_minutes(self): - time_spec = trontimespec.TimeSpecification(minutes=[30, 20, 30], seconds=[0],) + time_spec = trontimespec.TimeSpecification( + minutes=[30, 20, 30], + seconds=[0], + ) start_date = datetime.datetime(2012, 3, 14, 0, 25) time = time_spec.next_time(start_date, True) assert_equal(time, datetime.time(0, 30)) @@ -109,7 +117,11 @@ def test_next_time_minutes(self): assert_equal(time, datetime.time(0, 20)) def test_next_time_hours_and_minutes_and_seconds(self): - time_spec = trontimespec.TimeSpecification(minutes=[20, 30], hours=[1, 5], seconds=[4, 5],) + time_spec = trontimespec.TimeSpecification( + minutes=[20, 30], + hours=[1, 5], + seconds=[4, 5], 
+ ) start_date = datetime.datetime(2012, 3, 14, 1, 25) time = time_spec.next_time(start_date, True) assert_equal(time, datetime.time(1, 30, 4)) @@ -122,7 +134,10 @@ def test_next_time_hours_and_minutes_and_seconds(self): def test_get_match_dst_spring_forward(self): tz = pytz.timezone("US/Pacific") time_spec = trontimespec.TimeSpecification( - hours=[0, 1, 2, 3, 4], minutes=[0], seconds=[0], timezone="US/Pacific", + hours=[0, 1, 2, 3, 4], + minutes=[0], + seconds=[0], + timezone="US/Pacific", ) start = trontimespec.naive_as_timezone(datetime.datetime(2020, 3, 8, 1), tz) # Springing forward, the next hour after 1AM should be 3AM @@ -132,7 +147,10 @@ def test_get_match_dst_spring_forward(self): def test_get_match_dst_fall_back(self): tz = pytz.timezone("US/Pacific") time_spec = trontimespec.TimeSpecification( - hours=[0, 1, 2, 3, 4], minutes=[0], seconds=[0], timezone="US/Pacific", + hours=[0, 1, 2, 3, 4], + minutes=[0], + seconds=[0], + timezone="US/Pacific", ) start = trontimespec.naive_as_timezone(datetime.datetime(2020, 11, 1, 1), tz) # Falling back, the next hour after 1AM is 1AM again. But we only run on the first 1AM diff --git a/tools/action_dag_diagram.py b/tools/action_dag_diagram.py index 529e3918e..30fe0e90f 100644 --- a/tools/action_dag_diagram.py +++ b/tools/action_dag_diagram.py @@ -18,10 +18,14 @@ def parse_args(): parser = optparse.OptionParser() parser.add_option("-c", "--config", help="Tron configuration path.") parser.add_option( - "-n", "--name", help="Job name to graph. Also used as output filename.", + "-n", + "--name", + help="Job name to graph. Also used as output filename.", ) parser.add_option( - "--namespace", default=schema.MASTER_NAMESPACE, help="Configuration namespace which contains the job.", + "--namespace", + default=schema.MASTER_NAMESPACE, + help="Configuration namespace which contains the job.", ) opts, _ = parser.parse_args() diff --git a/tools/inspect_serialized_state.py b/tools/inspect_serialized_state.py index 1512d89a3..991a20169 100644 --- a/tools/inspect_serialized_state.py +++ b/tools/inspect_serialized_state.py @@ -18,7 +18,10 @@ def parse_options(): parser = optparse.OptionParser() parser.add_option("-c", "--config-path", help="Path to the configuration.") parser.add_option( - "-w", "--working-dir", default=".", help="Working directory to resolve relative paths.", + "-w", + "--working-dir", + default=".", + help="Working directory to resolve relative paths.", ) opts, _ = parser.parse_args() diff --git a/tools/migration/migrate_config_0.2_to_0.3.py b/tools/migration/migrate_config_0.2_to_0.3.py index 203ed3377..09b276563 100644 --- a/tools/migration/migrate_config_0.2_to_0.3.py +++ b/tools/migration/migrate_config_0.2_to_0.3.py @@ -109,12 +109,20 @@ def update_references(content): def key_length_func(kv): return len(kv[0]) - anchors_by_length = sorted(anchor_mapping.items(), key=key_length_func, reverse=True,) + anchors_by_length = sorted( + anchor_mapping.items(), + key=key_length_func, + reverse=True, + ) for anchor_name, string_name in anchors_by_length: # Remove the anchors content = re.sub(r"\s*&%s ?" 
% anchor_name, "", content) # Update the reference to use the string identifier - content = re.sub(r"\*%s\b" % anchor_name, '"%s"' % string_name, content,) + content = re.sub( + r"\*%s\b" % anchor_name, + '"%s"' % string_name, + content, + ) return content diff --git a/tools/migration/migrate_config_0.5.1_to_0.5.2.py b/tools/migration/migrate_config_0.5.1_to_0.5.2.py index e851a3672..1519dbb77 100644 --- a/tools/migration/migrate_config_0.5.1_to_0.5.2.py +++ b/tools/migration/migrate_config_0.5.1_to_0.5.2.py @@ -17,7 +17,9 @@ def parse_options(): parser = optparse.OptionParser() parser.add_option("-s", "--source", help="Path to old configuration file.") parser.add_option( - "-d", "--dest", help="Path to new configuration directory.", + "-d", + "--dest", + help="Path to new configuration directory.", ) opts, _ = parser.parse_args() diff --git a/tools/migration/migrate_state.py b/tools/migration/migrate_state.py index e44ad39b0..5a0f7c114 100644 --- a/tools/migration/migrate_state.py +++ b/tools/migration/migrate_state.py @@ -38,13 +38,17 @@ def parse_options(): "state_persistence section configured for the state file/database.", ) parser.add_option( - "--source-working-dir", help="The working directory for source dir to resolve relative paths.", + "--source-working-dir", + help="The working directory for source dir to resolve relative paths.", ) parser.add_option( - "--dest-working-dir", help="The working directory for dest dir to resolve relative paths.", + "--dest-working-dir", + help="The working directory for dest dir to resolve relative paths.", ) parser.add_option( - "--namespace", action="store_true", help="Move jobs which are missing a namespace to the MASTER", + "--namespace", + action="store_true", + help="Move jobs which are missing a namespace to the MASTER", ) opts, args = parser.parse_args() @@ -58,8 +62,7 @@ def parse_options(): def get_state_manager_from_config(config_path, working_dir): - """Return a state manager from the configuration. 
- """ + """Return a state manager from the configuration.""" config_manager = manager.ConfigManager(config_path) config_container = config_manager.load() state_config = config_container.get_master().state_persistence @@ -81,8 +84,14 @@ def strip_namespace(names): def convert_state(opts): - source_manager = get_state_manager_from_config(opts.source, opts.source_working_dir,) - dest_manager = get_state_manager_from_config(opts.dest, opts.dest_working_dir,) + source_manager = get_state_manager_from_config( + opts.source, + opts.source_working_dir, + ) + dest_manager = get_state_manager_from_config( + opts.dest, + opts.dest_working_dir, + ) container = get_current_config(opts.source) msg = "Migrating state from %s to %s" @@ -92,7 +101,10 @@ def convert_state(opts): if opts.namespace: job_names = strip_namespace(job_names) - job_states = source_manager.restore(job_names, skip_validation=True,) + job_states = source_manager.restore( + job_names, + skip_validation=True, + ) source_manager.cleanup() if opts.namespace: diff --git a/tools/migration/migrate_state_1.3.15_to_1.4.0.py b/tools/migration/migrate_state_1.3.15_to_1.4.0.py index ede052b1b..68a051b64 100644 --- a/tools/migration/migrate_state_1.3.15_to_1.4.0.py +++ b/tools/migration/migrate_state_1.3.15_to_1.4.0.py @@ -10,13 +10,20 @@ def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( - "--back", help="Flag to migrate back from new state back to old state", action="store_true", default=False, + "--back", + help="Flag to migrate back from new state back to old state", + action="store_true", + default=False, ) parser.add_argument( - "--working-dir", help="Working directory for the Tron daemon", required=True, + "--working-dir", + help="Working directory for the Tron daemon", + required=True, ) parser.add_argument( - "--config-path", help="Path in working dir with configs", required=True, + "--config-path", + help="Path in working dir with configs", + required=True, ) return parser.parse_args() diff --git a/tox.ini b/tox.ini index 9906c8f6e..96479c51e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,13 @@ [tox] -envlist = py36 -tox_pip_extensions_ext_venv_update = true +envlist = py38 [testenv] -basepython = python3.6 +basepython = python3.8 deps = --requirement={toxinidir}/requirements.txt --requirement={toxinidir}/requirements-dev.txt usedevelop = true -passenv = USER +passenv = USER PIP_INDEX_URL setenv = YARN_REGISTRY = {env:NPM_CONFIG_REGISTRY:https://registry.npmjs.org/} whitelist_externals= @@ -55,37 +54,5 @@ commands= commands = make deb_bionic make _itest_bionic - -[testenv:cluster_itests] -changedir=cluster_itests/ -passenv = DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH -whitelist_externals = - /bin/bash -deps = - docker-compose -commands = - docker-compose down - docker-compose build - docker-compose up -d mesosmaster mesosslave tronmaster - bash -c "docker-compose run --rm tronmaster tox -i {env:PIP_INDEX_URL:https://pypi.python.org/simple} -e tron_itests_inside_container -- --no-capture {posargs} || (docker-compose logs && exit 1)" - docker-compose stop - docker-compose rm --force - -[testenv:tron_itests_inside_container] -basepython = python3.6 -changedir=cluster_itests/ -deps = - {[testenv]deps} - behave==1.2.5 -whitelist_externals = - /bin/mkdir -commands = - # TODO: upgrade behave if they ever take this reasonable PR - pip install git+https://github.com/Yelp/behave@1.2.5-issue_533-fork - behave {posargs} - -[testenv:trond_inside_container] -basepython = python3.6 -deps = {[testenv]deps} -commands = - trond 
--debug -c /work/cluster_itests/config/ -l /work/example-cluster/logging.conf -H 0.0.0.0
+    make deb_jammy
+    make _itest_jammy
diff --git a/tron/__init__.py b/tron/__init__.py
index 882f03f4b..aac1fb921 100644
--- a/tron/__init__.py
+++ b/tron/__init__.py
@@ -1,4 +1,4 @@
-__version_info__ = (1, 27, 5)
+__version_info__ = (1, 28, 3)
 __version__ = ".".join("%s" % v for v in __version_info__)
 __author__ = "Yelp "
 __credits__ = [
diff --git a/tron/actioncommand.py b/tron/actioncommand.py
index 1cca9d0d3..f34e07076 100644
--- a/tron/actioncommand.py
+++ b/tron/actioncommand.py
@@ -32,9 +32,16 @@ class ActionCommand(Observable):
     STATE_MACHINE = Machine(
         PENDING,
         **{
-            PENDING: {"start": RUNNING, "exit": FAILSTART,},
-            RUNNING: {"exit": EXITING,},
-            EXITING: {"close": COMPLETE,},
+            PENDING: {
+                "start": RUNNING,
+                "exit": FAILSTART,
+            },
+            RUNNING: {
+                "exit": EXITING,
+            },
+            EXITING: {
+                "close": COMPLETE,
+            },
         },
     )
@@ -113,7 +120,10 @@ def is_complete(self):
     @property
     def is_done(self):
         """Done implies no more work will be done, but might not be success."""
-        return self.machine.state in (ActionCommand.COMPLETE, ActionCommand.FAILSTART,)
+        return self.machine.state in (
+            ActionCommand.COMPLETE,
+            ActionCommand.FAILSTART,
+        )

     def __repr__(self):
         return f"ActionCommand {self.id} {self.command}: {self.state}"
diff --git a/tron/api/adapter.py b/tron/api/adapter.py
index 836cd129a..fba84c968 100644
--- a/tron/api/adapter.py
+++ b/tron/api/adapter.py
@@ -113,7 +113,13 @@ class ActionRunAdapter(RunAdapter):
     ]

     def __init__(
-        self, action_run, job_run=None, max_lines=10, include_stdout=False, include_stderr=False, include_meta=False,
+        self,
+        action_run,
+        job_run=None,
+        max_lines=10,
+        include_stdout=False,
+        include_stderr=False,
+        include_meta=False,
     ):
         super().__init__(action_run)
         self.job_run = job_run
@@ -149,7 +155,12 @@ def _get_alternate_output_paths(self):
             formatted_alt_path = os.path.join(
                 # This ugliness is getting the "root output directory"
                 self._obj.context.next.next.base.job.output_path.base,
-                alt_path.format(namespace=namespace, jobname=jobname, run_num=run_num, action=action,),
+                alt_path.format(
+                    namespace=namespace,
+                    jobname=jobname,
+                    run_num=run_num,
+                    action=action,
+                ),
             )
             if os.path.exists(formatted_alt_path):
                 yield formatted_alt_path
@@ -351,7 +362,10 @@ class JobRunAdapter(RunAdapter):
     ]

     def __init__(
-        self, job_run, include_action_runs=False, include_action_graph=False,
+        self,
+        job_run,
+        include_action_runs=False,
+        include_action_graph=False,
     ):
         super().__init__(job_run)
         self.include_action_runs = include_action_runs
@@ -433,7 +447,11 @@ def get_url(self):
     @toggle_flag("include_job_runs")
     def get_runs(self):
-        runs = adapt_many(JobRunAdapter, list(self._obj.runs)[: self.num_runs or None], self.include_action_runs,)
+        runs = adapt_many(
+            JobRunAdapter,
+            list(self._obj.runs)[: self.num_runs or None],
+            self.include_action_runs,
+        )
         return runs

     def get_max_runtime(self):
diff --git a/tron/api/async_resource.py b/tron/api/async_resource.py
index e9e962231..a65cbea79 100644
--- a/tron/api/async_resource.py
+++ b/tron/api/async_resource.py
@@ -38,7 +38,12 @@ def process(fn, resource, request):
     @staticmethod
     def bounded(fn):
         def wrapper(resource, request):
-            d = threads.deferToThread(AsyncResource.process, fn, resource, request,)
+            d = threads.deferToThread(
+                AsyncResource.process,
+                fn,
+                resource,
+                request,
+            )
             d.addCallback(AsyncResource.finish, request, resource)
             d.addErrback(request.processingFailed)
             return server.NOT_DONE_YET
diff --git a/tron/api/controller.py b/tron/api/controller.py
index ec48704c4..add637333 100644
--- a/tron/api/controller.py
+++ b/tron/api/controller.py
@@ -84,7 +84,9 @@ def handle_command(self, command, **kwargs):
             return msg % (self.action_run, self.action_run.state)

         raise InvalidCommandForActionState(
-            command=command, action_name=self.action_run.name, action_state=self.action_run.state,
+            command=command,
+            action_name=self.action_run.name,
+            action_state=self.action_run.state,
         )

     def handle_termination(self, command):
diff --git a/tron/api/resource.py b/tron/api/resource.py
index 6d3c3e300..fd6574dd4 100644
--- a/tron/api/resource.py
+++ b/tron/api/resource.py
@@ -42,7 +42,7 @@ def default(self, o):
         if isinstance(o, datetime.date):
             return o.isoformat()

-        if isinstance(o, collections.KeysView):
+        if isinstance(o, collections.abc.KeysView):
             return list(o)

         return super().default(o)
@@ -61,7 +61,14 @@ def respond(request, response, code=None, headers=None):
     for key, val in (headers or {}).items():
         request.setHeader(str(key), str(val))

-    result = json.dumps(response, cls=JSONEncoder,) if response else ""
+    result = (
+        json.dumps(
+            response,
+            cls=JSONEncoder,
+        )
+        if response
+        else ""
+    )

     if type(result) is not bytes:
         result = result.encode("utf8")
@@ -79,10 +86,18 @@ def handle_command(request, api_controller, obj, **kwargs):
     except controller.UnknownCommandError:
         error_msg = f"Unknown command '{command}' for '{obj}'"
         log.warning(error_msg)
-        return respond(request=request, response={"error": error_msg}, code=http.NOT_IMPLEMENTED,)
+        return respond(
+            request=request,
+            response={"error": error_msg},
+            code=http.NOT_IMPLEMENTED,
+        )
     except controller.InvalidCommandForActionState as e:
         log.warning(e.message)
-        return respond(request=request, response={"error": e.message}, code=http.CONFLICT,)
+        return respond(
+            request=request,
+            response={"error": e.message},
+            code=http.CONFLICT,
+        )
     except Exception as e:
         log.exception("%r while executing command %s for %s", e, command, obj)
         trace = traceback.format_exc()
@@ -90,8 +105,8 @@ class ErrorResource(resource.Resource):
-    """ Equivalent to resource.NoResource, except error message is returned
-    as JSON, not HTML """
+    """Equivalent to resource.NoResource, except error message is returned
+    as JSON, not HTML"""

     def __init__(self, error="No Such Resource", code=http.NOT_FOUND):
         resource.Resource.__init__(self)
@@ -107,7 +122,7 @@ def render_POST(self, request):
         return respond(request=request, response={"error": self.error}, code=self.code)

     def getChild(self, chnam, request):
-        """ Overrided getChild to ensure a NoResource is not returned """
+        """Overrided getChild to ensure a NoResource is not returned"""
         return self
@@ -148,7 +163,12 @@ def render_GET(self, request):
     @AsyncResource.exclusive
     def render_POST(self, request):
         use_latest_command = requestargs.get_bool(request, "use_latest_command", False)
-        return handle_command(request, self.controller, self.action_run, use_latest_command=use_latest_command,)
+        return handle_command(
+            request,
+            self.controller,
+            self.action_run,
+            use_latest_command=use_latest_command,
+        )


 class JobRunResource(resource.Resource):
@@ -167,14 +187,18 @@ def getChild(self, action_name, _):
             action_run = self.job_run.action_runs[action_name]
             return ActionRunResource(action_run, self.job_run)

-        return ErrorResource(f"Cannot find action '{action_name}' for " f"'{self.job_run}'",)
+        return ErrorResource(
+            f"Cannot find action '{action_name}' for " f"'{self.job_run}'",
+        )
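# ---------------------------------------------------------------------------
# Illustration (not part of the patch): the one behavioral change in the
# resource.py hunk above is collections.KeysView -> collections.abc.KeysView.
# The top-level aliases for the collections ABCs were deprecated and removed
# in Python 3.10, so the collections.abc spelling is the forward-compatible
# one. A minimal, self-contained sketch of the same special-casing follows;
# SketchEncoder is an assumed name, not the real tron.api.resource encoder.
import collections.abc
import datetime
import json


class SketchEncoder(json.JSONEncoder):
    def default(self, o):
        # Dates serialize as ISO-8601 strings.
        if isinstance(o, datetime.date):
            return o.isoformat()
        # dict.keys() returns a view that json cannot serialize directly,
        # but it is an instance of collections.abc.KeysView.
        if isinstance(o, collections.abc.KeysView):
            return list(o)
        return super().default(o)


print(json.dumps({"keys": {"a": 1, "b": 2}.keys()}, cls=SketchEncoder))
# -> {"keys": ["a", "b"]}
# ---------------------------------------------------------------------------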
@AsyncResource.bounded def render_GET(self, request): include_runs = requestargs.get_bool(request, "include_action_runs") include_graph = requestargs.get_bool(request, "include_action_graph") run_adapter = adapter.JobRunAdapter( - self.job_run, include_action_runs=include_runs, include_action_graph=include_graph, + self.job_run, + include_action_runs=include_runs, + include_action_graph=include_graph, ) return respond(request=request, response=run_adapter.get_repr()) @@ -220,7 +244,10 @@ def getChild(self, run_id, _): @AsyncResource.bounded def render_GET(self, request): - include_action_runs = requestargs.get_bool(request, "include_action_runs",) + include_action_runs = requestargs.get_bool( + request, + "include_action_runs", + ) include_graph = requestargs.get_bool(request, "include_action_graph") num_runs = requestargs.get_integer(request, "num_runs") job_adapter = adapter.JobAdapter( @@ -235,7 +262,12 @@ def render_GET(self, request): @AsyncResource.exclusive def render_POST(self, request): run_time = requestargs.get_datetime(request, "run_time") - return handle_command(request, self.controller, self.job_scheduler, run_time=run_time,) + return handle_command( + request, + self.controller, + self.job_scheduler, + run_time=run_time, + ) class ActionRunHistoryResource(resource.Resource): @@ -248,7 +280,10 @@ def __init__(self, action_runs): @AsyncResource.bounded def render_GET(self, request): - return respond(request=request, response=adapter.adapt_many(adapter.ActionRunAdapter, self.action_runs),) + return respond( + request=request, + response=adapter.adapt_many(adapter.ActionRunAdapter, self.action_runs), + ) class JobCollectionResource(resource.Resource): @@ -265,7 +300,11 @@ def getChild(self, name, request): return resource_from_collection(self.job_collection, name, JobResource) def get_data( - self, include_job_run=False, include_action_runs=False, include_action_graph=True, include_node_pool=True, + self, + include_job_run=False, + include_action_runs=False, + include_action_graph=True, + include_node_pool=True, ): return adapter.adapt_many( adapter.JobAdapter, @@ -278,17 +317,41 @@ def get_data( ) def get_job_index(self): - jobs = adapter.adapt_many(adapter.JobIndexAdapter, self.job_collection.get_jobs(),) + jobs = adapter.adapt_many( + adapter.JobIndexAdapter, + self.job_collection.get_jobs(), + ) return {job["name"]: job["actions"] for job in jobs} @AsyncResource.bounded def render_GET(self, request): - include_job_runs = requestargs.get_bool(request, "include_job_runs", default=False,) - include_action_runs = requestargs.get_bool(request, "include_action_runs", default=False,) - include_action_graph = requestargs.get_bool(request, "include_action_graph", default=True,) - include_node_pool = requestargs.get_bool(request, "include_node_pool", default=True,) + include_job_runs = requestargs.get_bool( + request, + "include_job_runs", + default=False, + ) + include_action_runs = requestargs.get_bool( + request, + "include_action_runs", + default=False, + ) + include_action_graph = requestargs.get_bool( + request, + "include_action_graph", + default=True, + ) + include_node_pool = requestargs.get_bool( + request, + "include_node_pool", + default=True, + ) response = dict( - jobs=self.get_data(include_job_runs, include_action_runs, include_action_graph, include_node_pool,), + jobs=self.get_data( + include_job_runs, + include_action_runs, + include_action_graph, + include_node_pool, + ), ) return respond(request=request, response=response) @@ -322,7 +385,9 @@ def render_GET(self, 
request): config_name = requestargs.get_string(request, "name") if not config_name: return respond( - request=request, response={"error": "'name' for config is required."}, code=http.BAD_REQUEST, + request=request, + response={"error": "'name' for config is required."}, + code=http.BAD_REQUEST, ) response = self.controller.read_config(config_name) return respond(request=request, response=response) @@ -336,7 +401,9 @@ def render_POST(self, request): if not name: return respond( - request=request, response={"error": "'name' for config is required."}, code=http.BAD_REQUEST, + request=request, + response={"error": "'name' for config is required."}, + code=http.BAD_REQUEST, ) response = {"status": "Active"} @@ -407,7 +474,11 @@ def render_GET(self, request): def render_POST(self, request): command = requestargs.get_string(request, "command") if command not in self.controller.COMMANDS: - return respond(request=request, response=dict(error=f"Unknown command: {command}"), code=http.BAD_REQUEST,) + return respond( + request=request, + response=dict(error=f"Unknown command: {command}"), + code=http.BAD_REQUEST, + ) event = requestargs.get_string(request, "event") fn = getattr(self.controller, command) response = fn(event) @@ -421,7 +492,8 @@ def __init__(self, mcp): # Setup children self.putChild( - b"jobs", JobCollectionResource(mcp.get_job_collection()), + b"jobs", + JobCollectionResource(mcp.get_job_collection()), ) self.putChild(b"config", ConfigResource(mcp)) diff --git a/tron/bin/action_runner.py b/tron/bin/action_runner.py index c09d73806..0ad022f93 100755 --- a/tron/bin/action_runner.py +++ b/tron/bin/action_runner.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 """ Write pid and stdout/stderr to a standard location before execing a command. 
""" @@ -36,14 +36,25 @@ def get_content(self, run_id, command, proc): def wrap(self, command, run_id, proc): with open(self.filename, "w") as fh: yaml.safe_dump( - self.get_content(run_id=run_id, command=command, proc=proc,), fh, explicit_start=True, width=1000000, + self.get_content( + run_id=run_id, + command=command, + proc=proc, + ), + fh, + explicit_start=True, + width=1000000, ) try: yield finally: with open(self.filename, "a") as fh: yaml.safe_dump( - self.get_content(run_id=run_id, command=command, proc=proc,), + self.get_content( + run_id=run_id, + command=command, + proc=proc, + ), fh, explicit_start=True, width=1000000, @@ -86,7 +97,9 @@ def run_proc(output_path, command, run_id, proc): logging.warning(f"{run_id} running as pid {proc.pid}") status_file = StatusFile(os.path.join(output_path, STATUS_FILE)) with status_file.wrap( - command=command, run_id=run_id, proc=proc, + command=command, + run_id=run_id, + proc=proc, ): returncode = proc.wait() logging.warning(f"pid {proc.pid} exited with returncode {returncode}") @@ -96,20 +109,27 @@ def run_proc(output_path, command, run_id, proc): def parse_args(): parser = argparse.ArgumentParser(description="Action Runner for Tron") parser.add_argument( - "output_dir", help="The directory to store the state of the action run", + "output_dir", + help="The directory to store the state of the action run", ) parser.add_argument( - "command", help="the command to run", + "command", + help="the command to run", ) parser.add_argument( - "run_id", help="run_id of the action", + "run_id", + help="run_id of the action", ) return parser.parse_args() def run_command(command, run_id): return subprocess.Popen( - command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=build_environment(run_id=run_id), + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=build_environment(run_id=run_id), ) @@ -134,7 +154,9 @@ def stream(source, dst): def configure_logging(run_id, output_dir): output_file = os.path.join(output_dir, f"{run_id}-{os.getpid()}.log") logging.basicConfig( - filename=output_file, format="%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%dT%H:%M:%S%z", + filename=output_file, + format="%(asctime)s %(levelname)s %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S%z", ) @@ -149,7 +171,12 @@ def main(): ] for t in threads: t.start() - returncode = run_proc(output_path=args.output_dir, run_id=args.run_id, command=args.command, proc=proc,) + returncode = run_proc( + output_path=args.output_dir, + run_id=args.run_id, + command=args.command, + proc=proc, + ) for t in threads: t.join() diff --git a/tron/bin/action_status.py b/tron/bin/action_status.py index d4a54c09a..6b5b86749 100755 --- a/tron/bin/action_status.py +++ b/tron/bin/action_status.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 import argparse import logging import os @@ -44,13 +44,16 @@ def send_signal(signal_num, status_file): def parse_args(): parser = argparse.ArgumentParser(description="Action Status for Tron") parser.add_argument( - "output_dir", help="The directory where the state of the action run is", + "output_dir", + help="The directory where the state of the action run is", ) parser.add_argument( - "command", help="the command to run", + "command", + help="the command to run", ) parser.add_argument( - "run_id", help="run_id of the action", + "run_id", + help="run_id of the action", ) return parser.parse_args() diff --git a/tron/bin/check_tron_datastore_staleness.py b/tron/bin/check_tron_datastore_staleness.py 
index 553a8e2ca..d3cd86ddb 100755 --- a/tron/bin/check_tron_datastore_staleness.py +++ b/tron/bin/check_tron_datastore_staleness.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 import argparse import logging import os @@ -40,10 +40,15 @@ def parse_cli(): help="Working directory for the Tron daemon, default %(default)s", ) parser.add_argument( - "-c", "--config-path", default=DEFAULT_CONF_PATH, help="File path to the Tron configuration file", + "-c", + "--config-path", + default=DEFAULT_CONF_PATH, + help="File path to the Tron configuration file", ) parser.add_argument( - "--job-name", required=True, help="The job name to read timestamp from", + "--job-name", + required=True, + help="The job name to read timestamp from", ) parser.add_argument( "--staleness-threshold", @@ -52,7 +57,10 @@ def parse_cli(): ) args = parser.parse_args() args.working_dir = os.path.abspath(args.working_dir) - args.config_path = os.path.join(args.working_dir, args.config_path,) + args.config_path = os.path.join( + args.working_dir, + args.config_path, + ) return args diff --git a/tron/bin/check_tron_jobs.py b/tron/bin/check_tron_jobs.py index 400495170..cd733d473 100755 --- a/tron/bin/check_tron_jobs.py +++ b/tron/bin/check_tron_jobs.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 import datetime import logging import pprint @@ -39,10 +39,15 @@ class State(Enum): def parse_cli(): parser = cmd_utils.build_option_parser() parser.add_argument( - "--dry-run", action="store_true", default=False, help="Don't actually send alerts out. Defaults to %(default)s", + "--dry-run", + action="store_true", + default=False, + help="Don't actually send alerts out. Defaults to %(default)s", ) parser.add_argument( - "--job", default=None, help="Check a particular job. If unset checks all jobs", + "--job", + default=None, + help="Check a particular job. 
If unset checks all jobs", ) parser.add_argument( "--run-interval", @@ -69,7 +74,10 @@ def _timestamp_to_timeobj(timestamp): def _timestamp_to_shortdate(timestamp, separator="."): - return time.strftime("%Y{0}%m{0}%d".format(separator), _timestamp_to_timeobj(timestamp),) + return time.strftime( + "%Y{0}%m{0}%d".format(separator), + _timestamp_to_timeobj(timestamp), + ) def compute_check_result_for_job_runs(client, job, job_content, url_index, hide_stderr=False): @@ -88,7 +96,9 @@ def compute_check_result_for_job_runs(client, job, job_content, url_index, hide_ kwargs["status"] = 2 return kwargs else: # if no run scheduled, no run_time available - relevant_job_run_date = _timestamp_to_shortdate(relevant_job_run["run_time"],) + relevant_job_run_date = _timestamp_to_shortdate( + relevant_job_run["run_time"], + ) # A job_run is like MASTER.foo.1 job_run_id = relevant_job_run["id"] @@ -96,9 +106,14 @@ def compute_check_result_for_job_runs(client, job, job_content, url_index, hide_ # A job action is like MASTER.foo.1.step1 actions_expected_runtime = job_content.get("actions_expected_runtime", {}) relevant_action = get_relevant_action( - action_runs=relevant_job_run["runs"], last_state=last_state, actions_expected_runtime=actions_expected_runtime, + action_runs=relevant_job_run["runs"], + last_state=last_state, + actions_expected_runtime=actions_expected_runtime, + ) + action_run_id = get_object_type_from_identifier( + url_index, + relevant_action["id"], ) - action_run_id = get_object_type_from_identifier(url_index, relevant_action["id"],) if last_state in (State.STUCK, State.FAILED, State.UNKNOWN): if _skip_sensu_failure_logging: @@ -191,7 +206,9 @@ def get_relevant_run_and_state(job_content): # 3. If there are multiple running ones, then most recent run_time wins # 4. 
If nothing is currently running, then most recent end_time wins job_runs = sorted( - job_content.get("runs", []), key=lambda k: (k["end_time"] is None, k["end_time"], k["run_time"]), reverse=True, + job_content.get("runs", []), + key=lambda k: (k["end_time"] is None, k["end_time"], k["run_time"]), + reverse=True, ) if len(job_runs) == 0: return None, State.NO_RUN_YET @@ -226,13 +243,20 @@ def is_action_failed_or_unknown(job_run): def is_job_stuck( - job_runs, job_expected_runtime, actions_expected_runtime, allow_overlap, queueing, + job_runs, + job_expected_runtime, + actions_expected_runtime, + allow_overlap, + queueing, ): next_run_time = None for job_run in job_runs: states_to_check = {"running", "waiting"} if job_run.get("state", "unknown") in states_to_check: - if is_job_run_exceeding_expected_runtime(job_run, job_expected_runtime,): + if is_job_run_exceeding_expected_runtime( + job_run, + job_expected_runtime, + ): return job_run # check if it is still running at next scheduled job run time if not allow_overlap and queueing and next_run_time: @@ -240,7 +264,10 @@ def is_job_stuck( if time.time() > time.mktime(difftime): return job_run for action_run in job_run.get("runs", []): - if is_action_run_exceeding_expected_runtime(action_run, actions_expected_runtime,): + if is_action_run_exceeding_expected_runtime( + action_run, + actions_expected_runtime, + ): return job_run next_run_time = job_run.get("run_time", None) @@ -249,7 +276,14 @@ def is_job_stuck( def is_job_run_exceeding_expected_runtime(job_run, job_expected_runtime): states_to_check = {"running", "waiting"} - if job_expected_runtime is not None and job_run.get("state", "unknown",) in states_to_check: + if ( + job_expected_runtime is not None + and job_run.get( + "state", + "unknown", + ) + in states_to_check + ): duration_seconds = pytimeparse.parse(job_run.get("duration", "")) if duration_seconds and duration_seconds > job_expected_runtime: return True @@ -257,12 +291,15 @@ def is_job_run_exceeding_expected_runtime(job_run, job_expected_runtime): def is_action_run_exceeding_expected_runtime( - action_run, actions_expected_runtime, + action_run, + actions_expected_runtime, ): if action_run.get("state", "unknown") == "running": action_name = action_run.get("action_name", None) if action_name in actions_expected_runtime and actions_expected_runtime[action_name] is not None: - duration_seconds = pytimeparse.parse(action_run.get("duration", ""),) + duration_seconds = pytimeparse.parse( + action_run.get("duration", ""), + ) if duration_seconds > actions_expected_runtime[action_name]: return True return False @@ -277,7 +314,10 @@ def get_relevant_action(*, action_runs, last_state, actions_expected_runtime): return action_run except ValueError: if last_state == State.STUCK: - if is_action_run_exceeding_expected_runtime(action_run, actions_expected_runtime,): + if is_action_run_exceeding_expected_runtime( + action_run, + actions_expected_runtime, + ): return action_run if action_state == "running": stuck_action_run_candidate = action_run @@ -297,7 +337,9 @@ def guess_realert_every(job): ] if len(job_runs_started) == 0: return -1 - job_previous_run = max(job_runs_started,) + job_previous_run = max( + job_runs_started, + ) time_diff = time.mktime(_timestamp_to_timeobj(job_next_run)) - time.mktime( _timestamp_to_timeobj(job_previous_run) ) @@ -314,12 +356,13 @@ def get_earliest_run_time_to_check(job_content, interval): earliest_run_time = min(time.mktime(_timestamp_to_timeobj(run["run_time"])) for run in job_content["runs"]) return max( 
- earliest_run_time, time.time() - datetime.timedelta(**{f"{interval}s": NUM_PRECIOUS - 1}).total_seconds(), + earliest_run_time, + time.time() - datetime.timedelta(**{f"{interval}s": NUM_PRECIOUS - 1}).total_seconds(), ) def sort_runs_by_interval(job_content, interval="day", until=None): - """ Sorts a job's runs by a time interval (day, hour, minute, or second), + """Sorts a job's runs by a time interval (day, hour, minute, or second), according to a job run's run time. """ interval_formats = { @@ -350,7 +393,10 @@ def sort_runs_by_interval(job_content, interval="day", until=None): # Bucket runs by interval for run in job_content["runs"]: - run_time = time.strftime(interval_formats[interval], _timestamp_to_timeobj(run["run_time"]),) + run_time = time.strftime( + interval_formats[interval], + _timestamp_to_timeobj(run["run_time"]), + ) if run_time not in run_buckets: continue run_buckets[run_time].append(run) @@ -358,7 +404,10 @@ def sort_runs_by_interval(job_content, interval="day", until=None): def compute_check_result_for_job(client, job, url_index): - kwargs = m(name=f"check_tron_job.{job['name']}", source=client.cluster_name,) + kwargs = m( + name=f"check_tron_job.{job['name']}", + source=client.cluster_name, + ) if "realert_every" not in kwargs: kwargs = kwargs.set("realert_every", guess_realert_every(job)) kwargs = kwargs.set("check_every", f"{_run_interval}s") @@ -376,13 +425,21 @@ def compute_check_result_for_job(client, job, url_index): hide_stderr = kwargs.get("hide_stderr", False) kwargs_list = [] if job["status"] == "disabled": - kwargs = kwargs.set("output", f"OK: {job['name']} is disabled and won't be checked.",) + kwargs = kwargs.set( + "output", + f"OK: {job['name']} is disabled and won't be checked.", + ) kwargs = kwargs.set("status", 0) kwargs_list.append(kwargs) else: # The job is not disabled, therefore we have to look at its run history tron_id = get_object_type_from_identifier(url_index, job["name"]) - job_content = pmap(client.job(tron_id.url, include_action_runs=True,),) + job_content = pmap( + client.job( + tron_id.url, + include_action_runs=True, + ), + ) if job["monitoring"].get(PRECIOUS_JOB_ATTR, False): dated_runs = sort_runs_by_interval(job_content, interval="day") @@ -399,7 +456,10 @@ def compute_check_result_for_job(client, job, url_index): ) dated_kwargs = kwargs.update(results) if date: # if empty date, leave job name alone - dated_kwargs = dated_kwargs.set("name", f"{kwargs['name']}-{date}",) + dated_kwargs = dated_kwargs.set( + "name", + f"{kwargs['name']}-{date}", + ) kwargs_list.append(dated_kwargs) return [dict(kws) for kws in kwargs_list] diff --git a/tron/bin/get_tron_metrics.py b/tron/bin/get_tron_metrics.py index ad031d48f..706ec7db3 100755 --- a/tron/bin/get_tron_metrics.py +++ b/tron/bin/get_tron_metrics.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 # # get_tron_metrics.py # This script is designed to retrieve metrics from Tron via its API and send @@ -19,7 +19,10 @@ def parse_cli(): parser = cmd_utils.build_option_parser() parser.description = "Collects metrics from Tron via its API and forwards them to " "meteorite." parser.add_argument( - "--dry-run", action="store_true", default=False, help="Don't actually send metrics out. Defaults: %(default)s", + "--dry-run", + action="store_true", + default=False, + help="Don't actually send metrics out. 
Defaults: %(default)s", ) args = parser.parse_args() return args @@ -31,7 +34,14 @@ def check_bin_exists(bin): :param bin: (str) Name of the executable; could be a path to one """ - return subprocess.call(["which", bin], stdout=subprocess.PIPE, stderr=subprocess.PIPE,) == 0 + return ( + subprocess.call( + ["which", bin], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + == 0 + ) def send_data_metric(name, metric_type, value, dimensions={}, dry_run=False): @@ -46,21 +56,34 @@ def send_data_metric(name, metric_type, value, dimensions={}, dry_run=False): :param dry_run: (bool) Whether or not to send metrics to meteorite """ if dry_run: - metric_args = dict(name=name, metric_type=metric_type, value=value, dimensions=dimensions,) - log.info(f"Would have sent this to meteorite:\n" f"{pprint.pformat(metric_args)}",) + metric_args = dict( + name=name, + metric_type=metric_type, + value=value, + dimensions=dimensions, + ) + log.info( + f"Would have sent this to meteorite:\n" f"{pprint.pformat(metric_args)}", + ) return cmd = ["meteorite", "data", "-v", name, metric_type, str(value)] for k, v in dimensions.items(): cmd.extend(["-d", f"{k}:{v}"]) - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,) + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) output, error = process.communicate() output = output.decode("utf-8").rstrip() error = error.decode("utf-8").rstrip() if process.returncode != 0: - log.error("Meteorite failed with:\n" f"{textwrap.indent(error, ' ')}",) + log.error( + "Meteorite failed with:\n" f"{textwrap.indent(error, ' ')}", + ) else: log.debug(f"From meteorite: {output}") diff --git a/tron/bin/recover_batch.py b/tron/bin/recover_batch.py index a92b8ccbc..c43934b5a 100755 --- a/tron/bin/recover_batch.py +++ b/tron/bin/recover_batch.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.6 +#!/usr/bin/env python3.8 import argparse import logging import signal @@ -27,7 +27,9 @@ def __init__(self, to_watch, callback): def parse_args(): - parser = argparse.ArgumentParser(description="Check if a action runner has exited; wait otherwise",) + parser = argparse.ArgumentParser( + description="Check if a action runner has exited; wait otherwise", + ) parser.add_argument("filepath") return parser.parse_args() @@ -85,7 +87,8 @@ def run(fpath): # If not, wait for updates to the file. 
notify_queue = Queue() StatusFileWatcher( - fpath, lambda *args, **kwargs: notify(notify_queue, *args, **kwargs), + fpath, + lambda *args, **kwargs: notify(notify_queue, *args, **kwargs), ) reactor.run() exit_code, error_message = notify_queue.get() diff --git a/tron/command_context.py b/tron/command_context.py index e41dfbfa8..531575e05 100644 --- a/tron/command_context.py +++ b/tron/command_context.py @@ -44,9 +44,9 @@ class CommandContext: def __init__(self, base=None, next=None): """ - base - Object to look for attributes in - next - Next place to look for more pieces of context - Generally this will be another instance of CommandContext + base - Object to look for attributes in + next - Next place to look for more pieces of context + Generally this will be another instance of CommandContext """ self.base = base or {} self.next = next or {} @@ -94,7 +94,10 @@ def __getitem__(self, item): last_success = self.job.runs.last_success last_success = last_success.run_time if last_success else None - time_value = timeutils.DateArithmetic.parse(date_spec, last_success,) + time_value = timeutils.DateArithmetic.parse( + date_spec, + last_success, + ) if time_value: return time_value diff --git a/tron/commands/backfill.py b/tron/commands/backfill.py index 9774f3632..dce607a60 100644 --- a/tron/commands/backfill.py +++ b/tron/commands/backfill.py @@ -19,7 +19,9 @@ def get_date_range( - start_date: datetime.datetime, end_date: datetime.datetime, descending: bool = False, + start_date: datetime.datetime, + end_date: datetime.datetime, + descending: bool = False, ) -> List[datetime.datetime]: dates = [] delta = end_date - start_date @@ -127,7 +129,10 @@ async def get_run_id(self) -> Optional[client.TronObjectIdentifier]: loop = asyncio.get_event_loop() try: self.run_id = await loop.run_in_executor( - None, client.get_object_type_from_identifier, self.tron_client.index(), self.run_name, + None, + client.get_object_type_from_identifier, + self.tron_client.index(), + self.run_name, ) except client.RequestError as e: diff --git a/tron/commands/client.py b/tron/commands/client.py index 5df7b71ca..f80c8dad3 100644 --- a/tron/commands/client.py +++ b/tron/commands/client.py @@ -8,7 +8,6 @@ import urllib.request from collections import namedtuple from typing import Dict -from typing import Mapping import tron from tron.config.schema import MASTER_NAMESPACE @@ -18,7 +17,7 @@ assert simplejson # Pyflakes except ImportError: - import json as simplejson + import json as simplejson # type: ignore log = logging.getLogger(__name__) @@ -67,7 +66,9 @@ def build_http_error_response(exc): content = simplejson.loads(content) content = content["error"] except ValueError: - log.warning(f"Incorrectly formatted error response: {content}",) + log.warning( + f"Incorrectly formatted error response: {content}", + ) return Response(exc.code, exc.msg, content) @@ -97,7 +98,7 @@ def build_get_url(url, data=None): return url -def ensure_user_attribution(headers: Mapping[str, str]) -> Dict[str, str]: +def ensure_user_attribution(headers: Dict[str, str]) -> Dict[str, str]: headers = headers.copy() if "User-Agent" not in headers: headers["User-Agent"] = USER_AGENT @@ -106,12 +107,11 @@ def ensure_user_attribution(headers: Mapping[str, str]) -> Dict[str, str]: class Client: - """An HTTP client used to issue commands to the Tron API. - """ + """An HTTP client used to issue commands to the Tron API.""" def __init__(self, url_base, cluster_name=None, user_attribution=False): """Create a new client. 
- url_base - A url with a schema, hostname and port + url_base - A url with a schema, hostname and port """ self.url_base = url_base self.cluster_name = cluster_name @@ -126,12 +126,21 @@ def metrics(self): return self.http_get("/api/metrics") def config( - self, config_name, config_data=None, config_hash=None, check=False, + self, + config_name, + config_data=None, + config_hash=None, + check=False, ): """Retrieve or update the configuration.""" if config_data is not None: data_check = 1 if check else 0 - request_data = dict(config=config_data, name=config_name, hash=config_hash, check=data_check,) + request_data = dict( + config=config_data, + name=config_name, + hash=config_hash, + check=data_check, + ) return self.request("/api/config", request_data) request_data = dict(name=config_name) return self.http_get("/api/config", request_data) @@ -145,7 +154,11 @@ def get_url(self, identifier): return get_object_type_from_identifier(self.index(), identifier).url def jobs( - self, include_job_runs=False, include_action_runs=False, include_action_graph=True, include_node_pool=True, + self, + include_job_runs=False, + include_action_runs=False, + include_action_graph=True, + include_node_pool=True, ): params = { "include_job_runs": int(include_job_runs), @@ -231,7 +244,7 @@ def first(seq): def get_object_type_from_identifier(url_index, identifier): - """Given a string identifier, return a TronObjectIdentifier. """ + """Given a string identifier, return a TronObjectIdentifier.""" name_mapping = { "jobs": set(url_index["jobs"]), } diff --git a/tron/commands/cmd_utils.py b/tron/commands/cmd_utils.py index b2abeb7e8..a83b81311 100644 --- a/tron/commands/cmd_utils.py +++ b/tron/commands/cmd_utils.py @@ -21,7 +21,12 @@ class ExitCode: fail = 1 -GLOBAL_CONFIG_FILE_NAME = os.environ.get("TRON_CONFIG",) or "/etc/tron/tron.yaml" +GLOBAL_CONFIG_FILE_NAME = ( + os.environ.get( + "TRON_CONFIG", + ) + or "/etc/tron/tron.yaml" +) CONFIG_FILE_NAME = os.path.expanduser("~/.tron") DEFAULT_HOST = "localhost" @@ -65,32 +70,56 @@ def tron_jobs_completer(prefix, **kwargs): if os.path.isfile(TAB_COMPLETE_FILE): with opener(TAB_COMPLETE_FILE, "r") as f: jobs = f.readlines() - return filter_jobs_actions_runs(prefix=prefix, inputs=[job.strip("\n\r") for job in jobs],) + return filter_jobs_actions_runs( + prefix=prefix, + inputs=[job.strip("\n\r") for job in jobs], + ) else: if "client" not in kwargs: client = Client(get_default_server()) else: client = kwargs["client"] - return filter_jobs_actions_runs(prefix=prefix, inputs=[job["name"] for job in client.jobs()],) + return filter_jobs_actions_runs( + prefix=prefix, + inputs=[job["name"] for job in client.jobs()], + ) def build_option_parser(usage=None, epilog=None): - parser = argparse.ArgumentParser(usage=usage, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter,) + parser = argparse.ArgumentParser( + usage=usage, + epilog=epilog, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) parser.add_argument( - "--version", action="version", version=f"{parser.prog} {tron.__version__}", + "--version", + action="version", + version=f"{parser.prog} {tron.__version__}", ) parser.add_argument( - "-v", "--verbose", action="count", help="Verbose logging", default=None, + "-v", + "--verbose", + action="count", + help="Verbose logging", + default=None, ) parser.add_argument( - "--server", default=None, help="Url including scheme, host and port, Default: %(default)s", + "--server", + default=None, + help="Url including scheme, host and port, Default: 
%(default)s", ) parser.add_argument( - "--cluster_name", default=None, help="Human friendly tron cluster name", + "--cluster_name", + default=None, + help="Human friendly tron cluster name", ) parser.add_argument( - "-s", "--save", action="store_true", dest="save_config", help="Save options used on this job for next time.", + "-s", + "--save", + action="store_true", + dest="save_config", + help="Save options used on this job for next time.", ) return parser @@ -166,12 +195,18 @@ def setup_logging(options): level = logging.NOTSET logging.basicConfig( - level=level, format="%(name)s %(levelname)s %(message)s", stream=sys.stdout, + level=level, + format="%(name)s %(levelname)s %(message)s", + stream=sys.stdout, ) def suggest_possibilities(word, possibilities, max_suggestions=6): - suggestions = difflib.get_close_matches(word=word, possibilities=possibilities, n=max_suggestions,) + suggestions = difflib.get_close_matches( + word=word, + possibilities=possibilities, + n=max_suggestions, + ) if len(suggestions) == 1: return f"\nDid you mean: {suggestions[0]}?" elif len(suggestions) >= 1: diff --git a/tron/commands/display.py b/tron/commands/display.py index 4ad9081a9..91e254518 100644 --- a/tron/commands/display.py +++ b/tron/commands/display.py @@ -52,7 +52,11 @@ def enable(cls): def set(cls, color_name, text): if not cls.enabled or not color_name: return text - return "{}{}{}".format(cls.colors[color_name.lower()], text, cls.colors["end"],) + return "{}{}{}".format( + cls.colors[color_name.lower()], + text, + cls.colors["end"], + ) @classmethod def toggle(cls, enable): @@ -150,7 +154,11 @@ def row_color(self, row): return None def rows(self): - return sorted(self.data, key=itemgetter(self.fields[self.sort_index]), reverse=self.reversed,) + return sorted( + self.data, + key=itemgetter(self.fields[self.sort_index]), + reverse=self.reversed, + ) def store_data(self, data): self.data = data @@ -221,7 +229,9 @@ def build_field(label, field): def format_job_details(job_content): details = format_fields(DisplayJobs, job_content) job_runs = DisplayJobRuns().format(job_content["runs"]) - actions = "\n\nList of Actions:\n%s" % "\n".join(job_content["action_names"],) + actions = "\n\nList of Actions:\n%s" % "\n".join( + job_content["action_names"], + ) return details + actions + "\n" + job_runs @@ -259,7 +269,7 @@ class DisplayJobRuns(TableDisplay): colors = { "id": partial(Color.set, "yellow"), "state": add_color_for_state, - "manual": lambda value: Color.set("cyan" if value else None, value), + "manual": lambda value: Color.set("cyan" if value else None, value), # type: ignore # can't type a lambda } def format_value(self, field_idx, value): @@ -282,7 +292,12 @@ def post_row(self, row): end = row["end_time"] or "-" duration = row["duration"][:-7] if row["duration"] else "-" - row_data = "{}Start: {} End: {} ({})".format(" " * self.widths[0], start, end, duration,) + row_data = "{}Start: {} End: {} ({})".format( + " " * self.widths[0], + start, + end, + duration, + ) self.out.append(Color.set("gray", row_data)) @@ -376,7 +391,11 @@ def rows(self): # Action runs need a sort order that sorts by date # and that can handle situations where it is None, or # othere weird things, so we str() - return sorted(self.data, key=lambda x: str(x[self.fields[self.sort_index]]), reverse=self.reversed,) + return sorted( + self.data, + key=lambda x: str(x[self.fields[self.sort_index]]), + reverse=self.reversed, + ) def display_node(source, _=None): diff --git a/tron/commands/retry.py b/tron/commands/retry.py index 
0d137d6e9..d48bf4157 100644 --- a/tron/commands/retry.py +++ b/tron/commands/retry.py @@ -43,7 +43,10 @@ class RetryAction: RETRY_FAIL = False def __init__( - self, tron_client: client.Client, full_action_name: str, use_latest_command: bool = False, + self, + tron_client: client.Client, + full_action_name: str, + use_latest_command: bool = False, ): self.tron_client = tron_client self.retry_params = dict(command="retry", use_latest_command=int(use_latest_command)) @@ -130,7 +133,12 @@ async def can_retry(self) -> bool: async def check_trigger_statuses(self) -> Dict[str, bool]: action_run = await asyncio.get_event_loop().run_in_executor( - None, functools.partial(self.tron_client.action_runs, self.action_run_id.url, num_lines=0,), + None, + functools.partial( + self.tron_client.action_runs, + self.action_run_id.url, + num_lines=0, + ), ) # from tron.api.adapter:ActionRunAdapter.get_triggered_by: # triggered_by is a single string with this format: @@ -147,7 +155,11 @@ async def check_trigger_statuses(self) -> Dict[str, bool]: async def check_required_actions_statuses(self) -> Dict[str, bool]: action_runs = ( - await asyncio.get_event_loop().run_in_executor(None, self.tron_client.job_runs, self.job_run_id.url,) + await asyncio.get_event_loop().run_in_executor( + None, + self.tron_client.job_runs, + self.job_run_id.url, + ) )["runs"] return { action_runs[i]["action_name"]: action_runs[i]["state"] in BackfillRun.SUCCESS_STATES @@ -155,7 +167,10 @@ async def check_required_actions_statuses(self) -> Dict[str, bool]: } async def wait_and_retry( - self, deps_timeout_s: int = 0, poll_interval_s: int = DEFAULT_POLLING_INTERVAL_S, jitter: bool = True, + self, + deps_timeout_s: int = 0, + poll_interval_s: int = DEFAULT_POLLING_INTERVAL_S, + jitter: bool = True, ) -> bool: if deps_timeout_s != RetryAction.NO_TIMEOUT and jitter: @@ -173,7 +188,11 @@ async def wait_and_retry( self._log(msg) return False - async def wait_for_deps(self, deps_timeout_s: int = 0, poll_interval_s: int = DEFAULT_POLLING_INTERVAL_S,) -> bool: + async def wait_for_deps( + self, + deps_timeout_s: int = 0, + poll_interval_s: int = DEFAULT_POLLING_INTERVAL_S, + ) -> bool: """Wait for all upstream dependencies to finished up to a timeout. Once the timeout has expired, one final check is always conducted. diff --git a/tron/config/config_parse.py b/tron/config/config_parse.py index b8b947739..0a134f1a6 100644 --- a/tron/config/config_parse.py +++ b/tron/config/config_parse.py @@ -63,14 +63,17 @@ def build_format_string_validator(context_object): """Validate that a string does not contain any unexpected formatting keys. 
- valid_keys - a sequence of strings + valid_keys - a sequence of strings """ def validator(value, config_context): if config_context.partial: return valid_string(value, config_context) - context = command_context.CommandContext(context_object, config_context.command_context,) + context = command_context.CommandContext( + context_object, + config_context.command_context, + ) try: StringFormatter(context).format(value) @@ -101,7 +104,9 @@ def valid_output_stream_dir(output_dir, config_context): raise ConfigError(msg % output_dir) if not os.access(output_dir, os.W_OK): - raise ConfigError("output_stream_dir '%s' is not writable" % output_dir,) + raise ConfigError( + "output_stream_dir '%s' is not writable" % output_dir, + ) return output_dir @@ -388,7 +393,10 @@ class ValidateSSHOptions(Validator): # TODO: move this config and validations outside master namespace # 'identities': build_list_of_type_validator( # valid_identity_file, allow_empty=True), - "identities": build_list_of_type_validator(valid_string, allow_empty=True,), + "identities": build_list_of_type_validator( + valid_string, + allow_empty=True, + ), # 'known_hosts_file': valid_known_hosts_file, "known_hosts_file": valid_string, "connect_timeout": config_utils.valid_int, @@ -464,7 +472,9 @@ def valid_action_name(value, config_context): action_context = command_context.build_filled_context( - command_context.JobContext, command_context.JobRunContext, command_context.ActionRunContext, + command_context.JobContext, + command_context.JobRunContext, + command_context.ActionRunContext, ) @@ -475,7 +485,8 @@ def valid_mesos_action(action, config_context): if missing_keys: raise ConfigError( "Mesos executor for action {id} is missing these required keys: {keys}".format( - id=action["name"], keys=missing_keys, + id=action["name"], + keys=missing_keys, ), ) @@ -487,7 +498,8 @@ def valid_kubernetes_action(action, config_context): if missing_keys: raise ConfigError( "Kubernetes executor for action {id} is missing these required keys: {keys}".format( - id=action["name"], keys=missing_keys, + id=action["name"], + keys=missing_keys, ), ) @@ -534,7 +546,10 @@ class ValidateAction(Validator): "service_account_name": None, "ports": None, } - requires = build_list_of_type_validator(valid_action_name, allow_empty=True,) + requires = build_list_of_type_validator( + valid_action_name, + allow_empty=True, + ) validators = { "name": valid_action_name, "command": build_format_string_validator(action_context), @@ -551,7 +566,10 @@ class ValidateAction(Validator): "cap_drop": valid_list, "constraints": build_list_of_type_validator(valid_constraint, allow_empty=True), "docker_image": valid_string, - "docker_parameters": build_list_of_type_validator(valid_docker_parameter, allow_empty=True,), + "docker_parameters": build_list_of_type_validator( + valid_docker_parameter, + allow_empty=True, + ), "env": valid_dict, "secret_env": build_dict_value_validator(valid_secret_source), "secret_volumes": build_list_of_type_validator(valid_secret_volume, allow_empty=True), @@ -632,7 +650,10 @@ class ValidateCleanupAction(Validator): "cap_drop": valid_list, "constraints": build_list_of_type_validator(valid_constraint, allow_empty=True), "docker_image": valid_string, - "docker_parameters": build_list_of_type_validator(valid_docker_parameter, allow_empty=True,), + "docker_parameters": build_list_of_type_validator( + valid_docker_parameter, + allow_empty=True, + ), "env": valid_dict, "secret_env": build_dict_value_validator(valid_secret_source), "secret_volumes": 
build_list_of_type_validator(valid_secret_volume, allow_empty=True), @@ -847,7 +868,11 @@ def validate_jobs(config, config_context): config_utils.unique_names(fmt_string, config["jobs"]) -DEFAULT_STATE_PERSISTENCE = ConfigState(name="tron_state", store_type="shelve", buffer_size=1,) +DEFAULT_STATE_PERSISTENCE = ConfigState( + name="tron_state", + store_type="shelve", + buffer_size=1, +) DEFAULT_NODE = ValidateNode().do_shortcut(node="localhost") @@ -866,7 +891,9 @@ class ValidateConfig(Validator): "ssh_options": ConfigSSHOptions(**ValidateSSHOptions.defaults), "time_zone": None, "state_persistence": DEFAULT_STATE_PERSISTENCE, - "nodes": {"localhost": DEFAULT_NODE,}, + "nodes": { + "localhost": DEFAULT_NODE, + }, "node_pools": {}, "jobs": (), "mesos_options": ConfigMesos(**ValidateMesos.defaults), @@ -904,13 +931,20 @@ def validate_node_pool_nodes(self, config): def post_validation(self, config, _): """Validate a non-named config.""" node_names = config_utils.unique_names( - "Node and NodePool names must be unique %s", config["nodes"], config.get("node_pools", []), + "Node and NodePool names must be unique %s", + config["nodes"], + config.get("node_pools", []), ) if config.get("node_pools"): self.validate_node_pool_nodes(config) - config_context = ConfigContext("config", node_names, config.get("command_context"), MASTER_NAMESPACE,) + config_context = ConfigContext( + "config", + node_names, + config.get("command_context"), + MASTER_NAMESPACE, + ) validate_jobs(config, config_context) @@ -964,7 +998,12 @@ def validate_config_mapping(config_mapping): yield MASTER_NAMESPACE, master for name, content in config_mapping.items(): - context = ConfigContext(name, nodes, master.command_context, name,) + context = ConfigContext( + name, + nodes, + master.command_context, + name, + ) yield name, valid_named_config(content, config_context=context) @@ -989,7 +1028,9 @@ def get_job_names(self): return job_names def get_jobs(self): - return dict(itertools.chain.from_iterable(config.jobs.items() for _, config in self.configs.items()),) + return dict( + itertools.chain.from_iterable(config.jobs.items() for _, config in self.configs.items()), + ) def get_master(self): return self.configs[MASTER_NAMESPACE] diff --git a/tron/config/config_utils.py b/tron/config/config_utils.py index 736a7076a..67250bd1d 100644 --- a/tron/config/config_utils.py +++ b/tron/config/config_utils.py @@ -55,13 +55,13 @@ def unique_names(fmt_string, *seqs): def build_type_validator(validator, error_fmt): """Create a validator function using `validator` to validate the value. 
- validator - a function which takes a single argument `value` - error_fmt - a string which accepts two format variables (path, value) + validator - a function which takes a single argument `value` + error_fmt - a string which accepts two format variables (path, value) - Returns a function func(value, config_context) where - value - the value to validate - config_context - a ConfigContext object - Returns True if the value is valid + Returns a function func(value, config_context) where + value - the value to validate + config_context - a ConfigContext object + Returns True if the value is valid """ def f(value, config_context): @@ -90,16 +90,29 @@ def valid_number(type_func, value, config_context): valid_float = functools.partial(valid_number, float) valid_identifier = build_type_validator( - lambda s: isinstance(s, str) and IDENTIFIER_RE.match(s), "Identifier at %s is not a valid identifier: %s", + lambda s: isinstance(s, str) and IDENTIFIER_RE.match(s), + "Identifier at %s is not a valid identifier: %s", ) -valid_list = build_type_validator(lambda s: isinstance(s, list), "Value at %s is not a list: %s",) +valid_list = build_type_validator( + lambda s: isinstance(s, list), + "Value at %s is not a list: %s", +) -valid_string = build_type_validator(lambda s: isinstance(s, str), "Value at %s is not a string: %s",) +valid_string = build_type_validator( + lambda s: isinstance(s, str), + "Value at %s is not a string: %s", +) -valid_dict = build_type_validator(lambda s: isinstance(s, dict), "Value at %s is not a dictionary: %s",) +valid_dict = build_type_validator( + lambda s: isinstance(s, dict), + "Value at %s is not a dictionary: %s", +) -valid_bool = build_type_validator(lambda s: isinstance(s, bool), "Value at %s is not a boolean: %s",) +valid_bool = build_type_validator( + lambda s: isinstance(s, bool), + "Value at %s is not a boolean: %s", +) def build_enum_validator(enum): @@ -113,7 +126,9 @@ def enum_validator(value, config_context): try: return enum(value).value except Exception: - raise ConfigError(f"Value at {config_context.path} is not in {enum!r}: {value!r}",) + raise ConfigError( + f"Value at {config_context.path} is not in {enum!r}: {value!r}", + ) return enum_validator @@ -182,12 +197,18 @@ def validator(value, config_context): def build_dict_name_validator(item_validator, allow_empty=False): """Build a validator which validates a list or dict, and returns a dict. 
- Item validator must expect a "name" key, mapped to the key of the dict item""" + Item validator must expect a "name" key, mapped to the key of the dict item""" valid = build_list_of_type_validator(item_validator, allow_empty) def validator(value, config_context): if isinstance(value, dict): - value = [{"name": name, **config,} for name, config in value.items()] + value = [ + { + "name": name, + **config, + } + for name, config in value.items() + ] msg = "Duplicate name %%s at %s" % config_context.path name_dict = UniqueNameDict(msg) @@ -254,8 +275,8 @@ def build_child_context(self, path): class NullConfigContext: path = "" - nodes = set() - command_context = {} + nodes = set() # type: ignore + command_context = {} # type: ignore namespace = MASTER_NAMESPACE partial = False @@ -271,8 +292,8 @@ class Validator: """ config_class = None - defaults = {} - validators = {} + defaults = {} # type: ignore + validators = {} # type: ignore optional = False def validate(self, in_dict, config_context): diff --git a/tron/config/manager.py b/tron/config/manager.py index 3ed7f4996..893a805a0 100644 --- a/tron/config/manager.py +++ b/tron/config/manager.py @@ -132,7 +132,10 @@ def create_filename(): return self.manifest.get_file_name(name) or create_filename() def validate_with_fragment( - self, name, content, should_validate_missing_dependency=True, + self, + name, + content, + should_validate_missing_dependency=True, ): name_mapping = self.get_config_name_mapping() name_mapping[name] = content diff --git a/tron/config/schedule_parse.py b/tron/config/schedule_parse.py index cabfede9e..aba6a103c 100644 --- a/tron/config/schedule_parse.py +++ b/tron/config/schedule_parse.py @@ -11,15 +11,26 @@ from tron.config import schema from tron.utils import crontab -ConfigGenericSchedule = schema.config_object_factory("ConfigGenericSchedule", ["type", "value"], ["jitter"],) +ConfigGenericSchedule = schema.config_object_factory( + "ConfigGenericSchedule", + ["type", "value"], + ["jitter"], +) -ConfigGrocScheduler = namedtuple("ConfigGrocScheduler", "original ordinals weekdays monthdays months timestr jitter",) +ConfigGrocScheduler = namedtuple( + "ConfigGrocScheduler", + "original ordinals weekdays monthdays months timestr jitter", +) ConfigCronScheduler = namedtuple( - "ConfigCronScheduler", "original minutes hours monthdays months weekdays ordinals jitter", + "ConfigCronScheduler", + "original minutes hours monthdays months weekdays ordinals jitter", ) -ConfigDailyScheduler = namedtuple("ConfigDailyScheduler", "original hour minute second days jitter",) +ConfigDailyScheduler = namedtuple( + "ConfigDailyScheduler", + "original hour minute second days jitter", +) class ScheduleParseError(ConfigError): @@ -35,7 +46,11 @@ def pad_sequence(seq, size, padding=None): def schedule_config_from_string(schedule, config_context): """Return a scheduler config object from a string.""" schedule = schedule.strip() - name, schedule_config = pad_sequence(schedule.split(None, 1), 2, padding="",) + name, schedule_config = pad_sequence( + schedule.split(None, 1), + 2, + padding="", + ) if name not in schedulers: config = ConfigGenericSchedule("groc daily", schedule, jitter=None) return parse_groc_expression(config, config_context) @@ -83,13 +98,20 @@ def valid_daily_scheduler(config, config_context): def valid_day(day): if day not in CONVERT_DAYS_INT: - raise ConfigError(f"Unknown day {day} at {config_context.path}",) + raise ConfigError( + f"Unknown day {day} at {config_context.path}", + ) return CONVERT_DAYS_INT[day] original = 
f"{time_string} {days}" weekdays = {valid_day(day) for day in days or ()} return ConfigDailyScheduler( - original, time_spec.hour, time_spec.minute, time_spec.second, weekdays, jitter=config.jitter, + original, + time_spec.hour, + time_spec.minute, + time_spec.second, + weekdays, + jitter=config.jitter, ) @@ -105,8 +127,24 @@ def day_canonicalization_map(): weekday_lists = [ normalize_weekdays(calendar.day_name), normalize_weekdays(calendar.day_abbr), - ("u", "m", "t", "w", "r", "f", "s",), - ("su", "mo", "tu", "we", "th", "fr", "sa",), + ( + "u", + "m", + "t", + "w", + "r", + "f", + "s", + ), + ( + "su", + "mo", + "tu", + "we", + "th", + "fr", + "sa", + ), ] for day_list in weekday_lists: for day_name_synonym, day_index in zip(day_list, range(7)): @@ -177,7 +215,18 @@ def build_groc_schedule_parser_re(): TIME_EXPR = r"((at\s+)?(?P