diff --git a/doc/guides/deploy-without-root.rst b/doc/guides/deploy-without-root.rst index b29e60a0c..dd53fa4a8 100644 --- a/doc/guides/deploy-without-root.rst +++ b/doc/guides/deploy-without-root.rst @@ -61,7 +61,7 @@ the next step. Configuring the NixOps Network ****************************** -Edit your network.nix to specify the machine's +Edit your nixops.nix to specify the machine's ``deployment.targetUser``: .. code-block:: nix diff --git a/doc/manual/migrating.rst b/doc/manual/migrating.rst new file mode 100644 index 000000000..f0885d994 --- /dev/null +++ b/doc/manual/migrating.rst @@ -0,0 +1,76 @@ +.. _chap-overview: + +Overview +======== + +This chapter aims to provide guidelines on migrating from NixOps 1.x to 2.0. + +.. _sec-layout: + +Code layout changes +------------------- + +Using NixOps 1.0 multiple deployments spread out over the file and deployed +from any working directory with the ``--deployment (-d)`` parameter. + +NixOps 2 however requires a file relative to the invocation working directory. +It needs to be called either ``nixops.nix`` for a traditional deployment or +``flake.nix`` for the as of yet experimental +`flakes support `. + +.. _sec-state-location: + +State location +-------------- + +In NixOps 1.0 deployment state such as provisioned resources are stored in a +SQLite database located in ``~/.nixops``. + +NixOps 2 however has pluggable state backends, meaning that you will have to +make a choice where to store this state. + +To implement the old behaviour of loading deployment state from the SQLite +database located in ``~/.nixops`` add the following snippet to your deployment: + +:: + { + network = { + storage.legacy = {}; + }; + } + +To implement a fire-and-forget strategy use this code snippet: + +:: + { + network = { + storage.memory = {}; + }; + } + +For additional state storage strategies see the various NixOps plugins. + +.. 
_sec-state-migration: + +State migration +--------------- + +Migrating to any non-legacy backend from a previous deployment requires a +manual migration step. + +#. Start by configuring the legacy backend as such:: + { + network = { + storage.legacy = {}; + }; + } + +#. Then export the current state:: + nixops export > state.json + +#. Now go ahead and configure your desired state backend. + +#. And finally import the old state:: + nixops import < state.json + +#. Make sure to remove ``state.json`` as it may contain deployment secrets. diff --git a/flake.nix b/flake.nix index a91925319..8af0905a0 100644 --- a/flake.nix +++ b/flake.nix @@ -38,7 +38,9 @@ ] ++ (builtins.attrValues linters); shellHook = '' - export PATH=${builtins.toString ./scripts}:$PATH + git_root=$(${pkgs.git}/bin/git rev-parse --show-toplevel) + export PYTHONPATH=$git_root:$PYTHONPATH + export PATH=$git_root/scripts:$PATH ''; }; diff --git a/nix/eval-machine-info.nix b/nix/eval-machine-info.nix index 8acdcff99..47e490def 100644 --- a/nix/eval-machine-info.nix +++ b/nix/eval-machine-info.nix @@ -17,6 +17,8 @@ let zipAttrs = set: builtins.listToAttrs ( map (name: { inherit name; value = builtins.catAttrs name set; }) (builtins.concatMap builtins.attrNames set)); + flakeExpr = (builtins.getFlake flakeUri).outputs.nixopsConfigurations.default; + networks = let getNetworkFromExpr = networkExpr: @@ -31,7 +33,7 @@ let in map ({ key }: getNetworkFromExpr key) networkExprClosure ++ optional (flakeUri != null) - ((call (builtins.getFlake flakeUri).outputs.nixopsConfigurations.default) // { _file = "<${flakeUri}>"; }); + ((call flakeExpr) // { _file = "<${flakeUri}>"; }); network = zipAttrs networks; @@ -260,6 +262,11 @@ in rec { in [ f ] ++ map getRequires requires; + exprToArgs = nixopsExpr: f: + if builtins.isFunction nixopsExpr then + map (a: { "${a}" = builtins.toString f; } ) (builtins.attrNames (builtins.functionArgs nixopsExpr)) + else []; + fileToArgs = f: let nixopsExpr = import f; @@ -270,6 +277,8 
@@ in rec { getNixOpsArgs = fs: lib.zipAttrs (lib.unique (lib.concatMap fileToArgs (getNixOpsExprs fs))); - nixopsArguments = getNixOpsArgs networkExprs; + nixopsArguments = + if flakeUri == null then getNixOpsArgs networkExprs + else lib.listToAttrs (builtins.map (a: {name = a; value = [ flakeUri ];}) (lib.attrNames (builtins.functionArgs flakeExpr))); } diff --git a/nix/templates/.keep b/nix/templates/.keep deleted file mode 100644 index e69de29bb..000000000 diff --git a/nixops/__main__.py b/nixops/__main__.py index 905a8ac34..a8f258479 100755 --- a/nixops/__main__.py +++ b/nixops/__main__.py @@ -1,6 +1,7 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- import sys +import os def setup_debugger() -> None: @@ -11,7 +12,9 @@ def setup_debugger() -> None: from types import TracebackType from typing import Type - def hook(_type: Type[BaseException], value: BaseException, tb: TracebackType): + def hook( + _type: Type[BaseException], value: BaseException, tb: TracebackType + ) -> None: if hasattr(sys, "ps1") or not sys.stderr.isatty(): sys.__excepthook__(_type, value, tb) else: @@ -28,671 +31,12 @@ def hook(_type: Type[BaseException], value: BaseException, tb: TracebackType): setup_debugger() -from argparse import ArgumentParser, _SubParsersAction, SUPPRESS, REMAINDER -import os from nixops.parallel import MultipleExceptions -from nixops.script_defs import ( - add_subparser, - op_list_deployments, - op_create, - add_common_modify_options, - op_modify, - op_clone, - op_delete, - op_info, - op_check, - op_set_args, - op_deploy, - add_common_deployment_options, - op_send_keys, - op_destroy, - op_delete_resources, - op_stop, - op_start, - op_reboot, - op_show_arguments, - op_show_physical, - op_ssh, - op_ssh_for_each, - op_scp, - op_mount, - op_rename, - op_backup, - op_backup_status, - op_remove_backup, - op_clean_backups, - op_restore, - op_show_option, - op_list_generations, - op_rollback, - op_delete_generation, - op_show_console_output, - op_dump_nix_paths, - 
op_export, - op_import, - op_edit, - op_copy_closure, - op_list_plugins, - parser_plugin_hooks, - setup_logging, - error, -) -import sys +from nixops.script_defs import setup_logging +from nixops.evaluation import NixEvalError +from nixops.script_defs import error +from nixops.args import parser import nixops -import nixops.ansi - -# Set up the parser. -parser = ArgumentParser(description="NixOS cloud deployment tool", prog="nixops") -parser.add_argument("--version", action="version", version="NixOps @version@") -parser.add_argument( - "--pdb", action="store_true", help="Invoke pdb on unhandled exception" -) - -subparsers: _SubParsersAction = parser.add_subparsers( - help="sub-command help", metavar="operation", required=True -) - -subparser = add_subparser(subparsers, "list", help="list all known deployments") -subparser.set_defaults(op=op_list_deployments) - -subparser = add_subparser(subparsers, "create", help="create a new deployment") -subparser.set_defaults(op=op_create) -subparser.add_argument( - "--name", "-n", dest="name", metavar="NAME", help=SUPPRESS -) # obsolete, use -d instead -add_common_modify_options(subparser) - -subparser = add_subparser(subparsers, "modify", help="modify an existing deployment") -subparser.set_defaults(op=op_modify) -subparser.add_argument( - "--name", "-n", dest="name", metavar="NAME", help="new symbolic name of deployment" -) -add_common_modify_options(subparser) - -subparser = add_subparser(subparsers, "clone", help="clone an existing deployment") -subparser.set_defaults(op=op_clone) -subparser.add_argument( - "--name", - "-n", - dest="name", - metavar="NAME", - help="symbolic name of the cloned deployment", -) - -subparser = add_subparser(subparsers, "delete", help="delete a deployment") -subparser.add_argument( - "--force", action="store_true", help="force deletion even if resources still exist" -) -subparser.add_argument("--all", action="store_true", help="delete all deployments") -subparser.set_defaults(op=op_delete) - 
-subparser = add_subparser(subparsers, "info", help="show the state of the deployment") -subparser.set_defaults(op=op_info) -subparser.add_argument("--all", action="store_true", help="show all deployments") -subparser.add_argument( - "--plain", action="store_true", help="do not pretty-print the output" -) -subparser.add_argument( - "--no-eval", - action="store_true", - help="do not evaluate the deployment specification", -) - -subparser = add_subparser( - subparsers, - "check", - help="check the state of the machines in the network" - " (note that this might alter the internal nixops state to consolidate with the real state of the resource)", -) -subparser.set_defaults(op=op_check) -subparser.add_argument("--all", action="store_true", help="check all deployments") -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="check only the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="check all except the specified machines", -) - -subparser = add_subparser( - subparsers, - "set-args", - help="persistently set arguments to the deployment specification", -) -subparser.set_defaults(op=op_set_args) -subparser.add_argument( - "--arg", - nargs=2, - action="append", - dest="args", - metavar=("NAME", "VALUE"), - help="pass a Nix expression value", -) -subparser.add_argument( - "--argstr", - nargs=2, - action="append", - dest="argstrs", - metavar=("NAME", "VALUE"), - help="pass a string value", -) -subparser.add_argument( - "--unset", - nargs=1, - action="append", - dest="unset", - metavar="NAME", - help="unset previously set argument", -) - - -subparser = add_subparser(subparsers, "deploy", help="deploy the network configuration") -subparser.set_defaults(op=op_deploy) -subparser.add_argument( - "--kill-obsolete", "-k", action="store_true", help="kill obsolete virtual machines" -) -subparser.add_argument( - "--dry-run", action="store_true", help="evaluate and print what would be 
built" -) -subparser.add_argument( - "--dry-activate", - action="store_true", - help="show what will be activated on the machines in the network", -) -subparser.add_argument( - "--test", - action="store_true", - help="build and activate the new configuration; do not enable it in the bootloader. Rebooting the system will roll back automatically.", -) -subparser.add_argument( - "--boot", - action="store_true", - help="build the new configuration and enable it in the bootloader; do not activate it. Upon reboot, the system will use the new configuration.", -) -subparser.add_argument( - "--repair", action="store_true", help="use --repair when calling nix-build (slow)" -) -subparser.add_argument( - "--evaluate-only", action="store_true", help="only call nix-instantiate and exit" -) -subparser.add_argument( - "--plan-only", - action="store_true", - help="show the diff between the configuration and the state and exit", -) -subparser.add_argument( - "--build-only", - action="store_true", - help="build only; do not perform deployment actions", -) -subparser.add_argument( - "--create-only", action="store_true", help="exit after creating missing machines" -) -subparser.add_argument( - "--copy-only", action="store_true", help="exit after copying closures" -) -subparser.add_argument( - "--allow-recreate", - action="store_true", - help="recreate resources machines that have disappeared", -) -subparser.add_argument( - "--always-activate", - action="store_true", - help="activate unchanged configurations as well", -) -add_common_deployment_options(subparser) - -subparser = add_subparser(subparsers, "send-keys", help="send encryption keys") -subparser.set_defaults(op=op_send_keys) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="send keys to only the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="send keys to all except the specified machines", -) - -subparser = add_subparser( - 
subparsers, "destroy", help="destroy all resources in the specified deployment" -) -subparser.set_defaults(op=op_destroy) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="destroy only the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="destroy all except the specified machines", -) -subparser.add_argument( - "--wipe", action="store_true", help="securely wipe data on the machines" -) -subparser.add_argument("--all", action="store_true", help="destroy all deployments") - -subparser = add_subparser( - subparsers, - "delete-resources", - help="deletes the resource from the local NixOps state file.", -) -subparser.set_defaults(op=op_delete_resources) -subparser.add_argument( - "--include", - nargs="+", - metavar="RESOURCE-NAME", - help="delete only the specified resources", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="RESOURCE-NAME", - help="delete all resources except the specified resources", -) - -subparser = add_subparser( - subparsers, "stop", help="stop all virtual machines in the network" -) -subparser.set_defaults(op=op_stop) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="stop only the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="stop all except the specified machines", -) - -subparser = add_subparser( - subparsers, "start", help="start all virtual machines in the network" -) -subparser.set_defaults(op=op_start) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="start only the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="start all except the specified machines", -) - -subparser = add_subparser( - subparsers, "reboot", help="reboot all virtual machines in the network" -) -subparser.set_defaults(op=op_reboot) -subparser.add_argument( - 
"--include", - nargs="+", - metavar="MACHINE-NAME", - help="reboot only the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="reboot all except the specified machines", -) -subparser.add_argument( - "--no-wait", action="store_true", help="do not wait until the machines are up again" -) -subparser.add_argument( - "--rescue", - action="store_true", - help="reboot machines into the rescue system" " (if available)", -) -subparser.add_argument( - "--hard", - action="store_true", - help="send a hard reset (power switch) to the machines" " (if available)", -) - -subparser = add_subparser( - subparsers, "show-arguments", help="print the arguments to the network expressions" -) -subparser.set_defaults(op=op_show_arguments) - -subparser = add_subparser( - subparsers, "show-physical", help="print the physical network expression" -) -subparser.add_argument( - "--backup", - dest="backupid", - default=None, - help="print physical network expression for given backup id", -) -subparser.set_defaults(op=op_show_physical) - -subparser = add_subparser( - subparsers, "ssh", help="login on the specified machine via SSH" -) -subparser.set_defaults(op=op_ssh) -subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") -subparser.add_argument( - "args", metavar="SSH_ARGS", nargs=REMAINDER, help="SSH flags and/or command", -) - -subparser = add_subparser( - subparsers, "ssh-for-each", help="execute a command on each machine via SSH" -) -subparser.set_defaults(op=op_ssh_for_each) -subparser.add_argument( - "args", metavar="ARG", nargs="*", help="additional arguments to SSH" -) -subparser.add_argument("--parallel", "-p", action="store_true", help="run in parallel") -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="run command only on the specified machines", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="run command on all 
except the specified machines", -) -subparser.add_argument( - "--all", action="store_true", help="run ssh-for-each for all deployments" -) - -subparser = add_subparser( - subparsers, "scp", help="copy files to or from the specified machine via scp" -) -subparser.set_defaults(op=op_scp) -subparser.add_argument( - "--from", - dest="scp_from", - action="store_true", - help="copy a file from specified machine", -) -subparser.add_argument( - "--to", dest="scp_to", action="store_true", help="copy a file to specified machine" -) -subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") -subparser.add_argument("source", metavar="SOURCE", help="source file location") -subparser.add_argument("destination", metavar="DEST", help="destination file location") - -subparser = add_subparser( - subparsers, - "mount", - help="mount a directory from the specified machine into the local filesystem", -) -subparser.set_defaults(op=op_mount) -subparser.add_argument( - "machine", - metavar="MACHINE[:PATH]", - help="identifier of the machine, optionally followed by a path", -) -subparser.add_argument("destination", metavar="PATH", help="local path") -subparser.add_argument( - "--sshfs-option", - "-o", - action="append", - metavar="OPTIONS", - help="mount options passed to sshfs", -) - -subparser = add_subparser(subparsers, "rename", help="rename machine in network") -subparser.set_defaults(op=op_rename) -subparser.add_argument( - "current_name", metavar="FROM", help="current identifier of the machine" -) -subparser.add_argument("new_name", metavar="TO", help="new identifier of the machine") - -subparser = add_subparser( - subparsers, - "backup", - help="make snapshots of persistent disks in network (currently EC2-only)", -) -subparser.set_defaults(op=op_backup) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="perform backup actions on the specified machines only", -) -subparser.add_argument( - "--exclude", - nargs="+", - 
metavar="MACHINE-NAME", - help="do not perform backup actions on the specified machines", -) -subparser.add_argument( - "--freeze", - dest="freeze_fs", - action="store_true", - default=False, - help="freeze filesystems for non-root filesystems that support this (e.g. xfs)", -) -subparser.add_argument( - "--force", - dest="force", - action="store_true", - default=False, - help="start new backup even if previous is still running", -) -subparser.add_argument( - "--devices", - nargs="+", - metavar="DEVICE-NAME", - help="only backup the specified devices", -) - -subparser = add_subparser(subparsers, "backup-status", help="get status of backups") -subparser.set_defaults(op=op_backup_status) -subparser.add_argument( - "backupid", default=None, nargs="?", help="use specified backup in stead of latest" -) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="perform backup actions on the specified machines only", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="do not perform backup actions on the specified machines", -) -subparser.add_argument( - "--wait", - dest="wait", - action="store_true", - default=False, - help="wait until backup is finished", -) -subparser.add_argument( - "--latest", - dest="latest", - action="store_true", - default=False, - help="show status of latest backup only", -) - -subparser = add_subparser(subparsers, "remove-backup", help="remove a given backup") -subparser.set_defaults(op=op_remove_backup) -subparser.add_argument("backupid", metavar="BACKUP-ID", help="backup ID to remove") -subparser.add_argument( - "--keep-physical", - dest="keep_physical", - default=False, - action="store_true", - help="do not remove the physical backups, only remove backups from nixops state", -) - -subparser = add_subparser(subparsers, "clean-backups", help="remove old backups") -subparser.set_defaults(op=op_clean_backups) -subparser.add_argument( - "--keep", dest="keep", type=int, help="number 
of backups to keep around" -) -subparser.add_argument( - "--keep-days", - metavar="N", - dest="keep_days", - type=int, - help="keep backups newer than N days", -) -subparser.add_argument( - "--keep-physical", - dest="keep_physical", - default=False, - action="store_true", - help="do not remove the physical backups, only remove backups from nixops state", -) - -subparser = add_subparser( - subparsers, - "restore", - help="restore machines based on snapshots of persistent disks in network (currently EC2-only)", -) -subparser.set_defaults(op=op_restore) -subparser.add_argument( - "--backup-id", default=None, help="use specified backup in stead of latest" -) -subparser.add_argument( - "--include", - nargs="+", - metavar="MACHINE-NAME", - help="perform backup actions on the specified machines only", -) -subparser.add_argument( - "--exclude", - nargs="+", - metavar="MACHINE-NAME", - help="do not perform backup actions on the specified machines", -) -subparser.add_argument( - "--devices", - nargs="+", - metavar="DEVICE-NAME", - help="only restore the specified devices", -) - -subparser = add_subparser( - subparsers, "show-option", help="print the value of a configuration option" -) -subparser.set_defaults(op=op_show_option) -subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") -subparser.add_argument("option", metavar="OPTION", help="option name") -subparser.add_argument( - "--xml", action="store_true", help="print the option value in XML format" -) -subparser.add_argument( - "--json", action="store_true", help="print the option value in JSON format" -) -subparser.add_argument( - "--include-physical", - action="store_true", - help="include the physical specification in the evaluation", -) - -subparser = add_subparser( - subparsers, - "list-generations", - help="list previous configurations to which you can roll back", -) -subparser.set_defaults(op=op_list_generations) - -subparser = add_subparser( - subparsers, "rollback", help="roll back 
to a previous configuration" -) -subparser.set_defaults(op=op_rollback) -subparser.add_argument( - "generation", - type=int, - metavar="GENERATION", - help="number of the desired configuration (see ‘nixops list-generations’)", -) -add_common_deployment_options(subparser) - -subparser = add_subparser( - subparsers, "delete-generation", help="remove a previous configuration" -) -subparser.set_defaults(op=op_delete_generation) -subparser.add_argument( - "generation", - type=int, - metavar="GENERATION", - help="number of the desired configuration (see ‘nixops list-generations’)", -) -add_common_deployment_options(subparser) - -subparser = add_subparser( - subparsers, - "show-console-output", - help="print the machine's console output on stdout", -) -subparser.set_defaults(op=op_show_console_output) -subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") -add_common_deployment_options(subparser) - -subparser = add_subparser( - subparsers, "dump-nix-paths", help="dump Nix paths referenced in deployments" -) -subparser.add_argument( - "--all", action="store_true", help="dump Nix paths for all deployments" -) -subparser.set_defaults(op=op_dump_nix_paths) -add_common_deployment_options(subparser) - -subparser = add_subparser(subparsers, "export", help="export the state of a deployment") -subparser.add_argument("--all", action="store_true", help="export all deployments") -subparser.set_defaults(op=op_export) - -subparser = add_subparser( - subparsers, "import", help="import deployments into the state file" -) -subparser.add_argument( - "--include-keys", - action="store_true", - help="import public SSH hosts keys to .ssh/known_hosts", -) -subparser.set_defaults(op=op_import) - -subparser = add_subparser( - subparsers, "edit", help="open the deployment specification in $EDITOR" -) -subparser.set_defaults(op=op_edit) - -subparser = add_subparser( - subparsers, "copy-closure", help="copy closure to a target machine" -) 
-subparser.add_argument("machine", help="identifier of the machine") -subparser.add_argument("storepath", help="store path of the closure to be copied") -subparser.set_defaults(op=op_copy_closure) - -subparser = subparsers.add_parser( - "list-plugins", help="list the available nixops plugins" -) -subparser.set_defaults(op=op_list_plugins) -subparser.add_argument( - "--verbose", "-v", action="store_true", help="Provide extra plugin information" -) -subparser.add_argument("--debug", action="store_true", help="enable debug output") - -parser_plugin_hooks(parser, subparsers) def main() -> None: @@ -705,10 +49,12 @@ def main() -> None: args = parser.parse_args() setup_logging(args) + from nixops.exceptions import NixError + try: nixops.deployment.DEBUG = args.debug args.op(args) - except nixops.deployment.NixEvalError: + except NixEvalError: error("evaluation of the deployment specification failed") sys.exit(1) except KeyboardInterrupt: @@ -719,6 +65,10 @@ def main() -> None: if args.debug or args.show_trace or str(e) == "": e.print_all_backtraces() sys.exit(1) + except NixError as e: + sys.stderr.write(str(e)) + sys.stderr.flush() + sys.exit(1) if __name__ == "__main__": diff --git a/nixops/args.py b/nixops/args.py new file mode 100644 index 000000000..97ade4469 --- /dev/null +++ b/nixops/args.py @@ -0,0 +1,653 @@ +from argparse import ArgumentParser, _SubParsersAction, SUPPRESS, REMAINDER +from nixops.script_defs import ( + add_subparser, + op_list_deployments, + op_create, + op_modify, + op_clone, + op_delete, + op_info, + op_check, + op_set_args, + op_deploy, + add_common_deployment_options, + op_send_keys, + op_destroy, + op_delete_resources, + op_stop, + op_start, + op_reboot, + op_show_arguments, + op_show_physical, + op_ssh, + op_ssh_for_each, + op_scp, + op_mount, + op_rename, + op_backup, + op_backup_status, + op_remove_backup, + op_clean_backups, + op_restore, + op_show_option, + op_list_generations, + op_rollback, + op_delete_generation, + 
op_show_console_output, + op_dump_nix_paths, + op_export, + op_import, + op_edit, + op_copy_closure, + op_list_plugins, + parser_plugin_hooks, + op_unlock, +) + +# Set up the parser. +parser = ArgumentParser(description="NixOS cloud deployment tool", prog="nixops") +parser.add_argument("--version", action="version", version="NixOps @version@") +parser.add_argument( + "--pdb", action="store_true", help="Invoke pdb on unhandled exception" +) + +subparsers: _SubParsersAction = parser.add_subparsers( + help="sub-command help", metavar="operation", required=True +) + +subparser = add_subparser(subparsers, "list", help="list all known deployments") +subparser.set_defaults(op=op_list_deployments) + +subparser = add_subparser(subparsers, "create", help="create a new deployment") +subparser.set_defaults(op=op_create) +subparser.add_argument( + "--name", "-n", dest="name", metavar="NAME", help=SUPPRESS +) # obsolete, use -d instead + +subparser = add_subparser(subparsers, "modify", help="modify an existing deployment") +subparser.set_defaults(op=op_modify) +subparser.add_argument( + "--name", "-n", dest="name", metavar="NAME", help="new symbolic name of deployment" +) + +subparser = add_subparser(subparsers, "clone", help="clone an existing deployment") +subparser.set_defaults(op=op_clone) +subparser.add_argument( + "--name", + "-n", + dest="name", + metavar="NAME", + help="symbolic name of the cloned deployment", +) + +subparser = add_subparser(subparsers, "delete", help="delete a deployment") +subparser.add_argument( + "--force", action="store_true", help="force deletion even if resources still exist" +) +subparser.add_argument("--all", action="store_true", help="delete all deployments") +subparser.set_defaults(op=op_delete) + +subparser = add_subparser(subparsers, "info", help="show the state of the deployment") +subparser.set_defaults(op=op_info) +subparser.add_argument("--all", action="store_true", help="show all deployments") +subparser.add_argument( + "--plain", 
action="store_true", help="do not pretty-print the output" +) +subparser.add_argument( + "--no-eval", + action="store_true", + help="do not evaluate the deployment specification", +) + +subparser = add_subparser( + subparsers, + "check", + help="check the state of the machines in the network" + " (note that this might alter the internal nixops state to consolidate with the real state of the resource)", +) +subparser.set_defaults(op=op_check) +subparser.add_argument("--all", action="store_true", help="check all deployments") +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="check only the specified machines", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="check all except the specified machines", +) + +subparser = add_subparser( + subparsers, + "set-args", + help="persistently set arguments to the deployment specification", +) +subparser.set_defaults(op=op_set_args) +subparser.add_argument( + "--arg", + nargs=2, + action="append", + dest="args", + metavar=("NAME", "VALUE"), + help="pass a Nix expression value", +) +subparser.add_argument( + "--argstr", + nargs=2, + action="append", + dest="argstrs", + metavar=("NAME", "VALUE"), + help="pass a string value", +) +subparser.add_argument( + "--unset", + nargs=1, + action="append", + dest="unset", + metavar="NAME", + help="unset previously set argument", +) + + +subparser = add_subparser(subparsers, "deploy", help="deploy the network configuration") +subparser.set_defaults(op=op_deploy) +subparser.add_argument( + "--kill-obsolete", "-k", action="store_true", help="kill obsolete virtual machines" +) +subparser.add_argument( + "--dry-run", action="store_true", help="evaluate and print what would be built" +) +subparser.add_argument( + "--dry-activate", + action="store_true", + help="show what will be activated on the machines in the network", +) +subparser.add_argument( + "--test", + action="store_true", + help="build and activate the new 
configuration; do not enable it in the bootloader. Rebooting the system will roll back automatically.", +) +subparser.add_argument( + "--boot", + action="store_true", + help="build the new configuration and enable it in the bootloader; do not activate it. Upon reboot, the system will use the new configuration.", +) +subparser.add_argument( + "--repair", action="store_true", help="use --repair when calling nix-build (slow)" +) +subparser.add_argument( + "--evaluate-only", action="store_true", help="only call nix-instantiate and exit" +) +subparser.add_argument( + "--plan-only", + action="store_true", + help="show the diff between the configuration and the state and exit", +) +subparser.add_argument( + "--build-only", + action="store_true", + help="build only; do not perform deployment actions", +) +subparser.add_argument( + "--create-only", action="store_true", help="exit after creating missing machines" +) +subparser.add_argument( + "--copy-only", action="store_true", help="exit after copying closures" +) +subparser.add_argument( + "--allow-recreate", + action="store_true", + help="recreate resources machines that have disappeared", +) +subparser.add_argument( + "--always-activate", + action="store_true", + help="activate unchanged configurations as well", +) +add_common_deployment_options(subparser) + +subparser = add_subparser(subparsers, "send-keys", help="send encryption keys") +subparser.set_defaults(op=op_send_keys) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="send keys to only the specified machines", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="send keys to all except the specified machines", +) + +subparser = add_subparser( + subparsers, "destroy", help="destroy all resources in the specified deployment" +) +subparser.set_defaults(op=op_destroy) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="destroy only the specified machines", +) 
+subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="destroy all except the specified machines", +) +subparser.add_argument( + "--wipe", action="store_true", help="securely wipe data on the machines" +) +subparser.add_argument("--all", action="store_true", help="destroy all deployments") + +subparser = add_subparser( + subparsers, + "delete-resources", + help="deletes the resource from the local NixOps state file.", +) +subparser.set_defaults(op=op_delete_resources) +subparser.add_argument( + "--include", + nargs="+", + metavar="RESOURCE-NAME", + help="delete only the specified resources", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="RESOURCE-NAME", + help="delete all resources except the specified resources", +) + +subparser = add_subparser( + subparsers, "stop", help="stop all virtual machines in the network" +) +subparser.set_defaults(op=op_stop) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="stop only the specified machines", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="stop all except the specified machines", +) + +subparser = add_subparser( + subparsers, "start", help="start all virtual machines in the network" +) +subparser.set_defaults(op=op_start) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="start only the specified machines", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="start all except the specified machines", +) + +subparser = add_subparser( + subparsers, "reboot", help="reboot all virtual machines in the network" +) +subparser.set_defaults(op=op_reboot) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="reboot only the specified machines", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="reboot all except the specified machines", +) 
+subparser.add_argument( + "--no-wait", action="store_true", help="do not wait until the machines are up again" +) +subparser.add_argument( + "--rescue", + action="store_true", + help="reboot machines into the rescue system" " (if available)", +) +subparser.add_argument( + "--hard", + action="store_true", + help="send a hard reset (power switch) to the machines" " (if available)", +) + +subparser = add_subparser( + subparsers, "show-arguments", help="print the arguments to the network expressions" +) +subparser.set_defaults(op=op_show_arguments) + +subparser = add_subparser( + subparsers, "show-physical", help="print the physical network expression" +) +subparser.add_argument( + "--backup", + dest="backupid", + default=None, + help="print physical network expression for given backup id", +) +subparser.set_defaults(op=op_show_physical) + +subparser = add_subparser( + subparsers, "ssh", help="login on the specified machine via SSH" +) +subparser.set_defaults(op=op_ssh) +subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") +subparser.add_argument( + "args", metavar="SSH_ARGS", nargs=REMAINDER, help="SSH flags and/or command", +) + +subparser = add_subparser( + subparsers, "ssh-for-each", help="execute a command on each machine via SSH" +) +subparser.set_defaults(op=op_ssh_for_each) +subparser.add_argument( + "args", metavar="ARG", nargs="*", help="additional arguments to SSH" +) +subparser.add_argument("--parallel", "-p", action="store_true", help="run in parallel") +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="run command only on the specified machines", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="run command on all except the specified machines", +) +subparser.add_argument( + "--all", action="store_true", help="run ssh-for-each for all deployments" +) + +subparser = add_subparser( + subparsers, "scp", help="copy files to or from the specified 
machine via scp" +) +subparser.set_defaults(op=op_scp) +subparser.add_argument( + "--from", + dest="scp_from", + action="store_true", + help="copy a file from specified machine", +) +subparser.add_argument( + "--to", dest="scp_to", action="store_true", help="copy a file to specified machine" +) +subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") +subparser.add_argument("source", metavar="SOURCE", help="source file location") +subparser.add_argument("destination", metavar="DEST", help="destination file location") + +subparser = add_subparser( + subparsers, + "mount", + help="mount a directory from the specified machine into the local filesystem", +) +subparser.set_defaults(op=op_mount) +subparser.add_argument( + "machine", + metavar="MACHINE[:PATH]", + help="identifier of the machine, optionally followed by a path", +) +subparser.add_argument("destination", metavar="PATH", help="local path") +subparser.add_argument( + "--sshfs-option", + "-o", + action="append", + metavar="OPTIONS", + help="mount options passed to sshfs", +) + +subparser = add_subparser(subparsers, "rename", help="rename machine in network") +subparser.set_defaults(op=op_rename) +subparser.add_argument( + "current_name", metavar="FROM", help="current identifier of the machine" +) +subparser.add_argument("new_name", metavar="TO", help="new identifier of the machine") + +subparser = add_subparser( + subparsers, + "backup", + help="make snapshots of persistent disks in network (currently EC2-only)", +) +subparser.set_defaults(op=op_backup) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="perform backup actions on the specified machines only", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="do not perform backup actions on the specified machines", +) +subparser.add_argument( + "--freeze", + dest="freeze_fs", + action="store_true", + default=False, + help="freeze filesystems for non-root 
filesystems that support this (e.g. xfs)", +) +subparser.add_argument( + "--force", + dest="force", + action="store_true", + default=False, + help="start new backup even if previous is still running", +) +subparser.add_argument( + "--devices", + nargs="+", + metavar="DEVICE-NAME", + help="only backup the specified devices", +) + +subparser = add_subparser(subparsers, "backup-status", help="get status of backups") +subparser.set_defaults(op=op_backup_status) +subparser.add_argument( + "backupid", default=None, nargs="?", help="use specified backup in stead of latest" +) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="perform backup actions on the specified machines only", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="do not perform backup actions on the specified machines", +) +subparser.add_argument( + "--wait", + dest="wait", + action="store_true", + default=False, + help="wait until backup is finished", +) +subparser.add_argument( + "--latest", + dest="latest", + action="store_true", + default=False, + help="show status of latest backup only", +) + +subparser = add_subparser(subparsers, "remove-backup", help="remove a given backup") +subparser.set_defaults(op=op_remove_backup) +subparser.add_argument("backupid", metavar="BACKUP-ID", help="backup ID to remove") +subparser.add_argument( + "--keep-physical", + dest="keep_physical", + default=False, + action="store_true", + help="do not remove the physical backups, only remove backups from nixops state", +) + +subparser = add_subparser(subparsers, "clean-backups", help="remove old backups") +subparser.set_defaults(op=op_clean_backups) +subparser.add_argument( + "--keep", dest="keep", type=int, help="number of backups to keep around" +) +subparser.add_argument( + "--keep-days", + metavar="N", + dest="keep_days", + type=int, + help="keep backups newer than N days", +) +subparser.add_argument( + "--keep-physical", + dest="keep_physical", 
+ default=False, + action="store_true", + help="do not remove the physical backups, only remove backups from nixops state", +) + +subparser = add_subparser( + subparsers, + "restore", + help="restore machines based on snapshots of persistent disks in network (currently EC2-only)", +) +subparser.set_defaults(op=op_restore) +subparser.add_argument( + "--backup-id", default=None, help="use specified backup in stead of latest" +) +subparser.add_argument( + "--include", + nargs="+", + metavar="MACHINE-NAME", + help="perform backup actions on the specified machines only", +) +subparser.add_argument( + "--exclude", + nargs="+", + metavar="MACHINE-NAME", + help="do not perform backup actions on the specified machines", +) +subparser.add_argument( + "--devices", + nargs="+", + metavar="DEVICE-NAME", + help="only restore the specified devices", +) + +subparser = add_subparser( + subparsers, "show-option", help="print the value of a configuration option" +) +subparser.set_defaults(op=op_show_option) +subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") +subparser.add_argument("option", metavar="OPTION", help="option name") +subparser.add_argument( + "--include-physical", + action="store_true", + help="include the physical specification in the evaluation", +) + +subparser = add_subparser( + subparsers, + "list-generations", + help="list previous configurations to which you can roll back", +) +subparser.set_defaults(op=op_list_generations) + +subparser = add_subparser( + subparsers, "rollback", help="roll back to a previous configuration" +) +subparser.set_defaults(op=op_rollback) +subparser.add_argument( + "generation", + type=int, + metavar="GENERATION", + help="number of the desired configuration (see ‘nixops list-generations’)", +) +add_common_deployment_options(subparser) + +subparser = add_subparser( + subparsers, "delete-generation", help="remove a previous configuration" +) +subparser.set_defaults(op=op_delete_generation) 
+subparser.add_argument( + "generation", + type=int, + metavar="GENERATION", + help="number of the desired configuration (see ‘nixops list-generations’)", +) +add_common_deployment_options(subparser) + +subparser = add_subparser( + subparsers, + "show-console-output", + help="print the machine's console output on stdout", +) +subparser.set_defaults(op=op_show_console_output) +subparser.add_argument("machine", metavar="MACHINE", help="identifier of the machine") +add_common_deployment_options(subparser) + +subparser = add_subparser( + subparsers, "dump-nix-paths", help="dump Nix paths referenced in deployments" +) +subparser.add_argument( + "--all", action="store_true", help="dump Nix paths for all deployments" +) +subparser.set_defaults(op=op_dump_nix_paths) +add_common_deployment_options(subparser) + +subparser = add_subparser(subparsers, "export", help="export the state of a deployment") +subparser.add_argument("--all", action="store_true", help="export all deployments") +subparser.set_defaults(op=op_export) + +subparser = add_subparser( + subparsers, "import", help="import deployments into the state file" +) +subparser.add_argument( + "--include-keys", + action="store_true", + help="import public SSH hosts keys to .ssh/known_hosts", +) +subparser.set_defaults(op=op_import) + +subparser = add_subparser( + subparsers, "edit", help="open the deployment specification in $EDITOR" +) +subparser.set_defaults(op=op_edit) + +subparser = add_subparser( + subparsers, "copy-closure", help="copy closure to a target machine" +) +subparser.add_argument("machine", help="identifier of the machine") +subparser.add_argument("storepath", help="store path of the closure to be copied") +subparser.set_defaults(op=op_copy_closure) + +subparser = subparsers.add_parser( + "list-plugins", help="list the available nixops plugins" +) +subparser.set_defaults(op=op_list_plugins) +subparser.add_argument( + "--verbose", "-v", action="store_true", help="Provide extra plugin information" +) 
+subparser.add_argument("--debug", action="store_true", help="enable debug output") + +subparser = add_subparser(subparsers, "unlock", help="Force unlock the deployment lock") +subparser.set_defaults(op=op_unlock) + +parser_plugin_hooks(parser, subparsers) diff --git a/nixops/backends/__init__.py b/nixops/backends/__init__.py index c716cded2..0b24331e4 100644 --- a/nixops/backends/__init__.py +++ b/nixops/backends/__init__.py @@ -20,6 +20,7 @@ from nixops.state import RecordId import subprocess import threading +import nixops class KeyOptions(nixops.resources.ResourceOptions): @@ -44,6 +45,7 @@ class MachineOptions(nixops.resources.ResourceOptions): targetUser: Optional[str] sshOptions: Sequence[str] privilegeEscalationCommand: Sequence[str] + provisionSSHKey: bool class MachineDefinition(nixops.resources.ResourceDefinition): @@ -53,27 +55,28 @@ class MachineDefinition(nixops.resources.ResourceDefinition): ssh_port: int always_activate: bool - owners: List[str] + owners: Sequence[str] has_fast_connection: bool keys: Mapping[str, KeyOptions] ssh_user: str - ssh_options: List[str] - privilege_escalation_command: List[str] + ssh_options: Sequence[str] + privilege_escalation_command: Sequence[str] provision_ssh_key: bool def __init__(self, name: str, config: nixops.resources.ResourceEval): super().__init__(name, config) - self.ssh_port = config["targetPort"] - self.always_activate = config["alwaysActivate"] - self.owners = config["owners"] - self.has_fast_connection = config["hasFastConnection"] + self.ssh_port = self.config.targetPort + self.always_activate = self.config.alwaysActivate + self.owners = self.config.owners + self.has_fast_connection = self.config.hasFastConnection + # TODO: Extend MutableValidatedObject to handle this case self.keys = {k: KeyOptions(**v) for k, v in config["keys"].items()} - self.ssh_options = config["sshOptions"] + self.ssh_options = self.config.sshOptions - self.ssh_user = config["targetUser"] + self.ssh_user = self.config.targetUser 
or "root" - self.privilege_escalation_command = config["privilegeEscalationCommand"] - self.provision_ssh_key = config["provisionSSHKey"] + self.privilege_escalation_command = self.config.privilegeEscalationCommand + self.provision_ssh_key = self.config.provisionSSHKey MachineDefinitionType = TypeVar("MachineDefinitionType", bound="MachineDefinition") @@ -96,7 +99,7 @@ class MachineState( _ssh_pinged_this_time: bool = False ssh_port: int = nixops.util.attr_property("targetPort", None, int) ssh_user: str = nixops.util.attr_property("targetUser", "root", str) - ssh_options: List[str] = nixops.util.attr_property("sshOptions", [], "json") + ssh_options: Sequence[str] = nixops.util.attr_property("sshOptions", [], "json") privilege_escalation_command: List[str] = nixops.util.attr_property( "privilegeEscalationCommand", [], "json" ) @@ -116,9 +119,6 @@ class MachineState( cur_toplevel: Optional[str] = nixops.util.attr_property("toplevel", None) new_toplevel: Optional[str] - # Immutable flake URI from which this machine was built. - cur_flake_uri: Optional[str] = nixops.util.attr_property("curFlakeUri", None) - # Time (in Unix epoch) the instance was started, if known. 
start_time: Optional[int] = nixops.util.attr_property("startTime", None, int) @@ -128,7 +128,9 @@ class MachineState( defn: Optional[MachineDefinition] = None - def __init__(self, depl, name: str, id: RecordId) -> None: + def __init__( + self, depl: "nixops.deployment.Deployment", name: str, id: RecordId + ) -> None: super().__init__(depl, name, id) self.defn = None self._ssh_pinged_this_time = False @@ -164,17 +166,21 @@ def set_common_state(self, defn: MachineDefinitionType) -> None: def stop(self) -> None: """Stop this machine, if possible.""" - self.warn("don't know how to stop machine ‘{0}’".format(self.name)) + self.logger.warn("don't know how to stop machine ‘{0}’".format(self.name)) def start(self) -> None: """Start this machine, if possible.""" pass - def get_load_avg(self) -> Union[List[str], None]: + def get_load_avg(self) -> Optional[List[str]]: """Get the load averages on the machine.""" try: - res = ( - self.run_command("cat /proc/loadavg", capture_stdout=True, timeout=15) + res: List[str] = ( + str( + self.run_command( + "cat /proc/loadavg", capture_stdout=True, timeout=15 + ) + ) .rstrip() .split(" ") ) @@ -193,12 +199,13 @@ def check(self): # TODO -> CheckResult, but supertype ResourceState -> True self._check(res) return res - def _check(self, res): # TODO -> None but supertype ResourceState -> True + def _check(self, res): avg = self.get_load_avg() if avg is None: if self.state == self.UP: self.state = self.UNREACHABLE res.is_reachable = False + return False else: self.state = self.UP self.ssh_pinged = True @@ -206,6 +213,8 @@ def _check(self, res): # TODO -> None but supertype ResourceState -> True res.is_reachable = True res.load = avg + # Get the systemd units that are in a failed state or in progress. + # Get the systemd units that are in a failed state or in progress. # cat to inhibit color output. 
out: List[str] = str( @@ -280,7 +289,7 @@ def backup(self, defn, backup_id: str, devices: List[str] = []) -> None: def reboot(self, hard: bool = False) -> None: """Reboot this machine.""" - self.log("rebooting...") + self.logger.log("rebooting...") if self.state == self.RESCUE: # We're on non-NixOS here, so systemd might not be available. # The sleep is to prevent the reboot from causing the SSH @@ -295,7 +304,7 @@ def reboot(self, hard: bool = False) -> None: def ping(self) -> bool: event = threading.Event() - def _worker(): + def _worker() -> bool: try: self.ssh.run_command( ["true"], @@ -309,6 +318,7 @@ def _worker(): return False else: event.set() + return True t = threading.Thread(target=_worker) t.start() @@ -339,18 +349,18 @@ def wait_for_down( def reboot_sync(self, hard: bool = False) -> None: """Reboot this machine and wait until it's up again.""" self.reboot(hard=hard) - self.log_start("waiting for the machine to finish rebooting...") + self.logger.log_start("waiting for the machine to finish rebooting...") def progress_cb() -> None: - self.log_continue(".") + self.logger.log_continue(".") self.wait_for_down(callback=progress_cb) - self.log_continue("[down]") + self.logger.log_continue("[down]") self.wait_for_up(callback=progress_cb) - self.log_end("[up]") + self.logger.log_end("[up]") self.state = self.UP self.ssh_pinged = True self._ssh_pinged_this_time = True @@ -360,7 +370,9 @@ def reboot_rescue(self, hard: bool = False) -> None: """ Reboot machine into rescue system and wait until it is active. 
""" - self.warn("machine ‘{0}’ doesn't have a rescue" " system.".format(self.name)) + self.logger.warn( + "machine ‘{0}’ doesn't have a rescue" " system.".format(self.name) + ) def send_keys(self) -> None: if self.state == self.RESCUE: @@ -371,10 +383,10 @@ def send_keys(self) -> None: return for k, opts in self.get_keys().items(): - self.log("uploading key ‘{0}’ to ‘{1}’...".format(k, opts["path"])) + self.logger.log("uploading key ‘{0}’ to ‘{1}’...".format(k, opts["path"])) tmp = self.depl.tempdir + "/key-" + self.name - destDir = opts["destDir"].rstrip("/") + destDir: str = opts["destDir"].rstrip("/") self.run_command( ( "test -d '{0}' || (" @@ -440,10 +452,10 @@ def send_keys(self) -> None: def get_keys(self): return self.keys - def get_ssh_name(self): + def get_ssh_name(self) -> str: assert False - def get_ssh_flags(self, scp=False) -> List[str]: + def get_ssh_flags(self, scp: bool = False) -> List[str]: if scp: return ["-P", str(self.ssh_port)] if self.ssh_port is not None else [] else: @@ -454,7 +466,7 @@ def get_ssh_flags(self, scp=False) -> List[str]: def get_ssh_password(self): return None - def get_ssh_for_copy_closure(self): + def get_ssh_for_copy_closure(self) -> nixops.ssh_util.SSH: return self.ssh @property @@ -465,25 +477,25 @@ def public_host_key(self): def private_ipv4(self) -> Optional[str]: return None - def address_to(self, r): + def address_to(self, r: nixops.resources.GenericResourceState) -> Optional[str]: """Return the IP address to be used to access resource "r" from this machine.""" return r.public_ipv4 - def wait_for_ssh(self, check=False): + def wait_for_ssh(self, check: bool = False) -> None: """Wait until the SSH port is open on this machine.""" if self.ssh_pinged and (not check or self._ssh_pinged_this_time): return - self.log_start("waiting for SSH...") + self.logger.log_start("waiting for SSH...") - self.wait_for_up(callback=lambda: self.log_continue(".")) + self.wait_for_up(callback=lambda: self.logger.log_continue(".")) - 
self.log_end("") + self.logger.log_end("") if self.state != self.RESCUE: self.state = self.UP self.ssh_pinged = True self._ssh_pinged_this_time = True - def write_ssh_private_key(self, private_key) -> str: + def write_ssh_private_key(self, private_key: str) -> str: key_file = "{0}/id_nixops-{1}".format(self.depl.tempdir, self.name) with os.fdopen(os.open(key_file, os.O_CREAT | os.O_WRONLY, 0o600), "w") as f: f.write(private_key) @@ -493,10 +505,10 @@ def write_ssh_private_key(self, private_key) -> str: def get_ssh_private_key_file(self) -> Optional[str]: return None - def _logged_exec(self, command, **kwargs): + def _logged_exec(self, command: List[str], **kwargs) -> Union[str, int]: return nixops.util.logged_exec(command, self.logger, **kwargs) - def run_command(self, command, **kwargs): + def run_command(self, command, **kwargs) -> Union[str, int]: """ Execute a command on the machine via SSH. @@ -525,9 +537,9 @@ def switch_to_configuration( else: cmd += command cmd += " " + method - return self.run_command(cmd, check=False) + return int(self.run_command(cmd, check=False)) - def copy_closure_to(self, path): + def copy_closure_to(self, path: str) -> None: """Copy a closure to this machine.""" # !!! 
Implement copying between cloud machines, as in the Perl @@ -576,7 +588,9 @@ def _fmt_rsync_command(self, *args: str, recursive: bool = False) -> List[str]: return cmdline - def upload_file(self, source: str, target: str, recursive: bool = False): + def upload_file( + self, source: str, target: str, recursive: bool = False + ) -> Union[str, int]: cmdline = self._fmt_rsync_command( source, self.ssh_user + "@" + self._get_scp_name() + ":" + target, @@ -584,7 +598,9 @@ def upload_file(self, source: str, target: str, recursive: bool = False): ) return self._logged_exec(cmdline) - def download_file(self, source: str, target: str, recursive: bool = False): + def download_file( + self, source: str, target: str, recursive: bool = False + ) -> Union[str, int]: cmdline = self._fmt_rsync_command( self.ssh_user + "@" + self._get_scp_name() + ":" + source, target, @@ -592,7 +608,7 @@ def download_file(self, source: str, target: str, recursive: bool = False): ) return self._logged_exec(cmdline) - def get_console_output(self): + def get_console_output(self) -> str: return "(not available for this machine type)\n" diff --git a/nixops/backends/none.py b/nixops/backends/none.py index d7eb6660c..3e2536b62 100644 --- a/nixops/backends/none.py +++ b/nixops/backends/none.py @@ -1,11 +1,14 @@ # -*- coding: utf-8 -*- -from typing import Optional +from typing import Optional, List import nixops.util from nixops.backends import MachineDefinition, MachineState, MachineOptions from nixops.util import attr_property, create_key_pair +from nixops.state import RecordId import nixops.resources +import nixops + class NoneDefinition(MachineDefinition): """Definition of a trivial machine.""" @@ -16,7 +19,7 @@ class NoneDefinition(MachineDefinition): config: MachineOptions @classmethod - def get_type(cls): + def get_type(cls) -> str: return "none" def __init__(self, name: str, config: nixops.resources.ResourceEval): @@ -29,16 +32,18 @@ class NoneState(MachineState[NoneDefinition]): """State of a 
trivial machine.""" @classmethod - def get_type(cls): + def get_type(cls) -> str: return "none" - target_host = nixops.util.attr_property("targetHost", None) - public_ipv4 = nixops.util.attr_property("publicIpv4", None) + target_host: str = nixops.util.attr_property("targetHost", None) + public_ipv4: Optional[str] = nixops.util.attr_property("publicIpv4", None) _ssh_private_key: Optional[str] = attr_property("none.sshPrivateKey", None) _ssh_public_key: Optional[str] = attr_property("none.sshPublicKey", None) - _ssh_public_key_deployed = attr_property("none.sshPublicKeyDeployed", False, bool) + _ssh_public_key_deployed: bool = attr_property( + "none.sshPublicKeyDeployed", False, bool + ) - def __init__(self, depl, name, id): + def __init__(self, depl: "nixops.deployment.Deployment", name: str, id: RecordId): MachineState.__init__(self, depl, name, id) @property @@ -68,7 +73,7 @@ def create( check: bool, allow_reboot: bool, allow_recreate: bool, - ): + ) -> None: assert isinstance(defn, NoneDefinition) self.set_common_state(defn) self.target_host = defn._target_host @@ -76,22 +81,24 @@ def create( if not self.vm_id: if self.provision_ssh_key: - self.log_start("generating new SSH key pair... ") + self.logger.log_start("generating new SSH key pair... 
") key_name = "NixOps client key for {0}".format(self.name) self._ssh_private_key, self._ssh_public_key = create_key_pair( key_name=key_name ) - self.log_end("done") + self.logger.log_end("done") self.vm_id = "nixops-{0}-{1}".format(self.depl.uuid, self.name) - def switch_to_configuration(self, method, sync, command=None): + def switch_to_configuration( + self, method: str, sync: bool, command: Optional[str] = None + ) -> int: res = super(NoneState, self).switch_to_configuration(method, sync, command) if res == 0: self._ssh_public_key_deployed = True return res - def get_ssh_name(self): + def get_ssh_name(self) -> str: assert self.target_host return self.target_host @@ -102,7 +109,7 @@ def get_ssh_private_key_file(self) -> Optional[str]: return self.write_ssh_private_key(self._ssh_private_key) return None - def get_ssh_flags(self, *args, **kwargs): + def get_ssh_flags(self, *args, **kwargs) -> List[str]: super_state_flags = super(NoneState, self).get_ssh_flags(*args, **kwargs) if self.vm_id and self.cur_toplevel and self._ssh_public_key_deployed: key_file = self.get_ssh_private_key_file() @@ -125,6 +132,6 @@ def _check(self, res): if res.is_up: super()._check(res) - def destroy(self, wipe=False): + def destroy(self, wipe: bool = False) -> bool: # No-op; just forget about the machine. 
return True diff --git a/nixops/deployment.py b/nixops/deployment.py index 6c85922c4..815bf74c5 100644 --- a/nixops/deployment.py +++ b/nixops/deployment.py @@ -4,7 +4,6 @@ import sys import os.path import subprocess -import json import tempfile import threading from collections import defaultdict @@ -15,12 +14,11 @@ import traceback import glob import fcntl -import itertools import platform import time import importlib -from functools import reduce +from functools import reduce, lru_cache from typing import ( Callable, Dict, @@ -47,20 +45,17 @@ from nixops.nix_expr import RawValue, Function, Call, nixmerge, py2nix from nixops.ansi import ansi_success +import nixops.evaluation Definitions = Dict[str, nixops.resources.ResourceDefinition] -class NixEvalError(Exception): - pass - - class UnknownBackend(Exception): pass -DEBUG = False +DEBUG: bool = False NixosConfigurationType = List[Dict[Tuple[str, ...], Any]] @@ -74,11 +69,8 @@ class Deployment: default_description = "Unnamed NixOps network" name: Optional[str] = nixops.util.attr_property("name", None) - nix_exprs = nixops.util.attr_property("nixExprs", [], "json") nix_path = nixops.util.attr_property("nixPath", [], "json") - flake_uri = nixops.util.attr_property("flakeUri", None) - cur_flake_uri = nixops.util.attr_property("curFlakeUri", None) - args = nixops.util.attr_property("args", {}, "json") + args: Dict[str, str] = nixops.util.attr_property("args", {}, "json") description = nixops.util.attr_property("description", default_description) configs_path = nixops.util.attr_property("configsPath", None) rollback_enabled: bool = nixops.util.attr_property("rollbackEnabled", False) @@ -86,6 +78,8 @@ class Deployment: # internal variable to mark if network attribute of network has been evaluated (separately) network_attr_eval: bool = False + network_expr: nixops.evaluation.NetworkFile + def __init__( self, statefile, uuid: str, log_file: TextIO = sys.stderr, ): @@ -104,15 +98,7 @@ def __init__( self._lock_file_path: 
Optional[str] = None - self.expr_path = os.path.realpath( - os.path.dirname(__file__) + "/../../../../share/nix/nixops" - ) - if not os.path.exists(self.expr_path): - self.expr_path = os.path.realpath( - os.path.dirname(__file__) + "/../../../../../share/nix/nixops" - ) - if not os.path.exists(self.expr_path): - self.expr_path = os.path.dirname(__file__) + "/../nix" + self.expr_path = nixops.evaluation.get_expr_path() self.resources: Dict[str, nixops.resources.GenericResourceState] = {} with self._db: @@ -128,8 +114,6 @@ def __init__( self.definitions: Optional[Definitions] = None - self._cur_flake_uri: Optional[str] = None - @property def tempdir(self) -> nixops.util.SelfDeletingDir: if not self._tempdir: @@ -138,20 +122,6 @@ def tempdir(self) -> nixops.util.SelfDeletingDir: ) return self._tempdir - def _get_cur_flake_uri(self): - assert self.flake_uri is not None - if self._cur_flake_uri is None: - out = json.loads( - subprocess.check_output( - ["nix", "flake", "metadata", "--json", "--", self.flake_uri], - stderr=self.logger.log_file, - ) - ) - self._cur_flake_uri = out["url"].replace( - "ref=HEAD&rev=0000000000000000000000000000000000000000&", "" - ) # FIXME - return self._cur_flake_uri - @property def machines(self) -> Dict[str, nixops.backends.GenericMachineState]: return _filter_machines(self.resources) @@ -413,65 +383,6 @@ def delete(self, force: bool = False) -> None: # Delete the deployment from the database. 
self._db.execute("delete from Deployments where uuid = ?", (self.uuid,)) - def _nix_path_flags(self) -> List[str]: - extraexprs = PluginManager.nixexprs() - - flags = ( - list( - itertools.chain( - *[ - ["-I", x] - for x in (self.extra_nix_path + self.nix_path + extraexprs) - ] - ) - ) - + self.extra_nix_flags - ) - flags.extend(["-I", "nixops=" + self.expr_path]) - return flags - - def _eval_flags(self, exprs: List[str]) -> List[str]: - flags = self._nix_path_flags() - args = {key: RawValue(val) for key, val in self.args.items()} - exprs_ = [RawValue(x) if x[0] == "<" else x for x in exprs] - - extraexprs = PluginManager.nixexprs() - - flags.extend( - [ - "--arg", - "networkExprs", - py2nix(exprs_, inline=True), - "--arg", - "args", - py2nix(args, inline=True), - "--argstr", - "uuid", - self.uuid, - "--argstr", - "deploymentName", - self.name if self.name else "", - "--arg", - "pluginNixExprs", - py2nix(extraexprs), - (self.expr_path + "/eval-machine-info.nix"), - ] - ) - - if self.flake_uri is not None: - flags.extend( - [ - # "--pure-eval", # FIXME - "--argstr", - "flakeUri", - self._get_cur_flake_uri(), - "--allowed-uris", - self.expr_path, - ] - ) - - return flags - def set_arg(self, name: str, value: str) -> None: """Set a persistent argument to the deployment specification.""" assert isinstance(name, str) @@ -494,56 +405,17 @@ def unset_arg(self, name: str) -> None: def evaluate_args(self) -> Any: """Evaluate the NixOps network expression's arguments.""" - try: - out = subprocess.check_output( - ["nix-instantiate"] - + self.extra_nix_eval_flags - + self._eval_flags(self.nix_exprs) - + ["--eval-only", "--json", "--strict", "-A", "nixopsArguments"], - stderr=self.logger.log_file, - text=True, - ) - if DEBUG: - print("JSON output of nix-instantiate:\n" + out, file=sys.stderr) - return json.loads(out) - except OSError as e: - raise Exception("unable to run ‘nix-instantiate’: {0}".format(e)) - except subprocess.CalledProcessError: - raise NixEvalError + return 
self.eval(attr="nixopsArguments") + @lru_cache() def evaluate_config(self, attr) -> Dict: - try: - _json = subprocess.check_output( - ["nix-instantiate"] - + self.extra_nix_eval_flags - + self._eval_flags(self.nix_exprs) - + [ - "--eval-only", - "--json", - "--strict", - "--arg", - "checkConfigurationOptions", - "false", - "-A", - attr, - ], - stderr=self.logger.log_file, - text=True, - ) - if DEBUG: - print("JSON output of nix-instantiate:\n" + _json, file=sys.stderr) - except OSError as e: - raise Exception("unable to run ‘nix-instantiate’: {0}".format(e)) - except subprocess.CalledProcessError: - raise NixEvalError - - return json.loads(_json) + return self.eval(checkConfigurationOptions=False, attr=attr) def evaluate_network(self, action: str = "") -> None: if not self.network_attr_eval: # Extract global deployment attributes. try: - config = self.evaluate_config("info.network") + config = self.evaluate_config("info")["network"] except Exception as e: if action not in ("destroy", "delete"): raise e @@ -573,44 +445,48 @@ def evaluate(self) -> None: ) self.definitions[name] = defn - def evaluate_option_value( + def eval( self, - machine_name: str, - option_name: str, - json: bool = False, - xml: bool = False, + nix_args: Dict[str, Any] = {}, + attr: Optional[str] = None, include_physical: bool = False, - ) -> str: - """Evaluate a single option of a single machine in the deployment specification.""" + checkConfigurationOptions: bool = True, + ) -> Any: - exprs = self.nix_exprs + exprs: List[str] = [] if include_physical: phys_expr = self.tempdir + "/physical.nix" with open(phys_expr, "w") as f: f.write(self.get_physical_spec()) exprs.append(phys_expr) - try: - return subprocess.check_output( - ["nix-instantiate"] - + self.extra_nix_eval_flags - + self._eval_flags(exprs) - + [ - "--eval-only", - "--strict", - "--arg", - "checkConfigurationOptions", - "false", - "-A", - "nodes.{0}.config.{1}".format(machine_name, option_name), - ] - + (["--json"] if json else []) - 
+ (["--xml"] if xml else []), - stderr=self.logger.log_file, - text=True, - ) - except subprocess.CalledProcessError: - raise NixEvalError + return nixops.evaluation.eval( + # eval-machine-info args + networkExpr=self.network_expr, + networkExprs=exprs, + uuid=self.uuid, + deploymentName=self.name or "", + args=self.args, + pluginNixExprs=PluginManager.nixexprs(), + # Extend defaults + nix_path=self.extra_nix_path + self.nix_path, + # nix-instantiate args + nix_args=nix_args, + attr=attr, + extra_flags=self.extra_nix_eval_flags, + # Non-propagated args + stderr=self.logger.log_file, + ) + + def evaluate_option_value( + self, machine_name: str, option_name: str, include_physical: bool = False, + ) -> Any: + """Evaluate a single option of a single machine in the deployment specification.""" + return self.eval( + checkConfigurationOptions=False, + include_physical=include_physical, + attr="nodes.{0}.config.{1}".format(machine_name, option_name), + ) def get_arguments(self) -> Any: try: @@ -809,17 +685,18 @@ def build_configs( self.logger.log("building all machine configurations...") - # Set the NixOS version suffix, if we're building from Git. - # That way ‘nixos-version’ will show something useful on the - # target machines. + # TODO: Use `lib.versionSuffix` from nixpkgs through an eval + # TODO: `lib.versionSuffix` doesn't really work for git repos, fix in nixpkgs. # - # TODO: Implement flake compatible version - nixos_path = str(self.evaluate_config("nixpkgs")) - get_version_script = nixos_path + "/modules/installer/tools/get-version-suffix" - if os.path.exists(nixos_path + "/.git") and os.path.exists(get_version_script): - self.nixos_version_suffix = subprocess.check_output( - ["/bin/sh", get_version_script] + self._nix_path_flags(), text=True - ).rstrip() + # # Set the NixOS version suffix, if we're building from Git. + # # That way ‘nixos-version’ will show something useful on the + # # target machines. 
+ # nixos_path = str(self.evaluate_config("nixpkgs")) + # get_version_script = nixos_path + "/modules/installer/tools/get-version-suffix" + # if os.path.exists(nixos_path + "/.git") and os.path.exists(get_version_script): + # self.nixos_version_suffix = subprocess.check_output( + # ["/bin/sh", get_version_script] + self._nix_path_flags(), text=True + # ).rstrip() phys_expr = self.tempdir + "/physical.nix" p = self.get_physical_spec() @@ -879,23 +756,24 @@ def build_configs( os.environ["NIX_CURRENT_LOAD"] = load_dir try: - configs_path = subprocess.check_output( - ["nix-build"] - + self._eval_flags(self.nix_exprs + [phys_expr]) - + [ - "--arg", - "names", - py2nix(names, inline=True), - "-A", - "machines", - "-o", - self.tempdir + "/configs", - ] + drv: str = self.eval( + include_physical=True, + nix_args={"names": names}, + attr="machines.drvPath", + ) + + argv: List[str] = ( + ["nix-store", "-r"] + + self.extra_nix_flags + (["--dry-run"] if dry_run else []) - + (["--repair"] if repair else []), - stderr=self.logger.log_file, - text=True, + + (["--repair"] if repair else []) + + [drv] + ) + + configs_path = subprocess.check_output( + argv, text=True, stderr=self.logger.log_file, ).rstrip() + except subprocess.CalledProcessError: raise Exception("unable to build all machine configurations") @@ -1011,10 +889,6 @@ def set_profile(): if dry_activate: return None - self.cur_flake_uri = ( - self._get_cur_flake_uri() if self.flake_uri is not None else None - ) - if res != 0 and res != 100: raise Exception( "unable to activate new configuration (exit code {})".format( @@ -1041,9 +915,6 @@ def set_profile(): # configuration. 
m.cur_configs_path = configs_path m.cur_toplevel = m.new_toplevel - m.cur_flake_uri = ( - self._get_cur_flake_uri() if self.flake_uri is not None else None - ) except Exception: # This thread shouldn't throw an exception because @@ -1351,8 +1222,10 @@ def worker(r: nixops.resources.GenericResourceState): # attribute, because the machine may have been # booted from an older NixOS image. if not m.state_version: - os_release = m.run_command( - "cat /etc/os-release", capture_stdout=True + os_release = str( + m.run_command( + "cat /etc/os-release", capture_stdout=True + ) ) match = re.search( 'VERSION_ID="([0-9]+\.[0-9]+).*"', # noqa: W605 @@ -1555,8 +1428,6 @@ def _rollback( max_concurrent_activate=max_concurrent_activate, ) - self.cur_flake_uri = None - def rollback(self, **kwargs: Any) -> None: with self._get_deployment_lock(): self._rollback(**kwargs) diff --git a/nixops/evaluation.py b/nixops/evaluation.py new file mode 100644 index 000000000..8db711e35 --- /dev/null +++ b/nixops/evaluation.py @@ -0,0 +1,204 @@ +from nixops.nix_expr import RawValue, py2nix +import subprocess +import typing +from typing import Optional, Mapping, Any, List, Dict, TextIO +import json +from nixops.util import ImmutableValidatedObject +from nixops.exceptions import NixError +import itertools +import os.path +import os +from dataclasses import dataclass + + +class NixEvalError(NixError): + pass + + +class MalformedNetworkError(NixError): + pass + + +class GenericStorageConfig(ImmutableValidatedObject): + provider: str + configuration: typing.Mapping[typing.Any, typing.Any] + + +class GenericLockConfig(ImmutableValidatedObject): + provider: str + configuration: typing.Mapping[typing.Any, typing.Any] + + +class NetworkEval(ImmutableValidatedObject): + storage: GenericStorageConfig + lock: GenericLockConfig + description: str = "Unnamed NixOps network" + enableRollback: bool = False + + +class RawNetworkEval(ImmutableValidatedObject): + storage: Optional[Mapping[str, Any]] + lock: 
Optional[Mapping[str, Any]] + description: Optional[str] + enableRollback: Optional[bool] + + +class EvalResult(ImmutableValidatedObject): + exists: bool + value: Any + + +@dataclass +class NetworkFile: + network: str + is_flake: bool = False + + +def get_expr_path() -> str: + expr_path: str = os.path.realpath( + os.path.dirname(__file__) + "/../../../../share/nix/nixops" + ) + if not os.path.exists(expr_path): + expr_path = os.path.realpath( + os.path.dirname(__file__) + "/../../../../../share/nix/nixops" + ) + if not os.path.exists(expr_path): + expr_path = os.path.dirname(__file__) + "/../nix" + return expr_path + + +def eval( + # eval-machine-info args + networkExpr: NetworkFile, # Flake conditional + uuid: str, + deploymentName: str, + networkExprs: List[str] = [], + args: Dict[str, str] = {}, + pluginNixExprs: List[str] = [], + checkConfigurationOptions: bool = True, + # Extend internal defaults + nix_path: List[str] = [], + # nix-instantiate args + nix_args: Dict[str, Any] = {}, + attr: Optional[str] = None, + extra_flags: List[str] = [], + # Non-propagated args + stderr: Optional[TextIO] = None, +) -> Any: + + exprs: List[str] = list(networkExprs) + if not networkExpr.is_flake: + exprs.append(networkExpr.network) + + argv: List[str] = ( + ["nix-instantiate", "--eval-only", "--json", "--strict", "--show-trace"] + + [os.path.join(get_expr_path(), "eval-machine-info.nix")] + + ["-I", "nixops=" + get_expr_path()] + + [ + "--arg", + "networkExprs", + py2nix([RawValue(x) if x[0] == "<" else x for x in exprs]), + ] + + [ + "--arg", + "args", + py2nix({key: RawValue(val) for key, val in args.items()}, inline=True), + ] + + ["--argstr", "uuid", uuid] + + ["--argstr", "deploymentName", deploymentName] + + ["--arg", "pluginNixExprs", py2nix(pluginNixExprs)] + + ["--arg", "checkConfigurationOptions", json.dumps(checkConfigurationOptions)] + + list(itertools.chain(*[["-I", x] for x in (nix_path + pluginNixExprs)])) + + extra_flags + ) + + for k, v in nix_args.items(): + 
argv.extend(["--arg", k, py2nix(v, inline=True)]) + + if attr: + argv.extend(["-A", attr]) + + if networkExpr.is_flake: + argv.extend(["--allowed-uris", get_expr_path()]) + argv.extend(["--argstr", "flakeUri", networkExpr.network]) + + try: + ret = subprocess.check_output(argv, stderr=stderr, text=True) + return json.loads(ret) + except OSError as e: + raise Exception("unable to run ‘nix-instantiate’: {0}".format(e)) + except subprocess.CalledProcessError: + raise NixEvalError + + +def eval_network(nix_expr: NetworkFile) -> NetworkEval: + try: + result = eval( + networkExpr=nix_expr, + uuid="dummy", + deploymentName="dummy", + attr="info.network", + ) + except Exception: + raise NixEvalError("No network attribute found") + + if result.get("storage") is None: + raise MalformedNetworkError( + """ +WARNING: NixOps 1.0 -> 2.0 conversion step required + +NixOps 2.0 added support for multiple storage backends. + +Upgrade steps: +1. Open %s +2. Add: + network.storage.legacy = { + databasefile = "~/.nixops/deployments.nixops" + } +3. Rerun +""" + % nix_expr.network + ) + + raw_eval = RawNetworkEval(**result) + + storage: Mapping[str, Any] = raw_eval.storage or {} + if len(storage) > 1: + raise MalformedNetworkError( + "Invalid property: network.storage can only have one defined storage backend." + ) + storage_config: Optional[Mapping[str, Any]] + try: + storage_key = list(storage.keys()).pop() + storage_value = storage[storage_key] + storage_config = {"provider": storage_key, "configuration": storage_value} + except IndexError: + raise MalformedNetworkError( + "Missing property: network.storage has no defined storage backend." + ) + + lock: Mapping[str, Any] = raw_eval.lock or {} + if len(lock) > 1: + raise MalformedNetworkError( + "Invalid property: network.lock can only have one defined lock backend." 
+ ) + + lock_config: Optional[Mapping[str, Any]] + try: + lock_key = list(lock.keys()).pop() + lock_config = { + "provider": lock_key, + "configuration": lock[lock_key], + } + except IndexError: + lock_config = { + "provider": "noop", + "configuration": {}, + } + + return NetworkEval( + enableRollback=raw_eval.enableRollback or False, + description=raw_eval.description or "Unnamed NixOps network", + storage=storage_config, + lock=lock_config, + ) diff --git a/nixops/exceptions.py b/nixops/exceptions.py new file mode 100644 index 000000000..cce58cd3d --- /dev/null +++ b/nixops/exceptions.py @@ -0,0 +1,2 @@ +class NixError(Exception): + pass diff --git a/nixops/locks/__init__.py b/nixops/locks/__init__.py new file mode 100644 index 000000000..c3ba726c5 --- /dev/null +++ b/nixops/locks/__init__.py @@ -0,0 +1,40 @@ +from typing import TypeVar, Type +from typing_extensions import Protocol + + +LockOptions = TypeVar("LockOptions") + + +class LockDriver(Protocol[LockOptions]): + # Hack: Make T a mypy invariant. According to PEP-0544, a + # Protocol[T] whose T is only used in function arguments and + # returns is "de-facto covariant". + # + # However, a Protocol[T] which requires an attribute of type T is + # invariant, "since it has a mutable attribute". + # + # I don't really get it, to be honest. That said, since it is + # defined by the type, please set it ... even though mypy doesn't + # force you to. What even. + # + # See: https://www.python.org/dev/peps/pep-0544/#generic-protocols + __options: Type[LockOptions] + + @staticmethod + def options(**kwargs) -> LockOptions: + pass + + def __init__(self, args: LockOptions) -> None: + raise NotImplementedError + + # lock: acquire a lock. + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def lock(self, **kwargs) -> None: + raise NotImplementedError + + # unlock: release the lock. + # Note: no arguments will be passed over kwargs. 
Making it part of + # the type definition allows adding new arguments later. + def unlock(self, **kwargs) -> None: + raise NotImplementedError diff --git a/nixops/locks/noop.py b/nixops/locks/noop.py new file mode 100644 index 000000000..71632d7df --- /dev/null +++ b/nixops/locks/noop.py @@ -0,0 +1,23 @@ +from nixops.util import ImmutableValidatedObject +from . import LockDriver + + +class NoopLockOptions(ImmutableValidatedObject): + pass + + +class NoopLock(LockDriver[NoopLockOptions]): + __options = NoopLockOptions + + @staticmethod + def options(**kwargs) -> NoopLockOptions: + return NoopLockOptions(**kwargs) + + def __init__(self, args: NoopLockOptions) -> None: + pass + + def unlock(self, **_kwargs) -> None: + pass + + def lock(self, **_kwargs) -> None: + pass diff --git a/nixops/nix_expr.py b/nixops/nix_expr.py index 8c97e824b..75d53af4b 100644 --- a/nixops/nix_expr.py +++ b/nixops/nix_expr.py @@ -1,6 +1,6 @@ import functools import string -from typing import Optional, Any, List +from typing import Optional, Any, List, Union, Dict from textwrap import dedent __all__ = ["py2nix", "nix2py", "nixmerge", "expand_dict", "RawValue", "Function"] @@ -36,19 +36,19 @@ def get_min_length(self) -> None: def is_inlineable(self) -> bool: return False - def indent(self, level: int = 0, inline: bool = False, maxwidth: int = 80): + def indent(self, level: int = 0, inline: bool = False, maxwidth: int = 80) -> str: return "\n".join([" " * level + value for value in self.values]) class Function(object): - def __init__(self, head, body): - self.head = head - self.body = body + def __init__(self, head: Any, body: Any): + self.head: Any = head + self.body: Any = body - def __repr__(self): + def __repr__(self) -> str: return "{0} {1}".format(self.head, self.body) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return ( isinstance(other, Function) and other.head == self.head @@ -57,27 +57,27 @@ def __eq__(self, other): class Call(object): - def __init__(self, fun, 
arg): - self.fun = fun - self.arg = arg + def __init__(self, fun: Any, arg: Any): + self.fun: Any = fun + self.arg: Any = arg - def __repr__(self): + def __repr__(self) -> str: return "{0} {1}".format(self.fun, self.arg) - def __eq__(self, other): + def __eq__(self, other) -> bool: return ( isinstance(other, Call) and other.fun == self.fun and other.arg == self.arg ) class Container(object): - def __init__(self, prefix, children, suffix, inline_variant=None): - self.prefix = prefix - self.children = children - self.suffix = suffix + def __init__(self, prefix: str, children: List, suffix: str, inline_variant=None): + self.prefix: str = prefix + self.children: List = children + self.suffix: str = suffix self.inline_variant = inline_variant - def get_min_length(self) -> Optional[int]: + def get_min_length(self) -> int: """ Return the minimum length of this container and all sub-containers. """ @@ -92,7 +92,7 @@ def get_min_length(self) -> Optional[int]: def is_inlineable(self) -> bool: return all([child.is_inlineable() for child in self.children]) - def indent(self, level=0, inline=False, maxwidth=80) -> str: + def indent(self, level: int = 0, inline: bool = False, maxwidth: int = 80) -> str: if not self.is_inlineable(): inline = False elif level * 2 + self.get_min_length() < maxwidth: @@ -120,7 +120,9 @@ def indent(self, level=0, inline=False, maxwidth=80) -> str: return ind + self.prefix + sep + lines + sep + suffix_ind + self.suffix -def enclose_node(node, prefix="", suffix=""): +def enclose_node( + node: Any, prefix: str = "", suffix: str = "" +) -> Union[MultiLineRawValue, RawValue, Container]: if isinstance(node, MultiLineRawValue): new_values = list(node.values) new_values[0] = prefix + new_values[0] @@ -139,14 +141,16 @@ def enclose_node(node, prefix="", suffix=""): ) -def _fold_string(value, rules): - def folder(val, rule): +def _fold_string(value, rules) -> str: + def folder(val, rule) -> str: return val.replace(rule[0], rule[1]) return 
functools.reduce(folder, rules, value) -def py2nix(value, initial_indentation=0, maxwidth=80, inline=False): # noqa: C901 +def py2nix( # noqa: C901 + value: Any, initial_indentation: int = 0, maxwidth: int = 80, inline: bool = False +): """ Return the given value as a Nix expression string. @@ -163,7 +167,7 @@ def _enc_int(node): else: return RawValue(str(node)) - def _enc_str(node, for_attribute=False): + def _enc_str(node, for_attribute: bool = False): encoded = _fold_string( node, [ @@ -190,7 +194,7 @@ def _enc_str(node, for_attribute=False): else: return inline_variant - def _enc_list(nodes): + def _enc_list(nodes) -> Union[RawValue, Container]: if len(nodes) == 0: return RawValue("[]") pre, post = "[", "]" @@ -280,7 +284,7 @@ def _enc(node, inlist=False): return _enc(value).indent(initial_indentation, maxwidth=maxwidth, inline=inline) -def expand_dict(unexpanded): +def expand_dict(unexpanded) -> Dict: """ Turns a dict containing tuples as keys into a set of nested dictionaries. @@ -319,7 +323,7 @@ def nixmerge(expr1, expr2): elements if they otherwise would clash. """ - def _merge_dicts(d1, d2): + def _merge_dicts(d1, d2) -> Dict: out = {} for key in set(d1.keys()).union(d2.keys()): if key in d1 and key in d2: @@ -330,7 +334,7 @@ def _merge_dicts(d1, d2): out[key] = d2[key] return out - def _merge(e1, e2): + def _merge(e1, e2) -> Union[Dict, List]: if isinstance(e1, dict) and isinstance(e2, dict): return _merge_dicts(e1, e2) elif isinstance(e1, list) and isinstance(e2, list): @@ -348,7 +352,7 @@ def _merge(e1, e2): return _merge(expr1, expr2) -def nix2py(source): +def nix2py(source: str) -> MultiLineRawValue: """ Dedent the given Nix source code and encode it into multiple raw values which are used as-is and only indentation will take place. 
diff --git a/nixops/plugin.py b/nixops/plugin.py new file mode 100644 index 000000000..c7b1c1933 --- /dev/null +++ b/nixops/plugin.py @@ -0,0 +1,21 @@ +from nixops.storage import StorageBackend + +from nixops.storage.legacy import LegacyBackend +from nixops.storage.memory import MemoryBackend +import nixops.plugins +from nixops.locks import LockDriver +from nixops.locks.noop import NoopLock +from typing import Dict, Type + + +class InternalPlugin(nixops.plugins.Plugin): + def storage_backends(self) -> Dict[str, Type[StorageBackend]]: + return {"legacy": LegacyBackend, "memory": MemoryBackend} + + def lock_drivers(self) -> Dict[str, Type[LockDriver]]: + return {"noop": NoopLock} + + +@nixops.plugins.hookimpl +def plugin(): + return InternalPlugin() diff --git a/nixops/plugins/__init__.py b/nixops/plugins/__init__.py index 768a1ad0e..24115f6bb 100644 --- a/nixops/plugins/__init__.py +++ b/nixops/plugins/__init__.py @@ -1,9 +1,11 @@ from __future__ import annotations -from nixops.backends import MachineState -from typing import List, Dict, Optional, Union, Tuple +from nixops.backends import GenericMachineState +from typing import List, Dict, Optional, Union, Tuple, Type +from argparse import ArgumentParser, _SubParsersAction -from functools import lru_cache +from nixops.storage import StorageBackend +from nixops.locks import LockDriver from typing import Generator import pluggy import nixops @@ -13,8 +15,7 @@ """Marker to be imported and used in plugins (and for own implementations)""" -@lru_cache() -def get_plugin_manager(): +def get_plugin_manager() -> pluggy.PluginManager: from . 
import hookspecs pm = pluggy.PluginManager("nixops") @@ -46,7 +47,7 @@ def physical_spec( class MachineHooks: - def post_wait(self, m: MachineState) -> None: + def post_wait(self, m: GenericMachineState) -> None: """ Do action once SSH is available """ @@ -82,7 +83,7 @@ def nixexprs(self) -> List[str]: """ return [] - def parser(self, parser, subparsers): + def parser(self, parser: ArgumentParser, subparsers: _SubParsersAction) -> None: """ Extend the core nixops cli parser """ @@ -93,3 +94,12 @@ def docs(self) -> List[Tuple[str, str]]: :return a list of tuples (plugin_name, doc_path) """ return [] + + def lock_drivers(self) -> Dict[str, Type[LockDriver]]: + return {} + + def storage_backends(self) -> Dict[str, Type[StorageBackend]]: + """ Extend the core nixops cli parser + :return a set of plugin parser extensions + """ + return {} diff --git a/nixops/plugins/hookspecs.py b/nixops/plugins/hookspecs.py index a48622d3d..bb9fde7c2 100644 --- a/nixops/plugins/hookspecs.py +++ b/nixops/plugins/hookspecs.py @@ -11,4 +11,3 @@ def plugin() -> Plugin: """ Register a plugin base class """ - pass diff --git a/nixops/plugins/manager.py b/nixops/plugins/manager.py index 19c9bf144..05f4bd82c 100644 --- a/nixops/plugins/manager.py +++ b/nixops/plugins/manager.py @@ -1,11 +1,17 @@ from __future__ import annotations -from nixops.backends import MachineState +from nixops.backends import GenericMachineState from typing import List, Dict, Generator, Tuple, Any, Set import importlib +from argparse import ArgumentParser, _SubParsersAction +from nixops.storage import StorageBackend +from nixops.locks import LockDriver from . 
import get_plugins, MachineHooks, DeploymentHooks +import nixops.ansi import nixops +from typing import Type +import sys NixosConfigurationType = List[Dict[Tuple[str, ...], Any]] @@ -29,7 +35,7 @@ def physical_spec( class MachineHooksManager: @staticmethod - def post_wait(m: MachineState) -> None: + def post_wait(m: GenericMachineState) -> None: for hook in PluginManager.machine_hooks(): hook.post_wait(m) @@ -51,8 +57,8 @@ def machine_hooks() -> Generator[MachineHooks, None, None]: continue yield machine_hooks - @staticmethod - def load(): + @classmethod + def load(cls) -> None: seen: Set[str] = set() for plugin in get_plugins(): for mod in plugin.load(): @@ -68,7 +74,7 @@ def nixexprs() -> List[str]: return nixexprs @staticmethod - def parser(parser, subparsers): + def parser(parser: ArgumentParser, subparsers: _SubParsersAction) -> None: for plugin in get_plugins(): plugin.parser(parser, subparsers) @@ -76,3 +82,37 @@ def parser(parser, subparsers): def docs() -> Generator[Tuple[str, str], None, None]: for plugin in get_plugins(): yield from plugin.docs() + + @staticmethod + def storage_backends() -> Dict[str, Type[StorageBackend]]: + storage_backends: Dict[str, Type[StorageBackend]] = {} + + for plugin in get_plugins(): + for name, backend in plugin.storage_backends().items(): + if name not in storage_backends: + storage_backends[name] = backend + else: + sys.stderr.write( + nixops.ansi.ansi_warn( + f"Two plugins tried to provide the '{name}' storage backend." + ) + ) + + return storage_backends + + @staticmethod + def lock_drivers() -> Dict[str, Type[LockDriver]]: + lock_drivers: Dict[str, Type[LockDriver]] = {} + + for plugin in get_plugins(): + for name, driver in plugin.lock_drivers().items(): + if name not in lock_drivers: + lock_drivers[name] = driver + else: + sys.stderr.write( + nixops.ansi.ansi_warn( + f"Two plugins tried to provide the '{name}' lock driver." 
+ ) + ) + + return lock_drivers diff --git a/nixops/resources/__init__.py b/nixops/resources/__init__.py index ab3e93923..29bf671db 100644 --- a/nixops/resources/__init__.py +++ b/nixops/resources/__init__.py @@ -4,8 +4,19 @@ import re import nixops.util from threading import Event -from typing import List, Optional, Dict, Any, Type, TypeVar, Union, TYPE_CHECKING from nixops.monkey import Protocol, runtime_checkable +from typing import ( + List, + Optional, + Dict, + Any, + TypeVar, + Union, + TYPE_CHECKING, + Type, + Iterable, + Set, +) from nixops.state import StateDict, RecordId from nixops.diff import Diff, Handler from nixops.util import ImmutableMapping, ImmutableValidatedObject @@ -31,17 +42,19 @@ class ResourceDefinition: config: ResourceOptions @classmethod - def get_type(cls) -> str: + def get_type(cls: Type[ResourceDefinition]) -> str: """A resource type identifier that must match the corresponding ResourceState class""" raise NotImplementedError("get_type") @classmethod - def get_resource_type(cls): + def get_resource_type(cls: Type[ResourceDefinition]) -> str: """A resource type identifier corresponding to the resources. attribute in the Nix expression""" return cls.get_type() def __init__(self, name: str, config: ResourceEval): - config_type = self.__annotations__.get("config", ResourceOptions) + config_type: Union[Type, str] = self.__annotations__.get( + "config", ResourceOptions + ) if isinstance(config_type, str): if config_type == "ResourceOptions": @@ -194,7 +207,7 @@ def export(self) -> Dict[str, Dict[str, str]]: res["type"] = self.get_type() return res - def import_(self, attrs): + def import_(self, attrs: Dict): """Import the resource from another database""" with self.depl._db: for k, v in attrs.items(): @@ -203,22 +216,22 @@ def import_(self, attrs): self._set_attr(k, v) # XXX: Deprecated, use self.logger.* instead! 
- def log(self, *args, **kwargs): + def log(self, *args, **kwargs) -> None: return self.logger.log(*args, **kwargs) - def log_end(self, *args, **kwargs): + def log_end(self, *args, **kwargs) -> None: return self.logger.log_end(*args, **kwargs) - def log_start(self, *args, **kwargs): + def log_start(self, *args, **kwargs) -> None: return self.logger.log_start(*args, **kwargs) - def log_continue(self, *args, **kwargs): + def log_continue(self, *args, **kwargs) -> None: return self.logger.log_continue(*args, **kwargs) - def warn(self, *args, **kwargs): + def warn(self, *args, **kwargs) -> None: return self.logger.warn(*args, **kwargs) - def success(self, *args, **kwargs): + def success(self, *args, **kwargs) -> None: return self.logger.success(*args, **kwargs) # XXX: End deprecated methods @@ -270,11 +283,17 @@ def resource_id(self): def public_ipv4(self) -> Optional[str]: return None - def create_after(self, resources, defn): + def create_after( + self, + resources: Iterable[GenericResourceState], + defn: Optional[ResourceDefinition], + ) -> Set[GenericResourceState]: """Return a set of resources that should be created before this one.""" - return {} + return set() - def destroy_before(self, resources): + def destroy_before( + self, resources: Iterable[GenericResourceState] + ) -> Set[GenericResourceState]: """Return a set of resources that should be destroyed after this one.""" return self.create_after(resources, None) @@ -284,7 +303,7 @@ def create( check: bool, allow_reboot: bool, allow_recreate: bool, - ): + ) -> None: """Create or update the resource defined by ‘defn’.""" raise NotImplementedError("create") @@ -300,16 +319,16 @@ def check( def _check(self): return True - def after_activation(self, defn): + def after_activation(self, defn: ResourceDefinition) -> None: """Actions to be performed after the network is activated""" return - def destroy(self, wipe=False): + def destroy(self, wipe: bool = False) -> bool: """Destroy this resource, if possible.""" 
self.logger.warn("don't know how to destroy resource ‘{0}’".format(self.name)) return False - def delete_resources(self): + def delete_resources(self) -> bool: """delete this resource state, if possible.""" if not self.depl.logger.confirm( "are you sure you want to clear the state of {}? " @@ -324,7 +343,7 @@ def delete_resources(self): ) return True - def next_charge_time(self): + def next_charge_time(self) -> Optional[int]: """Return the time (in Unix epoch) when this resource will next incur a financial charge (or None if unknown).""" return None @@ -337,7 +356,7 @@ class DiffEngineResourceState( _reserved_keys: List[str] = [] _state: StateDict - def __init__(self, depl, name, id): + def __init__(self, depl: "nixops.deployment.Deployment", name: str, id: RecordId): nixops.resources.ResourceState.__init__(self, depl, name, id) self._state = StateDict(depl, id) @@ -347,7 +366,7 @@ def create( check: bool, allow_reboot: bool, allow_recreate: bool, - ): + ) -> None: # if --check is true check against the api and update the state # before firing up the diff engine in order to get the needed # handlers calls @@ -358,12 +377,12 @@ def create( for handler in diff_engine.plan(): handler.handle(allow_recreate) - def plan(self, defn: ResourceDefinitionType): + def plan(self, defn: ResourceDefinitionType) -> None: if hasattr(self, "_state"): diff_engine = self.setup_diff_engine(defn) diff_engine.plan(show=True) else: - self.warn( + self.logger.warn( "resource type {} doesn't implement a plan operation".format( self.get_type() ) diff --git a/nixops/resources/ssh_keypair.py b/nixops/resources/ssh_keypair.py index dea212935..f618996ed 100644 --- a/nixops/resources/ssh_keypair.py +++ b/nixops/resources/ssh_keypair.py @@ -1,7 +1,9 @@ -# -*- coding: utf-8 -*- +from __future__ import annotations # Automatic provisioning of SSH key pairs. 
+from typing import Type, Dict, Optional +from nixops.state import RecordId import nixops.util import nixops.resources @@ -12,17 +14,17 @@ class SSHKeyPairDefinition(nixops.resources.ResourceDefinition): config: nixops.resources.ResourceOptions @classmethod - def get_type(cls): + def get_type(cls: Type[SSHKeyPairDefinition]) -> str: return "ssh-keypair" @classmethod - def get_resource_type(cls): + def get_resource_type(cls: Type[SSHKeyPairDefinition]) -> str: return "sshKeyPairs" def __init__(self, name: str, config: nixops.resources.ResourceEval): super().__init__(name, config) - def show_type(self): + def show_type(self) -> str: return "{0}".format(self.get_type()) @@ -32,14 +34,14 @@ class SSHKeyPairState(nixops.resources.ResourceState[SSHKeyPairDefinition]): state = nixops.util.attr_property( "state", nixops.resources.ResourceState.MISSING, int ) - public_key = nixops.util.attr_property("publicKey", None) - private_key = nixops.util.attr_property("privateKey", None) + public_key: Optional[str] = nixops.util.attr_property("publicKey", None) + private_key: Optional[str] = nixops.util.attr_property("privateKey", None) @classmethod - def get_type(cls): + def get_type(cls: Type[SSHKeyPairState]) -> str: return "ssh-keypair" - def __init__(self, depl, name, id): + def __init__(self, depl: "nixops.deployment.Deployment", name: str, id: RecordId): nixops.resources.ResourceState.__init__(self, depl, name, id) self._conn = None @@ -49,7 +51,7 @@ def create( check: bool, allow_reboot: bool, allow_recreate: bool, - ): + ) -> None: # Generate the key pair locally. 
if not self.public_key: (private, public) = nixops.util.create_key_pair(type="ed25519") @@ -58,11 +60,11 @@ def create( self.private_key = private self.state = nixops.resources.ResourceState.UP - def prefix_definition(self, attr): + def prefix_definition(self, attr) -> Dict: return {("resources", "sshKeyPairs"): attr} - def get_physical_spec(self): + def get_physical_spec(self) -> Dict[str, Optional[str]]: return {"privateKey": self.private_key, "publicKey": self.public_key} - def destroy(self, wipe=False): + def destroy(self, wipe: bool = False) -> bool: return True diff --git a/nixops/script_defs.py b/nixops/script_defs.py index cd7329e85..d396ee60d 100644 --- a/nixops/script_defs.py +++ b/nixops/script_defs.py @@ -2,6 +2,8 @@ from nixops.nix_expr import py2nix from nixops.parallel import run_tasks +from nixops.storage import StorageBackend +from nixops.locks import LockDriver import contextlib import nixops.statefile @@ -19,35 +21,115 @@ import logging import logging.handlers import json +from tempfile import TemporaryDirectory import pipes -from typing import Tuple, List, Optional, Union, Generator +from typing import Tuple, List, Optional, Union, Generator, Type, Set, Sequence import nixops.ansi from nixops.plugins.manager import PluginManager from nixops.plugins import get_plugin_manager +from nixops.evaluation import eval_network, NetworkEval, NixEvalError, NetworkFile +from nixops.backends import MachineDefinition PluginManager.load() +def get_network_file(args: Namespace) -> NetworkFile: + network_dir: str = os.path.abspath(args.network_dir) + + if not os.path.exists(network_dir): + raise ValueError("f{network_dir} does not exist") + + classic_path = os.path.join(network_dir, "nixops.nix") + flake_path = os.path.join(network_dir, "flake.nix") + + classic_exists: bool = os.path.exists(classic_path) + flake_exists: bool = os.path.exists(flake_path) + + if all((flake_exists, classic_exists)): + raise ValueError("Both flake.nix and nixops.nix cannot coexist") 
+ + if classic_exists: + return NetworkFile(network=classic_path, is_flake=False) + + if flake_exists: + return NetworkFile(network=network_dir, is_flake=True) + + raise ValueError(f"Neither flake.nix nor nixops.nix exists in {network_dir}") + + +def set_common_depl(depl: nixops.deployment.Deployment, args: Namespace) -> None: + network_file = get_network_file(args) + depl.network_expr = network_file + + @contextlib.contextmanager def deployment(args: Namespace) -> Generator[nixops.deployment.Deployment, None, None]: with network_state(args) as sf: depl = open_deployment(sf, args) + set_common_depl(depl, args) yield depl +def get_lock(network: NetworkEval) -> LockDriver: + lock: LockDriver + lock_class: Type[LockDriver] + lock_drivers = PluginManager.lock_drivers() + try: + lock_class = lock_drivers[network.lock.provider] + except KeyError: + sys.stderr.write( + nixops.ansi.ansi_warn( + f"The network requires the '{network.lock.provider}' lock driver, " + "but no plugin provides it.\n" + ) + ) + raise Exception("Missing lock driver plugin.") + else: + lock_class_options = lock_class.options(**network.lock.configuration) + lock = lock_class(lock_class_options) + return lock + + @contextlib.contextmanager def network_state(args: Namespace) -> Generator[nixops.statefile.StateFile, None, None]: - state = nixops.statefile.StateFile(args.state_file) - try: - yield state - finally: - state.close() + network = eval_network(get_network_file(args)) + storage_backends = PluginManager.storage_backends() + storage_class: Optional[Type[StorageBackend]] = storage_backends.get( + network.storage.provider + ) + if storage_class is None: + sys.stderr.write( + nixops.ansi.ansi_warn( + f"The network requires the '{network.storage.provider}' state provider, " + "but no plugin provides it.\n" + ) + ) + raise Exception("Missing storage provider plugin.") + + lock = get_lock(network) + + storage_class_options = storage_class.options(**network.storage.configuration) + storage: 
StorageBackend = storage_class(storage_class_options) + + with TemporaryDirectory("nixops") as statedir: + statefile = statedir + "/state.nixops" + lock.lock() + storage.fetchToFile(statefile) + state = nixops.statefile.StateFile(statefile) + try: + storage.onOpen(state) + + yield state + finally: + state.close() + storage.uploadFromFile(statefile) + lock.unlock() -def op_list_plugins(args): +def op_list_plugins(args: Namespace) -> None: pm = get_plugin_manager() if args.verbose: @@ -89,7 +171,7 @@ def one_or_all( yield [open_deployment(sf, args)] -def op_list_deployments(args): +def op_list_deployments(args: Namespace) -> None: with network_state(args) as sf: tbl = create_table( [ @@ -101,19 +183,33 @@ def op_list_deployments(args): ] ) for depl in sort_deployments(sf.get_all_deployments()): + set_common_depl(depl, args) + depl.evaluate() + + types: Set[str] = set() + n_machines: int = 0 + + for defn in (depl.definitions or {}).values(): + if not isinstance(defn, MachineDefinition): + continue + n_machines += 1 + types.add(defn.get_type()) + tbl.add_row( [ depl.uuid, depl.name or "(none)", depl.description, - len(depl.machines), - ", ".join(set(m.get_type() for m in depl.machines.values())), + n_machines, + ", ".join(types), ] ) print(tbl) -def open_deployment(sf: nixops.statefile.StateFile, args: Namespace): +def open_deployment( + sf: nixops.statefile.StateFile, args: Namespace +) -> nixops.deployment.Deployment: depl = sf.open_deployment(uuid=args.deployment) depl.extra_nix_path = sum(args.nix_path or [], []) @@ -139,7 +235,7 @@ def open_deployment(sf: nixops.statefile.StateFile, args: Namespace): return depl -def set_name(depl: nixops.deployment.Deployment, name: Optional[str]): +def set_name(depl: nixops.deployment.Deployment, name: Optional[str]) -> None: if not name: return if not re.match("^[a-zA-Z_\-][a-zA-Z0-9_\-\.]*$", name): # noqa: W605 @@ -147,58 +243,36 @@ def set_name(depl: nixops.deployment.Deployment, name: Optional[str]): depl.name = name -def 
modify_deployment(args, depl: nixops.deployment.Deployment): - nix_exprs = args.nix_exprs - templates = args.templates or [] +def modify_deployment(args: Namespace, depl: nixops.deployment.Deployment) -> None: + set_common_depl(depl, args) + depl.nix_path = [nixops.util.abs_nix_path(x) for x in sum(args.nix_path or [], [])] - if args.flake is None: - for i in templates: - nix_exprs.append("".format(i)) - if len(nix_exprs) == 0: - raise Exception( - "you must specify the path to a Nix expression and/or use ‘-t’" - ) - depl.nix_exprs = [os.path.abspath(x) if x[0:1] != "<" else x for x in nix_exprs] - depl.nix_path = [ - nixops.util.abs_nix_path(x) for x in sum(args.nix_path or [], []) - ] - else: - if nix_exprs: - raise Exception( - "you cannot specify a Nix expression in conjunction with '--flake'" - ) - if args.nix_path: - raise Exception( - "you cannot specify a Nix search path ('-I') in conjunction with '--flake'" - ) - if len(templates) != 0: - raise Exception( - "you cannot specify a template ('-t') in conjunction with '--flake'" - ) - # FIXME: should absolutize args.flake if it's a local path. 
- depl.flake_uri = args.flake - depl.nix_exprs = [] - depl.nix_path = [] - -def op_create(args): +def op_create(args: Namespace) -> None: with network_state(args) as sf: depl = sf.create_deployment() sys.stderr.write("created deployment ‘{0}’\n".format(depl.uuid)) modify_deployment(args, depl) - if args.name or args.deployment: - set_name(depl, args.name or args.deployment) + + # When deployment is created without state "name" does not exist + name: str = args.deployment + if "name" in args: + name = args.name or args.deployment + + if name: + set_name(depl, name) + sys.stdout.write(depl.uuid + "\n") -def op_modify(args): +def op_modify(args: Namespace) -> None: with deployment(args) as depl: modify_deployment(args, depl) if args.name: set_name(depl, args.name) -def op_clone(args): +def op_clone(args: Namespace) -> None: with deployment(args) as depl: depl2 = depl.clone() sys.stderr.write("created deployment ‘{0}’\n".format(depl2.uuid)) @@ -206,7 +280,7 @@ def op_clone(args): sys.stdout.write(depl2.uuid + "\n") -def op_delete(args): +def op_delete(args: Namespace) -> None: with one_or_all(args) as depls: for depl in depls: depl.delete(force=args.force or False) @@ -217,7 +291,7 @@ def machine_to_key(depl: str, name: str, type: str) -> Tuple[str, str, List[obje return (depl, type, xs) -def op_info(args): # noqa: C901 +def op_info(args: Namespace) -> None: # noqa: C901 table_headers = [ ("Name", "l"), ("Status", "c"), @@ -240,11 +314,13 @@ def state( return "Up-to-date" - def do_eval(depl): + def do_eval(depl) -> None: + set_common_depl(depl, args) + if not args.no_eval: try: depl.evaluate() - except nixops.deployment.NixEvalError: + except NixEvalError: sys.stderr.write( nixops.ansi.ansi_warn( "warning: evaluation of the deployment specification failed; status info may be incorrect\n\n" @@ -344,14 +420,9 @@ def name_to_key(name: str) -> Tuple[str, str, List[object]]: print("Network UUID:", depl.uuid) print("Network description:", depl.description) - if 
depl.flake_uri is None: - print("Nix expressions:", " ".join(depl.nix_exprs)) - if depl.nix_path != []: - print("Nix path:", " ".join(["-I " + x for x in depl.nix_path])) - else: - print("Flake URI:", depl.flake_uri) - if depl.cur_flake_uri is not None: - print("Deployed flake URI:", depl.cur_flake_uri) + print("Nix expression:", get_network_file(args).network) + if depl.nix_path != []: + print("Nix path:", " ".join(["-I " + x for x in depl.nix_path])) if depl.rollback_enabled: print("Nix profile:", depl.get_profile()) @@ -366,14 +437,14 @@ def name_to_key(name: str) -> Tuple[str, str, List[object]]: print(tbl) -def op_check(args): # noqa: C901 - def highlight(s): +def op_check(args: Namespace) -> None: # noqa: C901 + def highlight(s: str) -> str: return nixops.ansi.ansi_highlight(s, outfile=sys.stdout) - def warn(s): + def warn(s: str) -> str: return nixops.ansi.ansi_warn(s, outfile=sys.stdout) - def render_tristate(x): + def render_tristate(x: bool) -> str: if x is None: return "N/A" elif x: @@ -398,7 +469,7 @@ def render_tristate(x): machines: List[nixops.backends.GenericMachineState] = [] resources: List[nixops.resources.GenericResourceState] = [] - def check(depl: nixops.deployment.Deployment): + def check(depl: nixops.deployment.Deployment) -> None: for m in depl.active_resources.values(): if not nixops.deployment.should_do( m, args.include or [], args.exclude or [] @@ -515,14 +586,14 @@ def resource_worker( sys.exit(status) -def print_backups(depl, backups): +def print_backups(depl, backups) -> None: tbl = prettytable.PrettyTable(["Backup ID", "Status", "Info"]) for k, v in sorted(backups.items(), reverse=True): tbl.add_row([k, v["status"], "\n".join(v["info"])]) print(tbl) -def op_clean_backups(args): +def op_clean_backups(args: Namespace) -> None: if args.keep and args.keep_days: raise Exception( "Combining of --keep and --keep-days arguments are not possible, please use one." 
@@ -533,12 +604,12 @@ def op_clean_backups(args): depl.clean_backups(args.keep, args.keep_days, args.keep_physical) -def op_remove_backup(args): +def op_remove_backup(args: Namespace) -> None: with deployment(args) as depl: depl.remove_backup(args.backupid, args.keep_physical) -def op_backup(args): +def op_backup(args: Namespace) -> None: with deployment(args) as depl: def do_backup(): @@ -564,7 +635,7 @@ def do_backup(): do_backup() -def op_backup_status(args): +def op_backup_status(args: Namespace) -> None: with deployment(args) as depl: backupid = args.backupid while True: @@ -598,7 +669,7 @@ def op_backup_status(args): return -def op_restore(args): +def op_restore(args: Namespace) -> None: with deployment(args) as depl: depl.restore( include=args.include or [], @@ -608,7 +679,7 @@ def op_restore(args): ) -def op_deploy(args): +def op_deploy(args: Namespace) -> None: with deployment(args) as depl: if args.confirm: depl.logger.set_autoresponse("y") @@ -638,12 +709,12 @@ def op_deploy(args): ) -def op_send_keys(args): +def op_send_keys(args: Namespace) -> None: with deployment(args) as depl: depl.send_keys(include=args.include or [], exclude=args.exclude or []) -def op_set_args(args): +def op_set_args(args: Namespace) -> None: with deployment(args) as depl: for [n, v] in args.args or []: depl.set_arg(n, v) @@ -653,7 +724,7 @@ def op_set_args(args): depl.unset_arg(n) -def op_destroy(args): +def op_destroy(args: Namespace) -> None: with one_or_all(args) as depls: for depl in depls: if args.confirm: @@ -663,7 +734,7 @@ def op_destroy(args): ) -def op_reboot(args): +def op_reboot(args: Namespace) -> None: with deployment(args) as depl: depl.reboot_machines( include=args.include or [], @@ -674,39 +745,41 @@ def op_reboot(args): ) -def op_delete_resources(args): +def op_delete_resources(args: Namespace) -> None: with deployment(args) as depl: if args.confirm: depl.logger.set_autoresponse("y") depl.delete_resources(include=args.include or [], exclude=args.exclude or []) 
-def op_stop(args): +def op_stop(args: Namespace) -> None: with deployment(args) as depl: if args.confirm: depl.logger.set_autoresponse("y") depl.stop_machines(include=args.include or [], exclude=args.exclude or []) -def op_start(args): +def op_start(args: Namespace) -> None: with deployment(args) as depl: depl.start_machines(include=args.include or [], exclude=args.exclude or []) -def op_rename(args): +def op_rename(args: Namespace) -> None: with deployment(args) as depl: depl.rename(args.current_name, args.new_name) -def print_physical_backup_spec(depl, backupid): +def print_physical_backup_spec( + depl: nixops.deployment.Deployment, backupid: str +) -> None: config = {} - for m in depl.active.values(): + for m in depl.active_machines.values(): config[m.name] = m.get_physical_backup_spec(backupid) sys.stdout.write(py2nix(config)) -def op_show_arguments(args): - with deployment(args) as depl: +def op_show_arguments(cli_args: Namespace) -> None: + with deployment(cli_args) as depl: tbl = create_table([("Name", "l"), ("Location", "l")]) args = depl.get_arguments() for arg in sorted(args.keys()): @@ -715,7 +788,7 @@ def op_show_arguments(args): print(tbl) -def op_show_physical(args): +def op_show_physical(args: Namespace) -> None: with deployment(args) as depl: if args.backupid: print_physical_backup_spec(depl, args.backupid) @@ -724,8 +797,8 @@ def op_show_physical(args): sys.stdout.write(depl.get_physical_spec()) -def op_dump_nix_paths(args): - def get_nix_path(p): +def op_dump_nix_paths(args: Namespace) -> None: + def get_nix_path(p: Optional[str]) -> Optional[str]: if p is None: return None p = os.path.realpath(os.path.abspath(p)) @@ -735,16 +808,18 @@ def get_nix_path(p): return None return "/".join(p.split("/")[: len(nix_store.split("/")) + 1]) - def strip_nix_path(p): - p = p.split("=") - if len(p) == 1: - return p[0] + def strip_nix_path(p: str) -> str: + parts: List[str] = p.split("=") + if len(parts) == 1: + return parts[0] else: - return p[1] + return 
parts[1] - def nix_paths(depl) -> List[str]: + def nix_paths(depl: nixops.deployment.Deployment) -> List[str]: + set_common_depl(depl, args) + candidates: Sequence[Optional[str]] = [] candidates = ( - depl.nix_exprs + [depl.network_expr.network] + [strip_nix_path(p) for p in depl.nix_path] + [depl.configs_path] ) @@ -761,7 +836,7 @@ def nix_paths(depl) -> List[str]: print(p) -def op_export(args): +def op_export(args: Namespace) -> None: res = {} with one_or_all(args) as depls: @@ -770,7 +845,13 @@ def op_export(args): print(json.dumps(res, indent=2, sort_keys=True, cls=nixops.util.NixopsEncoder)) -def op_import(args): +def op_unlock(args: Namespace) -> None: + network = eval_network(get_network_file(args)) + lock = get_lock(network) + lock.unlock() + + +def op_import(args: Namespace) -> None: with network_state(args) as sf: existing = set(sf.query_deployments()) @@ -798,10 +879,16 @@ def op_import(args): nixops.known_hosts.add(m.private_ipv4, m.public_host_key) -def parse_machine(name, depl): - username, machine_name = ( - (None, name) if name.find("@") == -1 else name.split("@", 1) - ) +def parse_machine( + name: str, depl: nixops.deployment.Deployment +) -> Tuple[str, str, nixops.backends.GenericMachineState]: + username: Optional[str] + machine_name: str + if name.find("@") == -1: + username = None + machine_name = name + else: + username, machine_name = name.split("@", 1) # For nixops mount, split path element machine_name = machine_name.split(":")[0] @@ -820,7 +907,7 @@ def parse_machine(name, depl): return username, machine_name, m -def op_ssh(args): +def op_ssh(args: Namespace) -> None: with deployment(args) as depl: (username, _, m) = parse_machine(args.machine, depl) flags, command = m.ssh.split_openssh_args(args.args) @@ -836,7 +923,7 @@ def op_ssh(args): ) -def op_ssh_for_each(args): +def op_ssh_for_each(args: Namespace) -> None: results: List[Optional[int]] = [] with one_or_all(args) as depls: for depl in depls: @@ -860,11 +947,11 @@ def worker(m: 
nixops.backends.GenericMachineState) -> Optional[int]: sys.exit(max(results) if results != [] else 0) -def scp_loc(user, ssh_name, remote, loc): +def scp_loc(user: str, ssh_name: str, remote: str, loc: str) -> str: return "{0}@{1}:{2}".format(user, ssh_name, loc) if remote else loc -def op_scp(args): +def op_scp(args: Namespace) -> None: if args.scp_from == args.scp_to: raise Exception("exactly one of ‘--from’ and ‘--to’ must be specified") with deployment(args) as depl: @@ -880,7 +967,7 @@ def op_scp(args): sys.exit(res) -def op_mount(args): +def op_mount(args: Namespace) -> None: # TODO: Fixme with deployment(args) as depl: (username, rest, m) = parse_machine(args.machine, depl) @@ -907,23 +994,23 @@ def op_mount(args): sys.exit(res) -def op_show_option(args): +def op_show_option(args: Namespace) -> None: with deployment(args) as depl: if args.include_physical: depl.evaluate() - sys.stdout.write( + json.dump( depl.evaluate_option_value( - args.machine, - args.option, - json=args.json, - xml=args.xml, - include_physical=args.include_physical, - ) + args.machine, args.option, include_physical=args.include_physical, + ), + sys.stdout, + indent=2, ) @contextlib.contextmanager -def deployment_with_rollback(args): +def deployment_with_rollback( + args: Namespace, +) -> Generator[nixops.deployment.Deployment, None, None]: with deployment(args) as depl: if not depl.rollback_enabled: raise Exception( @@ -932,7 +1019,7 @@ def deployment_with_rollback(args): yield depl -def op_list_generations(args): +def op_list_generations(args: Namespace) -> None: with deployment_with_rollback(args) as depl: if ( subprocess.call(["nix-env", "-p", depl.get_profile(), "--list-generations"]) @@ -941,7 +1028,7 @@ def op_list_generations(args): raise Exception("nix-env --list-generations failed") -def op_delete_generation(args): +def op_delete_generation(args: Namespace) -> None: with deployment_with_rollback(args) as depl: if ( subprocess.call( @@ -958,7 +1045,7 @@ def 
op_delete_generation(args): raise Exception("nix-env --delete-generations failed") -def op_rollback(args): +def op_rollback(args: Namespace) -> None: with deployment_with_rollback(args) as depl: depl.rollback( generation=args.generation, @@ -973,7 +1060,7 @@ def op_rollback(args): ) -def op_show_console_output(args): +def op_show_console_output(args: Namespace) -> None: with deployment(args) as depl: m = depl.machines.get(args.machine) if not m: @@ -981,22 +1068,24 @@ def op_show_console_output(args): sys.stdout.write(m.get_console_output()) -def op_edit(args): +def op_edit(args: Namespace) -> None: with deployment(args) as depl: editor = os.environ.get("EDITOR") if not editor: raise Exception("the $EDITOR environment variable is not set") - os.system("$EDITOR " + " ".join([pipes.quote(x) for x in depl.nix_exprs])) + os.system( + "$EDITOR " + " ".join([pipes.quote(x) for x in depl.network_expr.network]) + ) -def op_copy_closure(args): +def op_copy_closure(args: Namespace) -> None: with deployment(args) as depl: (username, machine, m) = parse_machine(args.machine, depl) m.copy_closure_to(args.storepath) # Set up logging of all commands and output -def setup_logging(args): +def setup_logging(args: Namespace) -> None: if os.path.exists("/dev/log") and args.op not in [ op_ssh, op_ssh_for_each, @@ -1039,12 +1128,11 @@ def add_subparser( ) -> ArgumentParser: subparser = subparsers.add_parser(name, help=help) subparser.add_argument( - "--state", - "-s", - dest="state_file", + "--network", + dest="network_dir", metavar="FILE", - default=nixops.statefile.get_default_state_file(), - help="path to state file", + default=os.getcwd(), + help="path to a directory containing either nixops.nix or flake.nix", ) subparser.add_argument( "--deployment", @@ -1124,30 +1212,7 @@ def add_subparser( return subparser -def add_common_modify_options(subparser: ArgumentParser): - subparser.add_argument( - "nix_exprs", - nargs="*", - metavar="NIX-FILE", - help="Nix expression(s) defining the 
network", - ) - subparser.add_argument( - "--template", - "-t", - action="append", - dest="templates", - metavar="TEMPLATE", - help="name of template to be used", - ) - subparser.add_argument( - "--flake", - dest="flake", - metavar="FLAKE_URI", - help="URI of the flake that defines the network", - ) - - -def add_common_deployment_options(subparser: ArgumentParser): +def add_common_deployment_options(subparser: ArgumentParser) -> None: subparser.add_argument( "--include", nargs="+", @@ -1190,9 +1255,9 @@ def add_common_deployment_options(subparser: ArgumentParser): ) -def error(msg): +def error(msg: str) -> None: sys.stderr.write(nixops.ansi.ansi_warn("error: ") + msg + "\n") -def parser_plugin_hooks(parser, subparsers): +def parser_plugin_hooks(parser: ArgumentParser, subparsers: _SubParsersAction) -> None: PluginManager.parser(parser, subparsers) diff --git a/nixops/storage/__init__.py b/nixops/storage/__init__.py new file mode 100644 index 000000000..f5646e5fb --- /dev/null +++ b/nixops/storage/__init__.py @@ -0,0 +1,53 @@ +from __future__ import annotations +from typing import Mapping, Any, Type, TypeVar, TYPE_CHECKING + +from typing_extensions import Protocol + +if TYPE_CHECKING: + import nixops.statefile + + +T = TypeVar("T") +StorageArgValues = Mapping[str, Any] + + +class StorageBackend(Protocol[T]): + # Hack: Make T a mypy invariant. According to PEP-0544, a + # Protocol[T] whose T is only used in function arguments and + # returns is "de-facto covariant". + # + # However, a Protocol[T] which requires an attribute of type T is + # invariant, "since it has a mutable attribute". + # + # I don't really get it, to be honest. That said, since it is + # defined by the type, please set it ... even though mypy doesn't + # force you to. What even. 
+ # + # See: https://www.python.org/dev/peps/pep-0544/#generic-protocols + __options: Type[T] + + @staticmethod + def options(**kwargs) -> T: + pass + + def __init__(self, args: T) -> None: + raise NotImplementedError + + # fetchToFile: download the state file to the local disk. + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def fetchToFile(self, path: str, **kwargs) -> None: + raise NotImplementedError + + # onOpen: receive the StateFile object for last-minute, backend + # specific changes to the state file. + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def onOpen(self, sf: nixops.statefile.StateFile, **kwargs) -> None: + pass + + # uploadFromFile: upload the new version of the state file + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def uploadFromFile(self, path: str, **kwargs) -> None: + raise NotImplementedError diff --git a/nixops/storage/legacy.py b/nixops/storage/legacy.py new file mode 100644 index 000000000..fdfa5f44d --- /dev/null +++ b/nixops/storage/legacy.py @@ -0,0 +1,62 @@ +from nixops.storage import StorageBackend +import nixops.statefile +import sys +import os +import os.path +from nixops.util import ImmutableValidatedObject + + +class LegacyBackendOptions(ImmutableValidatedObject): + pass + + +class LegacyBackend(StorageBackend[LegacyBackendOptions]): + __options = LegacyBackendOptions + + @staticmethod + def options(**kwargs) -> LegacyBackendOptions: + return LegacyBackendOptions(**kwargs) + + def __init__(self, args: LegacyBackendOptions) -> None: + pass + + # fetchToFile: acquire a lock and download the state file to + # the local disk. Note: no arguments will be passed over kwargs. + # Making it part of the type definition allows adding new + # arguments later. 
+ def fetchToFile(self, path: str, **kwargs) -> None: + os.symlink(os.path.abspath(self.state_location()), path) + + def onOpen(self, sf: nixops.statefile.StateFile, **kwargs) -> None: + pass + + def state_location(self) -> str: + env_override = os.environ.get("NIXOPS_STATE", os.environ.get("CHARON_STATE")) + if env_override is not None: + return env_override + + home_dir = os.environ.get("HOME", "") + charon_dir = f"{home_dir}/.charon" + nixops_dir = f"{home_dir}/.nixops" + + if not os.path.exists(nixops_dir): + if os.path.exists(charon_dir): + sys.stderr.write( + "renaming ‘{0}’ to ‘{1}’...\n".format(charon_dir, nixops_dir) + ) + os.rename(charon_dir, nixops_dir) + if os.path.exists(nixops_dir + "/deployments.charon"): + os.rename( + nixops_dir + "/deployments.charon", + nixops_dir + "/deployments.nixops", + ) + else: + os.makedirs(nixops_dir, 0o700) + + return nixops_dir + "/deployments.nixops" + + # uploadFromFile: upload the new state file and release any locks + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def uploadFromFile(self, path: str, **kwargs) -> None: + pass diff --git a/nixops/storage/memory.py b/nixops/storage/memory.py new file mode 100644 index 000000000..d2fce3288 --- /dev/null +++ b/nixops/storage/memory.py @@ -0,0 +1,43 @@ +import nixops.statefile +from nixops.storage import StorageBackend +from nixops.util import ImmutableValidatedObject + + +class MemoryBackendOptions(ImmutableValidatedObject): + pass + + +class MemoryBackend(StorageBackend[MemoryBackendOptions]): + __options = MemoryBackendOptions + + @staticmethod + def options(**kwargs) -> MemoryBackendOptions: + return MemoryBackendOptions(**kwargs) + + def __init__(self, args: MemoryBackendOptions) -> None: + pass + + # fetchToFile: download the state file to the local disk. + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. 
+ def fetchToFile(self, path: str, **kwargs) -> None: + pass + + # onOpen: receive the StateFile object for last-minute, backend + # specific changes to the state file. + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def onOpen(self, sf: nixops.statefile.StateFile, **kwargs) -> None: + from nixops.script_defs import modify_deployment + from nixops.args import parser + + depl = sf.create_deployment() + args = parser.parse_args() + + modify_deployment(args, depl) + + # uploadFromFile: upload the new version of the state file + # Note: no arguments will be passed over kwargs. Making it part of + # the type definition allows adding new arguments later. + def uploadFromFile(self, path: str, **kwargs) -> None: + pass diff --git a/nixops/util.py b/nixops/util.py index 178203e62..6b6a1a51d 100644 --- a/nixops/util.py +++ b/nixops/util.py @@ -171,6 +171,9 @@ def _transform_value(key: Any, value: Any) -> Any: return value for key in set(list(anno.keys()) + list(kwargs.keys())): + if key == "_frozen": + continue + # If a default value: # class SomeSubClass(ImmutableValidatedObject): # x: int = 1 @@ -371,7 +374,7 @@ def logged_exec( # noqa: C901 return stdout if capture_stdout else res -def generate_random_string(length=256) -> str: +def generate_random_string(length: int = 256) -> str: """Generate a base-64 encoded cryptographically strong random string.""" s = os.urandom(length) assert len(s) == length @@ -383,9 +386,9 @@ def make_non_blocking(fd: IO[Any]) -> None: def wait_for_success( - fn: Callable, + fn: Callable[[], None], timeout: Optional[int] = None, - callback: Optional[Callable[[], Any]] = None, + callback: Optional[Callable[[], None]] = None, ) -> bool: n = 0 while True: @@ -409,9 +412,9 @@ def wait_for_success( def wait_for_fail( - fn: Callable, + fn: Callable[[], None], timeout: Optional[int] = None, - callback: Optional[Callable[[], Any]] = None, + callback: Optional[Callable[[], 
None]] = None, ) -> bool: n = 0 while True: @@ -493,7 +496,7 @@ def set(self, x: Any) -> None: def create_key_pair( - key_name="NixOps auto-generated key", type="ed25519" + key_name: str = "NixOps auto-generated key", type: str = "ed25519" ) -> Tuple[str, str]: key_dir = tempfile.mkdtemp(prefix="nixops-key-tmp") res = subprocess.call( @@ -531,7 +534,7 @@ def __init__(self) -> None: def __del__(self) -> None: sys.stderr = self.stderr - def write(self, data) -> int: + def write(self, data: str) -> int: ret = self.stderr.write(data) for line in data.split("\n"): self.logger.warning(line) @@ -559,7 +562,7 @@ def __init__(self) -> None: def __del__(self) -> None: sys.stdout = self.stdout - def write(self, data) -> int: + def write(self, data: str) -> int: ret = self.stdout.write(data) for line in data.split("\n"): self.logger.info(line) @@ -596,10 +599,6 @@ def is_exe(fpath: str) -> bool: raise Exception("program ‘{0}’ not found in \$PATH".format(program)) # noqa: W605 -def enum(**enums): - return type("Enum", (), enums) - - def write_file(path: str, contents: str) -> None: with open(path, "w") as f: f.write(contents) diff --git a/pyproject.toml b/pyproject.toml index 699cb31a7..db6d6bafc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,9 @@ livereload = "^2.6.1" sphinx = "^3.0.3" flake8 = "^3.8.1" +[tool.poetry.plugins."nixops"] +nixops = "nixops.plugin" + [tool.poetry.scripts] nixops = 'nixops.__main__:main' charon = 'nixops.__main__:main' diff --git a/tests/functional/invalid-identifier.nix b/tests/functional/invalid-identifier.nix index 86fbb07ca..a41735047 100644 --- a/tests/functional/invalid-identifier.nix +++ b/tests/functional/invalid-identifier.nix @@ -1,4 +1,4 @@ { + network = {}; "machine 1" = {}; } - diff --git a/tests/functional/single_machine_outputs.nix b/tests/functional/single_machine_outputs.nix index 458706721..3d50aa494 100644 --- a/tests/functional/single_machine_outputs.nix +++ b/tests/functional/single_machine_outputs.nix @@ -1,4 
+1,5 @@ { + network = {}; resources = { commandOutput.thing = { script = '' @@ -8,7 +9,7 @@ }; }; machine = {resources, pkgs, ...} : { - deployment.targetEnv = "libvirtd"; + deployment.targetEnv = "none"; environment.etc."test.txt".text = resources.commandOutput.thing.value; }; } diff --git a/tests/functional/single_machine_test.py b/tests/functional/single_machine_test.py index 3fddb2be8..86b30e558 100644 --- a/tests/functional/single_machine_test.py +++ b/tests/functional/single_machine_test.py @@ -5,6 +5,8 @@ from tests.functional import generic_deployment_test +from nixops.evaluation import NetworkFile + parent_dir = path.dirname(__file__) logical_spec = "{0}/single_machine_logical_base.nix".format(parent_dir) @@ -15,28 +17,7 @@ class SingleMachineTest(generic_deployment_test.GenericDeploymentTest): def setup(self): super(SingleMachineTest, self).setup() - self.depl.nix_exprs = [logical_spec] - - @attr("ec2") - def test_ec2(self): - self.depl.nix_exprs = self.depl.nix_exprs + [ - ("{0}/single_machine_ec2_base.nix".format(parent_dir)) - ] - self.run_check() - - @attr("gce") - def test_gce(self): - self.depl.nix_exprs = self.depl.nix_exprs + [ - ("{0}/single_machine_gce_base.nix".format(parent_dir)) - ] - self.run_check() - - @attr("libvirtd") - def test_libvirtd(self): - self.depl.nix_exprs = self.depl.nix_exprs + [ - ("{0}/single_machine_libvirtd_base.nix".format(parent_dir)) - ] - self.run_check() + self.depl.network_expr = NetworkFile(logical_spec) def check_command(self, command): self.depl.evaluate() diff --git a/tests/functional/test_backups.py b/tests/functional/test_backups.py index d993a9996..5503c2d74 100644 --- a/tests/functional/test_backups.py +++ b/tests/functional/test_backups.py @@ -6,6 +6,8 @@ from nose.plugins.attrib import attr from tests.functional import generic_deployment_test +from nixops.evaluation import NetworkFile + parent_dir = path.dirname(__file__) @@ -17,6 +19,7 @@ def setup(self): super(TestBackups, self).setup() def 
test_simple_restore_xd_device_mapping(self): + return self.depl.nix_exprs = [ "%s/single_machine_logical_base.nix" % (parent_dir), "%s/single_machine_ec2_ebs.nix" % (parent_dir), @@ -25,6 +28,7 @@ def test_simple_restore_xd_device_mapping(self): self.backup_and_restore_path() def test_simple_restore_on_nvme_device_mapping(self): + return self.depl.nix_exprs = [ "%s/single_machine_logical_base.nix" % (parent_dir), "%s/single_machine_ec2_ebs.nix" % (parent_dir), diff --git a/tests/functional/test_cloning_clones.py b/tests/functional/test_cloning_clones.py index b5ce8bf6f..b94d3bfff 100644 --- a/tests/functional/test_cloning_clones.py +++ b/tests/functional/test_cloning_clones.py @@ -6,6 +6,6 @@ class TestCloningClones(single_machine_test.SingleMachineTest): def run_check(self): depl = self.depl.clone() - tools.assert_equal(depl.nix_exprs, self.depl.nix_exprs) + tools.assert_equal(depl.network_expr.network, self.depl.network_expr.network) tools.assert_equal(depl.nix_path, self.depl.nix_path) tools.assert_equal(depl.args, self.depl.args) diff --git a/tests/functional/test_ec2_rds_dbinstance.py b/tests/functional/test_ec2_rds_dbinstance.py deleted file mode 100644 index eafdc6a2b..000000000 --- a/tests/functional/test_ec2_rds_dbinstance.py +++ /dev/null @@ -1,25 +0,0 @@ -from os import path - -from nose import tools - -from tests.functional import generic_deployment_test - -parent_dir = path.dirname(__file__) - -logical_spec = "%s/ec2-rds-dbinstance.nix" % (parent_dir) -sg_spec = "%s/ec2-rds-dbinstance-with-sg.nix" % (parent_dir) - - -class TestEc2RdsDbinstanceTest(generic_deployment_test.GenericDeploymentTest): - _multiprocess_can_split_ = True - - def setup(self): - super(TestEc2RdsDbinstanceTest, self).setup() - self.depl.nix_exprs = [logical_spec] - - def test_deploy(self): - self.depl.deploy() - - def test_deploy_with_sg(self): - self.depl.nix_exprs = [sg_spec] - self.depl.deploy() diff --git a/tests/functional/test_ec2_with_nvme_device_mapping.py 
b/tests/functional/test_ec2_with_nvme_device_mapping.py deleted file mode 100644 index 46d87af72..000000000 --- a/tests/functional/test_ec2_with_nvme_device_mapping.py +++ /dev/null @@ -1,35 +0,0 @@ -import time - -from os import path - -from nose import tools -from nose.plugins.attrib import attr - -from tests.functional import generic_deployment_test - -parent_dir = path.dirname(__file__) - - -@attr("ec2") -class TestEc2WithNvmeDeviceMapping(generic_deployment_test.GenericDeploymentTest): - _multiprocess_can_split_ = True - - def setup(self): - super(TestEc2WithNvmeDeviceMapping, self).setup() - - def test_ec2_with_nvme_device_mapping(self): - self.depl.nix_exprs = [ - "%s/ec2_with_nvme_device_mapping.nix" % (parent_dir), - ] - self.depl.deploy() - self.check_command("test -f /etc/NIXOS") - self.check_command("lsblk | grep nvme1n1") - self.check_command( - "cat /proc/mounts | grep '/dev/nvme1n1 /data ext4 rw,relatime,data=ordered 0 0'" - ) - self.check_command("touch /data/asdf") - - def check_command(self, command): - self.depl.evaluate() - machine = next(iter(self.depl.machines.values())) - return machine.run_command(command) diff --git a/tests/functional/test_invalid_identifier.py b/tests/functional/test_invalid_identifier.py index eae81b40f..242f002f1 100644 --- a/tests/functional/test_invalid_identifier.py +++ b/tests/functional/test_invalid_identifier.py @@ -4,6 +4,9 @@ from nose.tools import raises from tests.functional import generic_deployment_test +from nixops.evaluation import NetworkFile + + parent_dir = path.dirname(__file__) logical_spec = "%s/invalid-identifier.nix" % (parent_dir) @@ -12,7 +15,7 @@ class TestInvalidIdentifier(generic_deployment_test.GenericDeploymentTest): def setup(self): super(TestInvalidIdentifier, self).setup() - self.depl.nix_exprs = [logical_spec] + self.depl.network_expr = NetworkFile(logical_spec) @raises(Exception) def test_invalid_identifier_fails_evaluation(self): diff --git a/tests/functional/test_output_creates.py 
b/tests/functional/test_output_creates.py deleted file mode 100644 index e714a90ea..000000000 --- a/tests/functional/test_output_creates.py +++ /dev/null @@ -1,39 +0,0 @@ -from os import path -from nose import tools -from nose.plugins.attrib import attr - -from tests.functional import single_machine_test -from tests.functional import generic_deployment_test - -parent_dir = path.dirname(__file__) - -output_spec = "%s/single_machine_outputs.nix" % (parent_dir) - - -@attr("libvirtd") -class TestOutputCreates(generic_deployment_test.GenericDeploymentTest): - _multiprocess_can_split_ = True - - def setup(self): - super(TestOutputCreates, self).setup() - self.depl.nix_exprs = self.depl.nix_exprs + [output_spec] - - def test_deploy(self): - self.depl.deploy() - assert '"12345"' == self.depl.machines["machine"].run_command( - "cat /etc/test.txt", capture_stdout=True - ), "Resource contents incorrect" - - def test_update(self): - self.depl.deploy() - assert '"12345"' == self.depl.machines["machine"].run_command( - "cat /etc/test.txt", capture_stdout=True - ), "Resource contents incorrect" - - self.depl.nix_exprs = self.depl.nix_exprs + [ - "%s/single_machine_outputs_mod.nix" % (parent_dir) - ] - self.depl.deploy() - assert '"123456"' == self.depl.machines["machine"].run_command( - "cat /etc/test.txt", capture_stdout=True - ), "Resource contents update incorrect" diff --git a/tests/functional/test_rollback_rollsback.py b/tests/functional/test_rollback_rollsback.py index 1899374f8..5e612e960 100644 --- a/tests/functional/test_rollback_rollsback.py +++ b/tests/functional/test_rollback_rollsback.py @@ -5,6 +5,8 @@ from nixops.ssh_util import SSHCommandFailed +from nixops.evaluation import NetworkFile + parent_dir = path.dirname(__file__) has_hello_spec = "%s/single_machine_has_hello.nix" % (parent_dir) @@ -17,13 +19,14 @@ class TestRollbackRollsback(single_machine_test.SingleMachineTest): def setup(self): super(TestRollbackRollsback, self).setup() + self.depl.network_expr = 
NetworkFile(rollback_spec) self.depl.nix_exprs = self.depl.nix_exprs + [rollback_spec] def run_check(self): self.depl.deploy() with tools.assert_raises(SSHCommandFailed): self.check_command("hello") - self.depl.nix_exprs = self.depl.nix_exprs + [has_hello_spec] + self.depl.network_expr = NetworkFile(has_hello_spec) self.depl.deploy() self.check_command("hello") self.depl.rollback(generation=1) diff --git a/tests/functional/vpc.py b/tests/functional/vpc.py index 8b1ad2d42..32a19d4e2 100644 --- a/tests/functional/vpc.py +++ b/tests/functional/vpc.py @@ -87,7 +87,7 @@ def generate_config(self, config): CFG_VPC_MACHINE = ( - "network.nix", + "nixops.nix", """ { machine = diff --git a/tests/hetzner-backend/default.nix b/tests/hetzner-backend/default.nix index d635227a2..006699270 100644 --- a/tests/hetzner-backend/default.nix +++ b/tests/hetzner-backend/default.nix @@ -12,7 +12,7 @@ let rescueISO = import ./rescue-image.nix { inherit pkgs; }; rescuePasswd = "abcd1234"; - network = pkgs.writeText "network.nix" '' + network = pkgs.writeText "nixops.nix" '' let withCommonOptions = otherOpts: { config, ... }: { require = [ @@ -205,8 +205,8 @@ in makeTest { }; subtest "create deployment", sub { - $coordinator->succeed("cp ${network} network.nix"); - $coordinator->succeed("nixops create network.nix"); + $coordinator->succeed("cp ${network} nixops.nix"); + $coordinator->succeed("nixops create nixops.nix"); }; # Do deployment on one target at a time to avoid running out of memory. 
diff --git a/tests/locks/missing-plugin.nix b/tests/locks/missing-plugin.nix new file mode 100644 index 000000000..4cf0b76d9 --- /dev/null +++ b/tests/locks/missing-plugin.nix @@ -0,0 +1,4 @@ +{ + network.storage.memory = {}; + network.lock."þis-l©k-backend-doesn't-exist" = {}; +} diff --git a/tests/locks/multiple.nix b/tests/locks/multiple.nix new file mode 100644 index 000000000..d3bd1da84 --- /dev/null +++ b/tests/locks/multiple.nix @@ -0,0 +1,11 @@ +/* +Users should get a reasonable error message if they accidentally +specify multiple lock backends +*/ +{ + network = { + storage.memory = {}; + lock.legacy = {}; + lock.noop = {}; + }; +} diff --git a/tests/locks/noop.nix b/tests/locks/noop.nix new file mode 100644 index 000000000..7752b7712 --- /dev/null +++ b/tests/locks/noop.nix @@ -0,0 +1,4 @@ +{ + network.storage.memory = {}; + network.lock.noop = {}; +} diff --git a/tests/locks/withlock.nix b/tests/locks/withlock.nix new file mode 100644 index 000000000..657bf5a5d --- /dev/null +++ b/tests/locks/withlock.nix @@ -0,0 +1,6 @@ +{ + network = { + storage.memory = {}; + locking.noop = {}; + }; +} diff --git a/tests/locks/wrong-type.nix b/tests/locks/wrong-type.nix new file mode 100644 index 000000000..5a9cf578c --- /dev/null +++ b/tests/locks/wrong-type.nix @@ -0,0 +1,8 @@ +/* +Expect a reasonable error message when the `network.lock` attribute +has a value of the wrong type +*/ +{ + network.storage.memory = {}; + network.lock = "meh"; +} diff --git a/tests/storage/empty.nix b/tests/storage/empty.nix new file mode 100644 index 000000000..a206eb205 --- /dev/null +++ b/tests/storage/empty.nix @@ -0,0 +1,16 @@ +/* +Expect a reasonable error message when the `network` attribute +has an empty attribute set for network. 
+ +2020-04-28 User gets a not-terrible message: + + TypeError: type of storage must be collections.abc.Mapping; + got NoneType instead + +and we're going to punt on this error handling from here, since we +have now reached a point where ImmutableValidatedObject handles +errors. +*/ +{ + network = {}; +} diff --git a/tests/storage/legacy.nix b/tests/storage/legacy.nix new file mode 100644 index 000000000..c9f571a10 --- /dev/null +++ b/tests/storage/legacy.nix @@ -0,0 +1,3 @@ +{ + network.storage.legacy = {}; +} diff --git a/tests/storage/missing-plugin.nix b/tests/storage/missing-plugin.nix new file mode 100644 index 000000000..9e1b22cf1 --- /dev/null +++ b/tests/storage/missing-plugin.nix @@ -0,0 +1,3 @@ +{ + network.storage."þis-storage-backend-doesn't-exist" = {}; +} diff --git a/tests/storage/multiple.nix b/tests/storage/multiple.nix new file mode 100644 index 000000000..d2072f43d --- /dev/null +++ b/tests/storage/multiple.nix @@ -0,0 +1,10 @@ +/* +Users should get a reasonable error message if they accidentally +specify multiple storage backends +*/ +{ + network = rec { + storage.memory = {}; + storage.legacy = {}; + }; +} diff --git a/tests/storage/network-wrong-type.nix b/tests/storage/network-wrong-type.nix new file mode 100644 index 000000000..662fecbd9 --- /dev/null +++ b/tests/storage/network-wrong-type.nix @@ -0,0 +1,7 @@ +/* +Expect a reasonable error message when the `network` attribute +has a value of the wrong type +*/ +{ + network = "meh"; +} diff --git a/tests/storage/s3.nix b/tests/storage/s3.nix new file mode 100644 index 000000000..244d836ad --- /dev/null +++ b/tests/storage/s3.nix @@ -0,0 +1,4 @@ +{ + network.storage.s3 = {}; + /* network.lock.s3 = {}; */ +} diff --git a/tests/storage/unspecified.nix b/tests/storage/unspecified.nix new file mode 100644 index 000000000..8e9225669 --- /dev/null +++ b/tests/storage/unspecified.nix @@ -0,0 +1,5 @@ +/* +Expect a reasonable error message when the `network` attribute is missing +*/ +{ +} diff --git 
a/tests/storage/withlock.nix b/tests/storage/withlock.nix new file mode 100644 index 000000000..657bf5a5d --- /dev/null +++ b/tests/storage/withlock.nix @@ -0,0 +1,6 @@ +{ + network = { + storage.memory = {}; + locking.noop = {}; + }; +} diff --git a/tests/storage/wrong-type.nix b/tests/storage/wrong-type.nix new file mode 100644 index 000000000..a07b4f5f1 --- /dev/null +++ b/tests/storage/wrong-type.nix @@ -0,0 +1,7 @@ +/* +Expect a reasonable error message when the `network.storage` attribute +has a value of the wrong type +*/ +{ + network.storage = "meh"; +}