diff --git a/plugins/module_utils/vm.py b/plugins/module_utils/vm.py index 002dbf6f..b427a77f 100644 --- a/plugins/module_utils/vm.py +++ b/plugins/module_utils/vm.py @@ -33,6 +33,7 @@ from ..module_utils.task_tag import TaskTag from ..module_utils import errors from ..module_utils.snapshot_schedule import SnapshotSchedule +from ..module_utils.hypercore_version import HyperCoreVersion # FROM_ANSIBLE_TO_HYPERCORE_POWER_STATE and FROM_HYPERCORE_TO_ANSIBLE_POWER_STATE are mappings for how # states are stored in python/ansible and how are they stored in hypercore @@ -59,17 +60,6 @@ ) -FROM_ANSIBLE_TO_HYPERCORE_MACHINE_TYPE = { - "UEFI": "scale-8.10", - "BIOS": "scale-7.2", - "vTPM+UEFI": "scale-uefi-tpm-9.2", -} - -FROM_HYPERCORE_TO_ANSIBLE_MACHINE_TYPE = { - v: k for k, v in FROM_ANSIBLE_TO_HYPERCORE_MACHINE_TYPE.items() -} - - VM_PAYLOAD_KEYS = [ "blockDevs", "bootDevices", @@ -109,6 +99,86 @@ ) +class VmMachineType: + # In table below is left side output from 'sc vmmachinetypes show' command, + # on most recent HyperCore version. + _map_hypercore_to_ansible = { + "scale-bios-9.3": "BIOS", + "scale-7.2": "BIOS", # ? + "scale-5.4": "BIOS", # ? + "scale-8.10": "UEFI", + "scale-uefi-9.3": "UEFI", + "scale-uefi-tpm-compatible-9.3": "vTPM+UEFI-compatible", + "scale-bios-lsi-9.2": "BIOS", # ? + "scale-uefi-tpm-9.3": "vTPM+UEFI", + "scale-6.4": "BIOS", # ? + "scale-uefi-tpm-9.2": "vTPM+UEFI", + } + + # Check in GUI what exactly is sent for selected VM machine type + # when creating a new VM. 
+ # On 9.1.14.208456 + _map_ansible_to_hypercore_91 = { + "BIOS": "scale-7.2", + "UEFI": "scale-8.10", + } + # On 9.2.13.211102 + _map_ansible_to_hypercore_92 = { + "BIOS": "scale-7.2", + "UEFI": "scale-8.10", + "vTPM+UEFI": "scale-uefi-tpm-9.2", + } + # On 9.3.1.212486 (pre-release) + _map_ansible_to_hypercore_93 = { + "BIOS": "scale-bios-9.3", + "UEFI": "scale-uefi-9.3", + "vTPM+UEFI": "scale-uefi-tpm-9.3", + "vTPM+UEFI-compatible": "scale-uefi-tpm-compatible-9.3", + } + # HyperCore machineTypeKeyword can be: "bios" "uefi" "tpm" "tpm-compatible" + _map_ansible_to_hypercore_machine_type_keyword = { + "BIOS": "bios", + "UEFI": "uefi", + "vTPM+UEFI": "tpm", + "vTPM+UEFI-compatible": "tpm-compatible", + } + + @classmethod + def from_ansible_to_hypercore( + cls, ansible_machine_type: str, hcversion: HyperCoreVersion + ) -> str: + # Empty string is returned if ansible_machine_type cannot be used with the given HyperCore version. + if not ansible_machine_type: + return "" + if hcversion.verify(">=9.3.0"): + map_ansible_to_hypercore = cls._map_ansible_to_hypercore_93 + elif hcversion.verify(">=9.2.0"): + map_ansible_to_hypercore = cls._map_ansible_to_hypercore_92 + else: + # hcversion >=9.1.0, might work with earlier version too + map_ansible_to_hypercore = cls._map_ansible_to_hypercore_91 + return map_ansible_to_hypercore.get(ansible_machine_type, "") + + @classmethod + def from_hypercore_to_ansible(cls, vm_dict: dict) -> str: + # We receive whole vm_dict as returned by HC3, + # and use machineTypeKeyword if present. 
+ if "machineTypeKeyword" in vm_dict: + _map_hypercore_machine_type_keyword_to_ansible = { + cls._map_ansible_to_hypercore_machine_type_keyword[k]: k + for k in cls._map_ansible_to_hypercore_machine_type_keyword + } + # "machineTypeKeyword" is available in HyperCore 9.3 or later + return _map_hypercore_machine_type_keyword_to_ansible.get( + vm_dict["machineTypeKeyword"], "" + ) + return cls._map_hypercore_to_ansible.get(vm_dict["machineType"], "") + + @classmethod + def from_ansible_to_hypercore_machine_type_keyword(cls, ansible_machine_type: str): + return cls._map_ansible_to_hypercore_machine_type_keyword[ansible_machine_type] + + class VM(PayloadMapper): # Fields cloudInitData, desiredDisposition and latestTaskTag are left out and won't be transferred between # ansible and hypercore transformations @@ -225,14 +295,7 @@ def from_hypercore(cls, vm_dict, rest_client) -> Optional[VM]: snapshot_schedule = SnapshotSchedule.get_snapshot_schedule( query={"uuid": vm_dict["snapshotScheduleUUID"]}, rest_client=rest_client ) - try: - machine_type = FROM_HYPERCORE_TO_ANSIBLE_MACHINE_TYPE[ - vm_dict["machineType"] - ] - except KeyError: - raise errors.ScaleComputingError( - f"Virtual machine: {vm_dict['name']} has an invalid Machine type: {vm_dict['machineType']}." 
- ) + machine_type = VmMachineType.from_hypercore_to_ansible(vm_dict) return cls( uuid=vm_dict["uuid"], # No uuid when creating object from ansible node_uuid=vm_dict["nodeUUID"], # Needed in vm_node_affinity @@ -424,7 +487,7 @@ def import_vm(cls, rest_client, ansible_dict): timeout=None, ) - def to_hypercore(self): + def to_hypercore(self, hcversion: HyperCoreVersion): vm_dict = dict( name=self.name, description=self.description, @@ -444,10 +507,16 @@ def to_hypercore(self): vm_dict["state"] = FROM_ANSIBLE_TO_HYPERCORE_POWER_STATE.get( self.power_state, "unknown-power-state-sorry" ) - if self.machine_type: - vm_dict["machineType"] = FROM_ANSIBLE_TO_HYPERCORE_MACHINE_TYPE[ - self.machine_type - ] + + # if self.machine_type: + # vm_dict["machineType"] = FROM_ANSIBLE_TO_HYPERCORE_MACHINE_TYPE[ + # self.machine_type + # ] + if self.machine_type and hcversion.verify("<9.3.0"): + vm_dict["machineType"] = VmMachineType.from_ansible_to_hypercore( + self.machine_type, hcversion + ) + return vm_dict def to_ansible(self): @@ -510,7 +579,8 @@ def post_vm_payload(self, rest_client, ansible_dict): # The rest of the keys from VM_PAYLOAD_KEYS will get set properly automatically # Cloud init will be obtained through ansible_dict - If method will be reused outside of vm module, # cloud_init should be one of the ansible_dict's keys. 
- payload = self.to_hypercore() + hcversion = HyperCoreVersion(rest_client) + payload = self.to_hypercore(hcversion) VM._post_vm_payload_set_disks(payload, rest_client) payload["netDevs"] = [ filter_dict(nic, *nic.keys()) for nic in payload["netDevs"] @@ -521,6 +591,14 @@ def post_vm_payload(self, rest_client, ansible_dict): if cloud_init_payload is not None: dom["cloudInitData"] = cloud_init_payload options = dict(attachGuestToolsISO=payload["attachGuestToolsISO"]) + if hcversion.verify(">=9.3.0"): + machine_type_keyword = ( + VmMachineType.from_ansible_to_hypercore_machine_type_keyword( + self.machine_type + ) + ) + if machine_type_keyword: + options.update(dict(machineTypeKeyword=machine_type_keyword)) return dict(dom=dom, options=options) @staticmethod diff --git a/plugins/modules/vm.py b/plugins/modules/vm.py index 28783cdf..6c565a85 100644 --- a/plugins/modules/vm.py +++ b/plugins/modules/vm.py @@ -224,8 +224,12 @@ - Scale I(Hardware) version. - Required if creating a new VM. - Only relevant when creating the VM. This property cannot be modified. + - HyperCore needs to support requested machine_type. + BIOS and UEFI - available since 9.1. + vTPM+UEFI - available since 9.2. + vTPM+UEFI-compatible - available since 9.3. type: str - choices: [ BIOS, UEFI, vTPM+UEFI ] + choices: [ BIOS, UEFI, vTPM+UEFI, vTPM+UEFI-compatible ] version_added: 1.1.0 notes: - The C(record) return value will be changed from list (containing a single item) to dict. @@ -399,8 +403,10 @@ ManageVMParams, ManageVMDisks, ManageVMNics, + VmMachineType, ) from ..module_utils.task_tag import TaskTag +from ..module_utils.hypercore_version import HyperCoreVersion MODULE_PATH = "scale_computing.hypercore.vm" @@ -504,6 +510,19 @@ def ensure_absent(module, rest_client): return False, [], dict(), reboot +def check_params(module, rest_client): + # Check if used machine_type is available in this HC3 version. 
+ ansible_machine_type = module.params.get("machine_type") + if ansible_machine_type: + hcversion = HyperCoreVersion(rest_client) + hypercore_machine_type = VmMachineType.from_ansible_to_hypercore( + ansible_machine_type, hcversion + ) + if not hypercore_machine_type: + msg = f"machine_type={ansible_machine_type} is not supported on HyperCore version {hcversion.version}." + module.fail_json(msg=msg) + + def main(): module = AnsibleModule( supports_check_mode=False, # False ATM @@ -656,7 +675,10 @@ def main(): snapshot_schedule=dict( type="str", ), - machine_type=dict(type="str", choices=["BIOS", "UEFI", "vTPM+UEFI"]), + machine_type=dict( + type="str", + choices=["BIOS", "UEFI", "vTPM+UEFI", "vTPM+UEFI-compatible"], + ), ), required_if=[ ( @@ -686,6 +708,7 @@ def main(): try: client = Client.get_client(module.params["cluster_instance"]) rest_client = RestClient(client) + check_params(module, rest_client) changed, record, diff, reboot = run(module, rest_client) module.exit_json(changed=changed, record=record, diff=diff, vm_rebooted=reboot) except errors.ScaleComputingError as e: diff --git a/tests/unit/plugins/conftest.py b/tests/unit/plugins/conftest.py index dbfe248b..99bf4602 100644 --- a/tests/unit/plugins/conftest.py +++ b/tests/unit/plugins/conftest.py @@ -31,6 +31,10 @@ TaskTag, ) +from ansible_collections.scale_computing.hypercore.plugins.module_utils.hypercore_version import ( + HyperCoreVersion, +) + @pytest.fixture def client(mocker): @@ -193,3 +197,10 @@ def runner(module, params=None): basic.AnsibleModule, exit_json=exit_json_mock, fail_json=fail_json_mock ) return runner + + +@pytest.fixture +def hcversion(mocker, version_str="9.2.0.112233"): + hcv = HyperCoreVersion(rest_client=None) # type: ignore + hcv._version = version_str + return hcv diff --git a/tests/unit/plugins/module_utils/test_vm.py b/tests/unit/plugins/module_utils/test_vm.py index 62e429dd..42ff18d0 100644 --- a/tests/unit/plugins/module_utils/test_vm.py +++ 
b/tests/unit/plugins/module_utils/test_vm.py @@ -150,7 +150,7 @@ def test_vm_from_hypercore_dict_is_none(self, rest_client): vm_from_hypercore = VM.from_hypercore(vm_dict, rest_client) assert vm == vm_from_hypercore - def test_vm_to_hypercore(self): + def test_vm_to_hypercore(self, hcversion): vm = VM( uuid=None, # No uuid when creating object from ansible name="VM-name", @@ -166,7 +166,7 @@ def test_vm_to_hypercore(self): operating_system="os_windows_server_2012", ) - assert vm.to_hypercore() == dict( + assert vm.to_hypercore(hcversion) == dict( name="VM-name", description="desc", mem=42, @@ -561,7 +561,7 @@ def test_get_specific_disk_multiple_results(self): with pytest.raises(errors.ScaleComputingError, match="Disk"): vm.get_specific_disk(disk_query) - def test_get_or_fail_when_get(self, rest_client, mocker): + def test_get_or_fail_when_get(self, rest_client, hcversion, mocker): rest_client.list_records.return_value = [ { "uuid": "7542f2gg-5f9a-51ff-8a91-8ceahgf47ghg", @@ -597,10 +597,10 @@ def test_get_or_fail_when_get(self, rest_client, mocker): hypercore_dict = rest_client.list_records.return_value[0] actual = VM.from_hypercore( vm_dict=hypercore_dict, rest_client=rest_client - ).to_hypercore() + ).to_hypercore(hcversion) results = VM.get_or_fail( query={"name": "XLAB_test_vm"}, rest_client=rest_client - )[0].to_hypercore() + )[0].to_hypercore(hcversion) assert results == actual def test_get_or_fail_when_fail(self, rest_client): @@ -611,7 +611,7 @@ def test_get_or_fail_when_fail(self, rest_client): ): VM.get_or_fail(query={"name": "XLAB-test-vm"}, rest_client=rest_client) - def test_post_vm_payload_cloud_init_absent(self, rest_client): + def test_post_vm_payload_cloud_init_absent_v92(self, rest_client): vm = VM( uuid=None, name="VM-name", @@ -628,6 +628,10 @@ def test_post_vm_payload_cloud_init_absent(self, rest_client): machine_type="BIOS", ) + rest_client.get_record.side_effect = [ + dict(icosVersion="9.2.0.12345"), + ] + post_vm_payload = 
vm.post_vm_payload(rest_client, {}) assert post_vm_payload == { @@ -645,7 +649,7 @@ def test_post_vm_payload_cloud_init_absent(self, rest_client): "options": {"attachGuestToolsISO": False}, } - def test_post_vm_payload_cloud_init_present(self, rest_client): + def test_post_vm_payload_cloud_init_present_v92(self, rest_client): vm = VM( uuid=None, name="VM-name", @@ -670,6 +674,10 @@ def test_post_vm_payload_cloud_init_present(self, rest_client): machine_type="BIOS", ) + rest_client.get_record.side_effect = [ + dict(icosVersion="9.2.0.12345"), + ] + post_vm_payload = vm.post_vm_payload(rest_client, ansible_dict) assert post_vm_payload == { @@ -691,6 +699,59 @@ def test_post_vm_payload_cloud_init_present(self, rest_client): "options": {"attachGuestToolsISO": False}, } + def test_post_vm_payload_cloud_init_present_v93(self, rest_client): + vm = VM( + uuid=None, + name="VM-name", + tags=["XLAB-test-tag1", "XLAB-test-tag2"], + description="desc", + memory=42, + power_state="started", + vcpu=2, + nics=[], + disks=[], + boot_devices=[], + attach_guest_tools_iso=False, + operating_system=None, + machine_type="BIOS", + ) + + ansible_dict = dict( + cloud_init=dict( + user_data="cloud_init-user-data", + meta_data="cloud_init-meta-data", + ), + machine_type="BIOS", + ) + + rest_client.get_record.side_effect = [ + dict(icosVersion="9.3.0.12345"), + ] + + post_vm_payload = vm.post_vm_payload(rest_client, ansible_dict) + + assert post_vm_payload == { + "dom": { + "blockDevs": [], + "bootDevices": [], + "cloudInitData": { + "metaData": "Y2xvdWRfaW5pdC1tZXRhLWRhdGE=", + "userData": "Y2xvdWRfaW5pdC11c2VyLWRhdGE=", + }, + "description": "desc", + #"machineType": "scale-7.2", + "mem": 42, + "name": "VM-name", + "netDevs": [], + "numVCPU": 2, + "tags": "XLAB-test-tag1,XLAB-test-tag2", + }, + "options": { + "attachGuestToolsISO": False, + "machineTypeKeyword": "bios", + }, + } + def test_post_vm_payload_set_disks(self, rest_client): vm_dict = dict( uuid="", diff --git 
a/tests/unit/plugins/modules/test_vm.py b/tests/unit/plugins/modules/test_vm.py index 3d0539f5..93a1cbe3 100644 --- a/tests/unit/plugins/modules/test_vm.py +++ b/tests/unit/plugins/modules/test_vm.py @@ -1110,7 +1110,7 @@ def test_ensure_present_updated_boot_order( True, ) - def test_ensure_present_create_vm_no_boot_devices_power_state_is_shutdown( + def test_ensure_present_create_vm_no_boot_devices_power_state_is_shutdown_v92( self, create_module, rest_client, task_wait, mocker ): module = create_module( @@ -1141,6 +1141,7 @@ def test_ensure_present_create_vm_no_boot_devices_power_state_is_shutdown( rest_client.get_record.side_effect = [ None, + dict(icosVersion="9.2.0.12345"), dict( uuid="id", nodeUUID="", @@ -1275,7 +1276,173 @@ def test_ensure_present_create_vm_no_boot_devices_power_state_is_shutdown( False, ) - def test_ensure_present_create_vm_boot_devices_set_power_state_is_shutdown( + def test_ensure_present_create_vm_no_boot_devices_power_state_is_shutdown_v93( + self, create_module, rest_client, task_wait, mocker + ): + module = create_module( + params=dict( + cluster_instance=dict( + host="https://0.0.0.0", + username="admin", + password="admin", + ), + vm_name="VM-name-unique", + description="desc", + memory=42, + vcpu=2, + power_state="shutdown", + state="present", + tags="", + disks=[], + nics=[], + boot_devices=[], + attach_guest_tools_iso=None, + cloud_init=dict( + user_data=None, + meta_data=None, + ), + machine_type="BIOS", + ), + ) + + rest_client.get_record.side_effect = [ + None, + dict(icosVersion="9.3.0.12345"), + dict( + uuid="id", + nodeUUID="", + name="VM-name-unique", + tags="XLAB-test-tag1,XLAB-test-tag2", + description="desc", + mem=42, + state="SHUTDOWN", + numVCPU=2, + netDevs=[], + blockDevs=[], + bootDevices=[], + attachGuestToolsISO=False, + operatingSystem=None, + affinityStrategy={ + "strictAffinity": False, + "preferredNodeUUID": "", + "backupNodeUUID": "", + }, + snapshotScheduleUUID="snapshot-id", + machineType="scale-7.2", + 
sourceVirDomainUUID="", + snapUUIDs=[], + ), + dict( + uuid="id", + nodeUUID="", + name="VM-name-unique", + tags="XLAB-test-tag1,XLAB-test-tag2", + description="desc", + mem=42, + state="SHUTDOWN", + numVCPU=2, + netDevs=[], + blockDevs=[], + bootDevices=[], + attachGuestToolsISO=False, + operatingSystem=None, + affinityStrategy={ + "strictAffinity": False, + "preferredNodeUUID": "", + "backupNodeUUID": "", + }, + snapshotScheduleUUID="snapshot-id", + machineType="scale-7.2", + sourceVirDomainUUID="", + snapUUIDs=[], + ), + ] + mocker.patch( + "ansible_collections.scale_computing.hypercore.plugins.module_utils.vm.Node.get_node" + ).return_value = None + mocker.patch( + "ansible_collections.scale_computing.hypercore.plugins.module_utils.vm.SnapshotSchedule.get_snapshot_schedule" + ).return_value = None + rest_client.create_record.return_value = { + "taskTag": 123, + "createdUUID": "", + } + result = vm.ensure_present(module, rest_client) + assert result == ( + True, + [ + { + "attach_guest_tools_iso": False, + "boot_devices": [], + "description": "desc", + "disks": [], + "machine_type": "BIOS", + "memory": 42, + "nics": [], + "node_affinity": { + "backup_node": { + "backplane_ip": "", + "lan_ip": "", + "node_uuid": "", + "peer_id": None, + }, + "preferred_node": { + "backplane_ip": "", + "lan_ip": "", + "node_uuid": "", + "peer_id": None, + }, + "strict_affinity": False, + }, + "operating_system": None, + "power_state": "shutdown", + "replication_source_vm_uuid": "", + "snapshot_schedule": "", + "tags": ["XLAB-test-tag1", "XLAB-test-tag2"], + "uuid": "id", + "vcpu": 2, + "vm_name": "VM-name-unique", + } + ], + { + "after": { + "attach_guest_tools_iso": False, + "boot_devices": [], + "description": "desc", + "disks": [], + "machine_type": "BIOS", + "memory": 42, + "nics": [], + "node_affinity": { + "backup_node": { + "backplane_ip": "", + "lan_ip": "", + "node_uuid": "", + "peer_id": None, + }, + "preferred_node": { + "backplane_ip": "", + "lan_ip": "", + 
"node_uuid": "", + "peer_id": None, + }, + "strict_affinity": False, + }, + "operating_system": None, + "power_state": "shutdown", + "replication_source_vm_uuid": "", + "snapshot_schedule": "", + "tags": ["XLAB-test-tag1", "XLAB-test-tag2"], + "uuid": "id", + "vcpu": 2, + "vm_name": "VM-name-unique", + }, + "before": None, + }, + False, + ) + + def test_ensure_present_create_vm_boot_devices_set_power_state_is_shutdown_v92( self, create_module, rest_client, task_wait, mocker ): module = create_module( @@ -1320,6 +1487,7 @@ def test_ensure_present_create_vm_boot_devices_set_power_state_is_shutdown( rest_client.get_record.side_effect = [ None, + dict(icosVersion="9.2.0.12345"), dict( uuid="id", nodeUUID="", @@ -1546,7 +1714,7 @@ def test_ensure_present_create_vm_boot_devices_set_power_state_is_shutdown( False, ) - def test_ensure_present_no_boot_devices_set_power_state_is_not_shutdown( + def test_ensure_present_no_boot_devices_set_power_state_is_not_shutdown_v92( self, create_module, rest_client, task_wait, mocker ): module = create_module( @@ -1583,6 +1751,7 @@ def test_ensure_present_no_boot_devices_set_power_state_is_not_shutdown( rest_client.get_record.side_effect = [ None, + dict(icosVersion="9.2.0.12345"), dict( uuid="id", nodeUUID="",