diff --git a/README.md b/README.md
index c2debc97..f808adcd 100644
--- a/README.md
+++ b/README.md
@@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount
 ##### `mount_options`
 The `mount_options` specifies custom mount options as a string, e.g.: 'ro'.
 
+#### `storage_safe_mode`
+When true (the default), the role fails with an error instead of implicitly/automatically removing existing devices or formatting.
+
 Example Playbook
 ----------------
 
diff --git a/defaults/main.yml b/defaults/main.yml
index 7b500e5e..476616b9 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -3,6 +3,7 @@
 storage_provider: "blivet"
 storage_use_partitions: null
 storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type
+storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting
 
 storage_pool_defaults:
   state: "present"
diff --git a/library/blivet.py b/library/blivet.py
index ff069163..858ca2f9 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -31,6 +31,10 @@
     disklabel_type:
         description:
            - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet
+    safe_mode:
+        description:
+            - boolean indicating that we should fail rather than implicitly/automatically
+              remove devices or formatting
 
 author:
     - David Lehman (dlehman@redhat.com)
@@ -116,6 +120,8 @@
 
 use_partitions = None  # create partitions on pool backing device disks?
 disklabel_type = None  # user-specified disklabel type
+safe_mode = None  # do not remove any existing devices or formatting
+packages_only = None  # only set things up enough to get a list of required packages
 
 
 class BlivetAnsibleError(Exception):
@@ -163,7 +169,6 @@ def _get_format(self):
                          label=self._volume['fs_label'],
                          options=self._volume['fs_create_options'])
         if not fmt.supported or not fmt.formattable:
-            # FAIL: fs type tools are not available
             raise BlivetAnsibleError("required tools for file system '%s' are missing" % self._volume['fs_type'])
 
         return fmt
@@ -189,7 +194,6 @@ def _resize(self):
         try:
             size = Size(self._volume['size'])
         except Exception:
-            # FAIL: invalid size specification
             raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size']))
 
         if size and self._device.resizable and self._device.size != size:
@@ -197,24 +201,28 @@ def _resize(self):
                 self._device.format.update_size_info()
 
             if not self._device.min_size <= size <= self._device.max_size:
-                # FAIL: resize to specified size not possible
                 raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size))
 
             try:
                 self._blivet.resize_device(self._device, size)
             except ValueError as e:
-                # FAIL: resize not possible
                 raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s: %s" % (self._device.name,
                                                                                               self._device.size,
                                                                                               size, str(e)))
 
     def _reformat(self):
         """ Schedule actions as needed to ensure the volume is formatted as specified.
""" + global packages_only + fmt = self._get_format() if self._device.format.type == fmt.type: return - if self._device.format.status: + if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name) and \ + not packages_only: + raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) + + if self._device.format.status and not packages_only: self._device.format.teardown() self._blivet.format_device(self._device, fmt) @@ -255,6 +263,17 @@ def _get_device_id(self): def _type_check(self): return self._device.is_disk + def _look_up_device(self): + super(BlivetDiskVolume, self)._look_up_device() + if not self._get_device_id(): + raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name']) + elif not isinstance(self._volume['disks'], list): + raise BlivetAnsibleError("volume disks must be specified as a list") + + if self._device is None: + raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks'])) + + class BlivetPartitionVolume(BlivetVolume): def _type_check(self): @@ -273,21 +292,18 @@ def _create(self): parent = self._blivet.devicetree.resolve_device(self._volume['pool']) if parent is None: - # FAIL: failed to find pool raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) size = Size("256 MiB") try: device = self._blivet.new_partition(parents=[parent], size=size, grow=True, fmt=self._get_format()) except Exception: - # FAIL: failed to instantiate volume device raise BlivetAnsibleError("failed set up volume '%s'" % self._volume['name']) self._blivet.create_device(device) try: do_partitioning(self._blivet) except Exception: - # FAIL: partition allocation failed: not enough space? raise BlivetAnsibleError("partition allocation failed for volume '%s'" % self._volume['name']) self._device = device @@ -303,18 +319,15 @@ def _create(self): parent = self._blivet_pool._device if parent is None: - # FAIL: failed to find pool raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) try: size = Size(self._volume['size']) except Exception: - # FAIL: invalid size specification raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name'])) fmt = self._get_format() if size > parent.free_space: - # FAIL: volume size greater than pool free space raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size, parent.name, parent.free_space)) @@ -323,7 +336,6 @@ def _create(self): device = self._blivet.new_lv(name=self._volume['name'], parents=[parent], size=size, fmt=fmt) except Exception: - # FAIL: failed to create volume raise BlivetAnsibleError("failed to set up volume '%s'" % self._volume['name']) self._blivet.create_device(device) @@ -391,8 +403,7 @@ def _type_check(self): # pylint: disable=no-self-use def _look_up_disks(self): """ Look up the pool's disks in blivet's device tree. """ if not self._pool['disks']: - # FAIL: no disks specified for pool - raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) # sure about this one? 
+            raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name'])
         elif not isinstance(self._pool['disks'], list):
             raise BlivetAnsibleError("pool disks must be specified as a list")
 
@@ -403,7 +414,6 @@ def _look_up_disks(self):
             disks.append(device)
 
         if self._pool['disks'] and not disks:
-            # FAIL: failed to find any disks
             raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks']))
 
         self._disks = disks
@@ -428,8 +438,11 @@ def _create_members(self):
         """ Schedule actions as needed to ensure pool member devices exist. """
         members = list()
         for disk in self._disks:
-            if not disk.isleaf:
-                self._blivet.devicetree.recursive_remove(disk)
+            if not disk.isleaf or disk.format.type is not None:
+                if safe_mode and not packages_only:
+                    raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name']))
+                else:
+                    self._blivet.devicetree.recursive_remove(disk)
 
             if use_partitions:
                 label = get_format("disklabel", device=disk.path)
@@ -446,7 +459,6 @@ def _create_members(self):
         try:
             do_partitioning(self._blivet)
         except Exception:
-            # FAIL: problem allocating partitions for pool backing devices
             raise BlivetAnsibleError("failed to allocation partitions for pool '%s'" % self._pool['name'])
 
         return members
@@ -490,7 +502,11 @@ def _look_up_device(self):
     def _create(self):
         if self._device.format.type != "disklabel" or \
            self._device.format.label_type != disklabel_type:
-            self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
+            if safe_mode and not packages_only:
+                raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' "
+                                         "(pool '%s') in safe mode" % (self._device.name, self._pool['name']))
+            else:
+                self._blivet.devicetree.recursive_remove(self._device, remove_device=False)
 
             label = get_format("disklabel", device=self._device.path, label_type=disklabel_type)
             self._blivet.format_device(self._device, label)
@@ -503,7 +519,6 @@ def _type_check(self):
     def _get_format(self):
         fmt = get_format("lvmpv")
         if not fmt.supported or not fmt.formattable:
-            # FAIL: lvm tools are not available
             raise BlivetAnsibleError("required tools for managing LVM are missing")
 
         return fmt
@@ -516,7 +531,6 @@ def _create(self):
         try:
             pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members)
         except Exception:
-            # FAIL: failed to instantiate pool device
             raise BlivetAnsibleError("failed to set up pool '%s'" % self._pool['name'])
 
         self._blivet.create_device(pool_device)
@@ -524,7 +538,7 @@
 
 
 _BLIVET_POOL_TYPES = {
-    "disk": BlivetPartitionPool,
+    "partition": BlivetPartitionPool,
     "lvm": BlivetLVMPool
 }
@@ -660,6 +674,7 @@ def run_module():
         volumes=dict(type='list'),
         packages_only=dict(type='bool', required=False, default=False),
         disklabel_type=dict(type='str', required=False, default=None),
+        safe_mode=dict(type='bool', required=False, default=True),
         use_partitions=dict(type='bool', required=False, default=True))
 
     # seed the result dict in the object
@@ -692,6 +707,12 @@ def run_module():
     global use_partitions
     use_partitions = module.params['use_partitions']
 
+    global safe_mode
+    safe_mode = module.params['safe_mode']
+
+    global packages_only
+    packages_only = module.params['packages_only']
+
     b = Blivet()
     b.reset()
     fstab = FSTab(b)
diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml
index 061195c1..2929238b 100644
--- a/tasks/main-blivet.yml
+++ b/tasks/main-blivet.yml
@@ -38,7 +38,7 @@
     _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]"
     _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]"
     _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']"
-  loop: "{{ _storage_pools|subelements('volumes') }}"
+  loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}"
   when: storage_pools is defined
 
 - name: Apply defaults to pools and volumes [3/6]
@@ -105,6 +105,7 @@
     volumes: "{{ _storage_volumes }}"
     use_partitions: "{{ storage_use_partitions }}"
     disklabel_type: "{{ storage_disklabel_type }}"
+    safe_mode: "{{ storage_safe_mode }}"
   register: blivet_output
 
 - debug:
diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml
index b6aa80ba..f7962c6e 100644
--- a/tests/tests_change_disk_fs.yml
+++ b/tests/tests_change_disk_fs.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test'
     volume_size: '5g'
     fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}"
diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml
index cca23ebe..b88e7689 100644
--- a/tests/tests_change_fs.yml
+++ b/tests/tests_change_fs.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
     volume_size: '5g'
     fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}"
diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml
index e4aa76cd..eb93c116 100644
--- a/tests/tests_change_fs_use_partitions.yml
+++ b/tests/tests_change_fs_use_partitions.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     storage_use_partitions: true
     mount_location: '/opt/test1'
     volume_size: '5g'
diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml
index b19ae352..c5290eb1 100644
--- a/tests/tests_create_disk_then_remove.yml
+++ b/tests/tests_create_disk_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
 
   tasks:
diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml
index 6b259396..f2c06fb9 100644
--- a/tests/tests_create_lvm_pool_then_remove.yml
+++ b/tests/tests_create_lvm_pool_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location1: '/opt/test1'
     mount_location2: '/opt/test2'
     volume_group_size: '10g'
diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml
index 40b3e620..ae589d3d 100644
--- a/tests/tests_create_partition_volume_then_remove.yml
+++ b/tests/tests_create_partition_volume_then_remove.yml
@@ -2,6 +2,7 @@
 - hosts: all
   become: true
   vars:
+    storage_safe_mode: false
     mount_location: '/opt/test1'
 
   tasks:
@@ -18,7 +19,7 @@
     vars:
       storage_pools:
         - name: "{{ unused_disks[0] }}"
-          type: disk
+          type: partition
           disks: "{{ unused_disks }}"
           volumes:
             - name: test1
@@ -33,7 +34,7 @@
     vars:
       storage_pools:
         - name: "{{ unused_disks[0] }}"
-          type: disk
+          type: partition
           disks: "{{ unused_disks }}"
           volumes:
             - name: test1
@@ -48,7 +49,7 @@
     vars:
       storage_pools:
         - name: "{{ unused_disks[0] }}"
-          type: disk
+          type: partition
           disks: "{{ unused_disks }}"
           state: absent
           volumes:
@@ -65,7 +66,7 @@
     vars:
       storage_pools:
         - name: "{{ unused_disks[0] }}"
-          type: disk
+          type: partition
           disks: "{{ unused_disks }}"
           state: absent
           volumes:
diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml
index 36eec41e..7112f6ed 100644
--- a/tests/tests_disk_errors.yml
+++ b/tests/tests_disk_errors.yml
@@ -3,8 +3,17 @@
   become: true
   vars:
     mount_location: '/opt/test1'
+    testfile: "{{ mount_location }}/quux"
 
   tasks:
+  - include_role:
+      name: storage
+
+  - include_tasks: get_unused_disk.yml
+    vars:
+      min_size: "10g"
+      max_return: 1
+
   - name: Verify that the play fails with the expected error message
     block:
       - name: Create a disk volume mounted at "{{ mount_location }}"
@@ -14,11 +23,246 @@
         storage_volumes:
           - name: test1
             type: disk
-            disks: "['/dev/surelyidonotexist']"
+            disks: ['/dev/surelyidonotexist']
             mount_point: "{{ mount_location }}"
 
-      - name: Check the error output
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly,
+      # blivet_output.failed is false.
+      # - name: Show the error output
+      #   debug:
+      #     msg: "{{ blivet_output.failed }}"
+
+      # - name: Check the error output
+      #   assert:
+      #     that: blivet_output.failed | bool
+      #     msg: "Expected error message not found for missing disk"
+
+  - name: Create a file system on disk
+    include_role:
+      name: storage
+    vars:
+      storage_volumes:
+        - name: test1
+          type: disk
+          fs_type: 'ext4'
+          disks: "{{ unused_disks }}"
+          mount_point: "{{ mount_location }}"
+
+  - name: create a file
+    file:
+      path: "{{ testfile }}"
+      state: touch
+
+  - name: Test for correct handling of safe_mode
+    block:
+      - name: Try to replace the file system on disk in safe mode
+        include_role:
+          name: storage
+        vars:
+          storage_volumes:
+            - name: test1
+              type: disk
+              fs_type: 'ext3'
+              disks: "{{ unused_disks }}"
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      - name: Verify the output
+        assert:
+          that: "blivet_output.failed and
+                 blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                 not blivet_output.changed"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: Unmount file system
+    include_role:
+      name: storage
+    vars:
+      storage_volumes:
+        - name: test1
+          type: disk
+          fs_type: 'ext4'
+          disks: "{{ unused_disks }}"
+          mount_point: none
+
+  - name: Test for correct handling of safe_mode with unmounted filesystem
+    block:
+      - name: Try to replace the file system on disk in safe mode
+        include_role:
+          name: storage
+        vars:
+          storage_volumes:
+            - name: test1
+              type: disk
+              fs_type: 'ext3'
+              disks: "{{ unused_disks }}"
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
         assert:
-          that: "{{ blivet_output.failed }}"
-          msg: "Expected error message not found for missing disk"
-    ignore_errors: yes
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      - name: Verify the output
+        assert:
+          that: "blivet_output.failed and
+                 blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                 not blivet_output.changed"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: Remount file system
+    include_role:
+      name: storage
+    vars:
+      storage_volumes:
+        - name: test1
+          type: disk
+          fs_type: 'ext4'
+          disks: "{{ unused_disks }}"
+          mount_point: "{{ mount_location }}"
+
+  - name: stat the file
+    stat:
+      path: "{{ testfile }}"
+    register: stat_r
+
+  - name: assert file presence
+    assert:
+      that:
+        stat_r.stat.isreg is defined and stat_r.stat.isreg
+      msg: "data lost!"
+
+  - name: Test for correct handling of safe_mode
+    block:
+      - name: Try to create a partition pool on the disk already containing a file system in safe_mode
+        include_role:
+          name: storage
+        vars:
+          storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              type: partition
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      - name: Verify the output
+        assert:
+          that: "blivet_output.failed and
+                 blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                 not blivet_output.changed"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: Test for correct handling of safe_mode with existing filesystem
+    block:
+      - name: Try to create LVM pool on disk that already belongs to an existing filesystem
+        include_role:
+          name: storage
+        vars:
+          storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              type: lvm
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      - name: Verify the output
+        assert:
+          that: "{{ blivet_output.failed and
+                    blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                    not blivet_output.changed }}"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: stat the file
+    stat:
+      path: "{{ testfile }}"
+    register: stat_r
+
+  - name: assert file presence
+    assert:
+      that:
+        stat_r.stat.isreg is defined and stat_r.stat.isreg
+      msg: "data lost!"
+
+  - name: Create a partition pool on the disk already containing a file system w/o safe_mode
+    include_role:
+      name: storage
+    vars:
+      storage_safe_mode: false
+      storage_pools:
+        - name: foo
+          disks: "{{ unused_disks }}"
+          type: partition
+
+  - name: Verify the output
+    assert:
+      that: not blivet_output.failed
+      msg: "failed to create partition pool over existing file system w/o safe_mode"
+
+  - name: Clean up
+    include_role:
+      name: storage
+    vars:
+      storage_safe_mode: false
+      storage_pools:
+        - name: foo
+          type: partition
+          disks: "{{ unused_disks }}"
+          state: absent
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index ab236744..e8be1535 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -33,13 +33,32 @@
             size: "{{ volume1_size }}"
             mount_point: "{{ mount_location1 }}"
 
-      - name: Verify the output
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
         assert:
-          that: "{{ blivet_output.failed and
-                    blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and
-                    not blivet_output.changed }}"
-          msg: "Unexpected behavior w/ non-existent pool disk"
-    ignore_errors: yes
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly
+      # - debug:
+      #     msg: "{{ 'failed: ' + blivet_output.failed | string +
+      #              'msg: ' + blivet_output.msg +
+      #              'changed: ' + blivet_output.changed | string }}"
+
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ non-existent pool disk"
 
   - name: Test for correct handling of invalid size specification.
     block:
@@ -55,13 +74,27 @@
             size: "{{ invalid_size }}"
             mount_point: "{{ mount_location1 }}"
 
-      - name: Verify the output
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
         assert:
-          that: "{{ blivet_output.failed and
-                    blivet_output.msg|regex_search('invalid size.+for volume') and
-                    not blivet_output.changed }}"
-          msg: "Unexpected behavior w/ invalid volume size"
-    ignore_errors: yes
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('invalid size.+for volume') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ invalid volume size"
 
   - name: Test for correct handling of too-large volume size.
     block:
@@ -77,13 +110,27 @@
             size: "{{ too_large_size }}"
             mount_point: "{{ mount_location1 }}"
 
-      - name: Verify the output
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
         assert:
-          that: "{{ blivet_output.failed and
-                    blivet_output.msg|regex_search('size.+exceeds.+space in pool') and
-                    not blivet_output.changed }}"
-          msg: "Unexpected behavior w/ too-large volume size"
-    ignore_errors: yes
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('size.+exceeds.+space in pool') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ too-large volume size"
 
   - name: Test for correct handling of non-list disk specification.
     block:
@@ -99,13 +146,27 @@
             size: "{{ too_large_size }}"
             mount_point: "{{ mount_location1 }}"
 
-      - name: Verify the output
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
         assert:
-          that: "{{ blivet_output.failed and
-                    blivet_output.msg|regex_search('disk.+list') and
-                    not blivet_output.changed }}"
-          msg: "Unexpected behavior w/ disks not in list form"
-    ignore_errors: yes
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('disk.+list') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ disks not in list form"
 
   - name: Test for correct handling of missing disk specification.
     block:
@@ -121,13 +182,27 @@
             size: "{{ too_large_size }}"
             mount_point: "{{ mount_location1 }}"
 
-      - name: Verify the output
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
         assert:
-          that: "{{ blivet_output.failed and
-                    blivet_output.msg|regex_search('no disks.+pool') and
-                    not blivet_output.changed }}"
-          msg: "Unexpected behavior w/ no disks specified"
-    ignore_errors: yes
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('no disks.+pool') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ no disks specified"
 
   - name: Test for correct handling of LVM volume not defined within a pool.
     block:
@@ -142,10 +217,172 @@
             size: "{{ volume1_size }}"
             mount_point: "{{ mount_location1 }}"
 
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      # the following does not work properly
+      # - name: Verify the output
+      #   assert:
+      #     that: "{{ blivet_output.failed and
+      #               blivet_output.msg|regex_search('failed to find pool .+ for volume') and
+      #               not blivet_output.changed }}"
+      #     msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
+
+  - name: Create a pool
+    include_role:
+      name: storage
+    vars:
+      storage_pools:
+        - name: testpool1
+          type: lvm
+          disks: "{{ unused_disks }}"
+          volumes:
+            - name: testvol1
+              fs_type: 'ext4'
+              size: '1g'
+
+  - name: Test for correct handling of safe_mode
+    block:
+      - name: Try to replace file system in safe mode
+        include_role:
+          name: storage
+        vars:
+          storage_pools:
+            - name: testpool1
+              type: lvm
+              disks: "{{ unused_disks }}"
+              volumes:
+                - name: testvol1
+                  fs_type: 'ext3'
+                  size: '1g'
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
       - name: Verify the output
         assert:
           that: "{{ blivet_output.failed and
-                    blivet_output.msg|regex_search('failed to find pool .+ for volume') and
+                    blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
                     not blivet_output.changed }}"
-          msg: "Unexpected behavior w/ LVM volume defined outside of any pool"
-    ignore_errors: yes
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: Test for correct handling of safe_mode with resize
+    block:
+      - name: Try to resize in safe mode
+        include_role:
+          name: storage
+        vars:
+          storage_pools:
+            - name: testpool1
+              type: lvm
+              disks: "{{ unused_disks }}"
+              volumes:
+                - name: testvol1
+                  fs_type: 'ext4'
+                  size: '2g'
+
+      - name: Verify the output
+        assert:
+          that: "{{ not blivet_output.failed and blivet_output.changed }}"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+    when: false
+
+  - name: Test for correct handling of safe_mode with existing pool
+    block:
+      - name: Try to create LVM pool on disks that already belong to an existing pool
+        include_role:
+          name: storage
+        vars:
+          storage_pools:
+            - name: foo
+              disks: "{{ unused_disks }}"
+              type: lvm
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      - name: Verify the output
+        assert:
+          that: "{{ blivet_output.failed and
+                    blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and
+                    not blivet_output.changed }}"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: Test for correct handling of safe_mode
+    block:
+      - name: Try to replace a pool by a file system on disk in safe mode
+        include_role:
+          name: storage
+        vars:
+          storage_volumes:
+            - name: test1
+              type: disk
+              fs_type: 'ext3'
+              disks:
+                - "{{ unused_disks[0] }}"
+
+      - name: UNREACH
+        fail:
+          msg: "this should be unreachable"
+
+    rescue:
+      - name: Check that we failed in the role
+        assert:
+          that:
+            - ansible_failed_task.name != 'UNREACH'
+          msg: "Role has not failed when it should have"
+        vars:
+          # Ugh! needed to expand ansible_failed_task
+          storage_provider: blivet
+
+      - name: Verify the output
+        assert:
+          that: "blivet_output.failed and
+                 blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and
+                 not blivet_output.changed"
+          msg: "Unexpected behavior w/ existing data on specified disks"
+
+  - name: Clean up
+    include_role:
+      name: storage
+    vars:
+      storage_safe_mode: false
+      storage_pools:
+        - name: testpool1
+          type: lvm
+          disks: "{{ unused_disks }}"
+          state: absent
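
Usage sketch (illustrative, not part of the patch): with `storage_safe_mode` defaulting to `true`, a play that intentionally re-purposes a disk carrying existing data must now opt out explicitly. The device `/dev/vdb` and the pool/volume names below are placeholders, not values taken from the patch.

```yaml
- hosts: all
  become: true
  vars:
    storage_safe_mode: false  # default is true: fail instead of removing existing devices/formatting
    storage_pools:
      - name: app_pool             # hypothetical pool name
        type: lvm
        disks: ['/dev/vdb']        # hypothetical disk; any existing formatting on it will be destroyed
        volumes:
          - name: data
            size: '5g'
            fs_type: 'ext4'
            mount_point: '/opt/data'
  roles:
    - storage
```

With the default `storage_safe_mode: true`, the same play would instead fail with "cannot remove existing formatting and/or devices on disk ... in safe mode" if `/dev/vdb` already carries a file system or partitions, and no destructive action would be scheduled.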