From ab7389d8d31ec699b0cfcd43b3d98b47bc8b2052 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Wed, 25 Sep 2019 14:32:29 -0400 Subject: [PATCH 1/7] Add flag to prevent implicit removal of existing device/fs. --- README.md | 3 + defaults/main.yml | 1 + library/blivet.py | 54 +++++++----- tasks/main-blivet.yml | 1 + tests/tests_change_disk_fs.yml | 1 + tests/tests_change_fs.yml | 1 + tests/tests_change_fs_use_partitions.yml | 1 + tests/tests_create_disk_then_remove.yml | 1 + tests/tests_create_lvm_pool_then_remove.yml | 1 + ...ts_create_partition_volume_then_remove.yml | 1 + tests/tests_disk_errors.yml | 82 ++++++++++++++++++ tests/tests_lvm_errors.yml | 84 +++++++++++++++++++ 12 files changed, 211 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index c2debc97..f808adcd 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,9 @@ The `mount_point` specifies the directory on which the file system will be mount ##### `mount_options` The `mount_options` specifies custom mount options as a string, e.g.: 'ro'. +#### `storage_safe_mode` +When true (the default), an error will occur instead of automatically removing existing devices and/or formatting. + Example Playbook ---------------- diff --git a/defaults/main.yml b/defaults/main.yml index 7b500e5e..476616b9 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -3,6 +3,7 @@ storage_provider: "blivet" storage_use_partitions: null storage_disklabel_type: null # leave unset to allow the role to select an appropriate label type +storage_safe_mode: true # fail instead of implicitly/automatically removing devices or formatting storage_pool_defaults: state: "present" diff --git a/library/blivet.py b/library/blivet.py index ff069163..1db95e4d 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -31,6 +31,10 @@ disklabel_type: description: - disklabel type string (eg: 'gpt') to use, overriding the built-in logic in blivet + safe_mode: + description: + - boolean indicating that we should fail rather than implicitly/automatically + removing devices or formatting author: - David Lehman (dlehman@redhat.com) @@ -116,6 +120,7 @@ use_partitions = None # create partitions on pool backing device disks? 
disklabel_type = None # user-specified disklabel type +safe_mode = None # do not remove any existing devices or formatting class BlivetAnsibleError(Exception): @@ -163,7 +168,6 @@ def _get_format(self): label=self._volume['fs_label'], options=self._volume['fs_create_options']) if not fmt.supported or not fmt.formattable: - # FAIL: fs type tools are not available raise BlivetAnsibleError("required tools for file system '%s' are missing" % self._volume['fs_type']) return fmt @@ -189,7 +193,6 @@ def _resize(self): try: size = Size(self._volume['size']) except Exception: - # FAIL: invalid size specification raise BlivetAnsibleError("invalid size specification for volume '%s': '%s'" % (self._volume['name'], self._volume['size'])) if size and self._device.resizable and self._device.size != size: @@ -197,13 +200,11 @@ def _resize(self): self._device.format.update_size_info() if not self._device.min_size <= size <= self._device.max_size: - # FAIL: resize to specified size not possible raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size)) try: self._blivet.resize_device(self._device, size) except ValueError as e: - # FAIL: resize not possible raise BlivetAnsibleError("volume '%s' cannot be resized from %s to %s: %s" % (self._device.name, self._device.size, size, str(e))) @@ -214,6 +215,9 @@ def _reformat(self): if self._device.format.type == fmt.type: return + if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): + raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) + if self._device.format.status: self._device.format.teardown() self._blivet.format_device(self._device, fmt) @@ -255,6 +259,17 @@ def _get_device_id(self): def _type_check(self): return self._device.is_disk + def _look_up_device(self): + super(BlivetDiskVolume, self)._look_up_device() + if not self._get_device_id(): + raise BlivetAnsibleError("no disks specified for volume '%s'" % self._volume['name']) + elif not isinstance(self._volume['disks'], list): + raise BlivetAnsibleError("volume disks must be specified as a list") + + if self._device is None: + raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks'])) + + class BlivetPartitionVolume(BlivetVolume): def _type_check(self): @@ -273,21 +288,18 @@ def _create(self): parent = self._blivet.devicetree.resolve_device(self._volume['pool']) if parent is None: - # FAIL: failed to find pool raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) size = Size("256 MiB") try: device = self._blivet.new_partition(parents=[parent], size=size, grow=True, fmt=self._get_format()) except Exception: - # FAIL: failed to instantiate volume device raise BlivetAnsibleError("failed set up volume '%s'" % self._volume['name']) self._blivet.create_device(device) try: do_partitioning(self._blivet) except Exception: - # FAIL: partition allocation failed: not enough space? 
raise BlivetAnsibleError("partition allocation failed for volume '%s'" % self._volume['name']) self._device = device @@ -303,18 +315,15 @@ def _create(self): parent = self._blivet_pool._device if parent is None: - # FAIL: failed to find pool raise BlivetAnsibleError("failed to find pool '%s' for volume '%s'" % (self._blivet_pool['name'], self._volume['name'])) try: size = Size(self._volume['size']) except Exception: - # FAIL: invalid size specification raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name'])) fmt = self._get_format() if size > parent.free_space: - # FAIL: volume size greater than pool free space raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size, parent.name, parent.free_space)) @@ -323,7 +332,6 @@ def _create(self): device = self._blivet.new_lv(name=self._volume['name'], parents=[parent], size=size, fmt=fmt) except Exception: - # FAIL: failed to create volume raise BlivetAnsibleError("failed to set up volume '%s'" % self._volume['name']) self._blivet.create_device(device) @@ -391,8 +399,7 @@ def _type_check(self): # pylint: disable=no-self-use def _look_up_disks(self): """ Look up the pool's disks in blivet's device tree. """ if not self._pool['disks']: - # FAIL: no disks specified for pool - raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) # sure about this one? + raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name']) elif not isinstance(self._pool['disks'], list): raise BlivetAnsibleError("pool disks must be specified as a list") @@ -403,7 +410,6 @@ def _look_up_disks(self): disks.append(device) if self._pool['disks'] and not disks: - # FAIL: failed to find any disks raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks'])) self._disks = disks @@ -428,8 +434,11 @@ def _create_members(self): """ Schedule actions as needed to ensure pool member devices exist. 
""" members = list() for disk in self._disks: - if not disk.isleaf: - self._blivet.devicetree.recursive_remove(disk) + if not disk.isleaf or disk.format.type is not None: + if safe_mode: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) + else: + self._blivet.devicetree.recursive_remove(disk) if use_partitions: label = get_format("disklabel", device=disk.path) @@ -446,7 +455,6 @@ def _create_members(self): try: do_partitioning(self._blivet) except Exception: - # FAIL: problem allocating partitions for pool backing devices raise BlivetAnsibleError("failed to allocation partitions for pool '%s'" % self._pool['name']) return members @@ -490,7 +498,11 @@ def _look_up_device(self): def _create(self): if self._device.format.type != "disklabel" or \ self._device.format.label_type != disklabel_type: - self._blivet.devicetree.recursive_remove(self._device, remove_device=False) + if safe_mode: + raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' " + "(pool '%s') in safe mode" % (self._device.name, self._pool['name'])) + else: + self._blivet.devicetree.recursive_remove(self._device, remove_device=False) label = get_format("disklabel", device=self._device.path, label_type=disklabel_type) self._blivet.format_device(self._device, label) @@ -503,7 +515,6 @@ def _type_check(self): def _get_format(self): fmt = get_format("lvmpv") if not fmt.supported or not fmt.formattable: - # FAIL: lvm tools are not available raise BlivetAnsibleError("required tools for managing LVM are missing") return fmt @@ -516,7 +527,6 @@ def _create(self): try: pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members) except Exception: - # FAIL: failed to instantiate pool device raise BlivetAnsibleError("failed to set up pool '%s'" % self._pool['name']) self._blivet.create_device(pool_device) @@ -660,6 +670,7 @@ def run_module(): volumes=dict(type='list'), packages_only=dict(type='bool', required=False, default=False), disklabel_type=dict(type='str', required=False, default=None), + safe_mode=dict(type='bool', required=False, default=True), use_partitions=dict(type='bool', required=False, default=True)) # seed the result dict in the object @@ -692,6 +703,9 @@ def run_module(): global use_partitions use_partitions = module.params['use_partitions'] + global safe_mode + safe_mode = module.params['safe_mode'] + b = Blivet() b.reset() fstab = FSTab(b) diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index 061195c1..88a5a012 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -105,6 +105,7 @@ volumes: "{{ _storage_volumes }}" use_partitions: "{{ storage_use_partitions }}" disklabel_type: "{{ storage_disklabel_type }}" + safe_mode: "{{ storage_safe_mode }}" register: blivet_output - debug: diff --git a/tests/tests_change_disk_fs.yml b/tests/tests_change_disk_fs.yml index b6aa80ba..f7962c6e 100644 --- a/tests/tests_change_disk_fs.yml +++ b/tests/tests_change_disk_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test' volume_size: '5g' fs_type_after: "{{ 'ext3' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') else 'ext4' }}" diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml index cca23ebe..b88e7689 100644 --- a/tests/tests_change_fs.yml +++ b/tests/tests_change_fs.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: 
'/opt/test1' volume_size: '5g' fs_after: "{{ (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '6') | ternary('ext4', 'xfs') }}" diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml index e4aa76cd..eb93c116 100644 --- a/tests/tests_change_fs_use_partitions.yml +++ b/tests/tests_change_fs_use_partitions.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false storage_use_partitions: true mount_location: '/opt/test1' volume_size: '5g' diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml index b19ae352..c5290eb1 100644 --- a/tests/tests_create_disk_then_remove.yml +++ b/tests/tests_create_disk_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' tasks: diff --git a/tests/tests_create_lvm_pool_then_remove.yml b/tests/tests_create_lvm_pool_then_remove.yml index 6b259396..f2c06fb9 100644 --- a/tests/tests_create_lvm_pool_then_remove.yml +++ b/tests/tests_create_lvm_pool_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location1: '/opt/test1' mount_location2: '/opt/test2' volume_group_size: '10g' diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml index 40b3e620..01a7db7d 100644 --- a/tests/tests_create_partition_volume_then_remove.yml +++ b/tests/tests_create_partition_volume_then_remove.yml @@ -2,6 +2,7 @@ - hosts: all become: true vars: + storage_safe_mode: false mount_location: '/opt/test1' tasks: diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 36eec41e..06e2fb4d 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -5,6 +5,14 @@ mount_location: '/opt/test1' tasks: + - include_role: + name: storage + + - include_tasks: get_unused_disk.yml + vars: + min_size: "10g" + max_return: 1 + - name: Verify that the play fails with the expected error message block: - name: Create a disk volume mounted at "{{ mount_location }}" @@ -22,3 +30,77 @@ that: "{{ blivet_output.failed }}" msg: "Expected error message not found for missing disk" ignore_errors: yes + + - name: Test for correct handling of safe_mode + block: + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to create a partition pool on the disk already containing a file system in safe_mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Create a partition pool on the disk 
already containing a file system w/o safe_mode + include_role: + name: storage + vars: + storage_safe_mode: false + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: "{{ not blivet_output.failed }}" + msg: "failed to create partition pool over existing file system using default value of safe_mode" + + - name: Clean up + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + disks: "{{ unused_disks }}" + present: false + + ignore_errors: yes diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml index ab236744..adcbb905 100644 --- a/tests/tests_lvm_errors.yml +++ b/tests/tests_lvm_errors.yml @@ -149,3 +149,87 @@ not blivet_output.changed }}" msg: "Unexpected behavior w/ LVM volume defined outside of any pool" ignore_errors: yes + + - name: Test for correct handling of safe_mode + block: + - name: Create a pool + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '1g' + + - name: Try to replace file system in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext3' + size: '1g' + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to resize in safe mode + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '2g' + + - name: Verify the output + assert: + that: "{{ not blivet_output.failed and blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Try to create LVM pool on disks that already belong to an existing pool + include_role: + name: storage + vars: + storage_safe_mode: true + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: lvm + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Clean up + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + state: absent + + ignore_errors: yes From 4abed48393436008acf8aa0072720053236c18c2 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Fri, 18 Oct 2019 17:30:34 -0400 Subject: [PATCH 2/7] Don't crash when a pool contains no volumes. 
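
The crash comes from Ansible's subelements filter, which by default raises an error when the subelement key is missing from an item. A minimal pool spec of the kind that triggered it (names hypothetical):

    storage_pools:
      - name: vg_no_volumes   # no "volumes" key at all
        type: lvm
        disks: "{{ unused_disks }}"

Passing skip_missing=true makes the lookup skip such pools instead of failing.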
--- tasks/main-blivet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index 88a5a012..2929238b 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -38,7 +38,7 @@ _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]" _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]" _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']" - loop: "{{ _storage_pools|subelements('volumes') }}" + loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}" when: storage_pools is defined - name: Apply defaults to pools and volumes [3/6] From b3ee952d61f530889714f7b647e8ad82f005e1ad Mon Sep 17 00:00:00 2001 From: David Lehman Date: Tue, 15 Oct 2019 07:36:15 -0400 Subject: [PATCH 3/7] Fix key for partition pool class lookup. --- library/blivet.py | 2 +- tests/tests_create_partition_volume_then_remove.yml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/library/blivet.py b/library/blivet.py index 1db95e4d..4c980970 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -534,7 +534,7 @@ def _create(self): _BLIVET_POOL_TYPES = { - "disk": BlivetPartitionPool, + "partition": BlivetPartitionPool, "lvm": BlivetLVMPool } diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml index 01a7db7d..ae589d3d 100644 --- a/tests/tests_create_partition_volume_then_remove.yml +++ b/tests/tests_create_partition_volume_then_remove.yml @@ -19,7 +19,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" volumes: - name: test1 @@ -34,7 +34,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" volumes: - name: test1 @@ -49,7 +49,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" state: absent volumes: @@ -66,7 +66,7 @@ vars: storage_pools: - name: "{{ unused_disks[0] }}" - type: disk + type: partition disks: "{{ unused_disks }}" state: absent volumes: From b47401da41a5a727898845fcc3b1b4adadd8de3c Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Tue, 15 Oct 2019 13:40:01 +0200 Subject: [PATCH 4/7] Various test fixes - Use correct array syntax for an invalid disk While both before and after it should trigger an error in the role, and therefore both are valid tests, a valid array with a nonexistent disk is more likely to be what was intended, and therefore preferable. (The former variant actually checked for bad syntax, not for handling of nonexistent disks.) - Test that the role fails, not that it sets blivet_output.failed The latter would be a valid test as well, but a less important one, and currently it does not work properly. Do not use ignore_errors: yes on the test block; it makes all the asserts pointless. Use a rescue task instead. - Create a file in the test fs to actually detect data loss - Do not use ignore_errors: yes in a test block This makes all test asserts pointless. Instead catch the error using rescue. Actually verify that data have not been destroyed. - Remove last instance of ignore_errors: yes and check for errors properly. Verify again that data have not been lost. Do not use a pool type of partition; it seems to be unimplemented. Create one volume in the pool, the role does not cope with empty pools. 
Note that even with those changes the role application fails - apparently it refuses to create a VG when there is already a filesystem, even with safe mode off. - Turn off safe mode in cleanup - Adjust names of tests to match reality. --- tests/tests_disk_errors.yml | 169 ++++++++++++++++++++++++++---------- 1 file changed, 123 insertions(+), 46 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index 06e2fb4d..fab86937 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -3,6 +3,7 @@ become: true vars: mount_location: '/opt/test1' + testfile: "{{ mount_location }}/quux" tasks: - include_role: @@ -22,27 +23,52 @@ storage_volumes: - name: test1 type: disk - disks: "['/dev/surelyidonotexist']" + disks: ['/dev/surelyidonotexist'] mount_point: "{{ mount_location }}" - - name: Check the error output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed }}" - msg: "Expected error message not found for missing disk" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly, + # blivet_output.failed is false. + # - name: Show the error output + # debug: + # msg: "{{ blivet_output.failed }}" + + # - name: Check the error output + # assert: + # that: blivet_output.failed | bool + # msg: "Expected error message not found for missing disk" + + - name: Create a file system on disk + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + + - name: create a file + file: + path: "{{ testfile }}" + state: touch - name: Test for correct handling of safe_mode block: - - name: Create a file system on disk - include_role: - name: storage - vars: - storage_volumes: - - name: test1 - type: disk - fs_type: 'ext4' - disks: "{{ unused_disks }}" - - name: Try to replace the file system on disk in safe mode include_role: name: storage @@ -54,13 +80,40 @@ fs_type: 'ext3' disks: "{{ unused_disks }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ existing data on specified disks" + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" 
+ + - name: Test for correct handling of safe_mode + block: - name: Try to create a partition pool on the disk already containing a file system in safe_mode include_role: name: storage @@ -71,36 +124,60 @@ disks: "{{ unused_disks }}" type: partition - - name: Verify the output - assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ existing data on specified disks" + - name: UNREACH + fail: + msg: "this should be unreachable" - - name: Create a partition pool on the disk already containing a file system w/o safe_mode - include_role: - name: storage + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" vars: - storage_safe_mode: false - storage_pools: - - name: foo - disks: "{{ unused_disks }}" - type: partition + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet - name: Verify the output assert: - that: "{{ not blivet_output.failed }}" - msg: "failed to create partition pool over existing file system using default value of safe_mode" + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" - - name: Clean up - include_role: - name: storage - vars: - storage_volumes: - - name: test1 - type: disk - disks: "{{ unused_disks }}" - present: false + - name: stat the file + stat: + path: "{{ testfile }}" + register: stat_r + + - name: assert file presence + assert: + that: + stat_r.stat.isreg is defined and stat_r.stat.isreg + msg: "data lost!" - ignore_errors: yes + - name: Create a partition pool on the disk already containing a file system w/o safe_mode + include_role: + name: storage + vars: + storage_safe_mode: false + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: partition + + - name: Verify the output + assert: + that: not blivet_output.failed + msg: "failed to create partition pool over existing file system using default value of safe_mode" + + - name: Clean up + include_role: + name: storage + vars: + storage_safe_mode: false + storage_volumes: + - name: test1 + type: disk + disks: "{{ unused_disks }}" + present: false From 76823731ddf65c6142a414f44b03ba53ddc9fb54 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Fri, 18 Oct 2019 16:11:58 -0400 Subject: [PATCH 5/7] Don't unmount anything if only getting package list. --- library/blivet.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/library/blivet.py b/library/blivet.py index 4c980970..1b500466 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -121,6 +121,7 @@ use_partitions = None # create partitions on pool backing device disks? disklabel_type = None # user-specified disklabel type safe_mode = None # do not remove any existing devices or formatting +packages_only = None # only set things up enough to get a list of required packages class BlivetAnsibleError(Exception): @@ -211,6 +212,8 @@ def _resize(self): def _reformat(self): """ Schedule actions as needed to ensure the volume is formatted as specified. 
""" + global packages_only + fmt = self._get_format() if self._device.format.type == fmt.type: return @@ -218,7 +221,7 @@ def _reformat(self): if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) - if self._device.format.status: + if self._device.format.status and not packages_only: self._device.format.teardown() self._blivet.format_device(self._device, fmt) @@ -706,6 +709,9 @@ def run_module(): global safe_mode safe_mode = module.params['safe_mode'] + global packages_only + packages_only = module.params['packages_only'] + b = Blivet() b.reset() fstab = FSTab(b) From 63ff3ba607a3ecb86de71e19f2f7b3375c512e09 Mon Sep 17 00:00:00 2001 From: Pavel Cahyna Date: Mon, 21 Oct 2019 19:13:31 +0200 Subject: [PATCH 6/7] More test fixes - Check for errors properly in LVM error test, part 1 Remove ignore_errors in non-safe-mode tests and instead catch errors. Comment out the currently broken tests. - Check safe_mode properly in the LVM tests Do not use ignore_errors, catch the error instead. - Disable the resizing test, resizing does not seem to work at all currently. - Remove the explicit setting of storage_safe_mode to true True should be the default. - Safe mode should now be the default, account for this - Check that safe mode works even when the filesystem is unmounted. - Verify that replacing a fs by a LVM pool fails in safe mode - Cleanup properly. --- tests/tests_disk_errors.yml | 99 ++++++++++++- tests/tests_lvm_errors.yml | 278 ++++++++++++++++++++++++++++-------- 2 files changed, 311 insertions(+), 66 deletions(-) diff --git a/tests/tests_disk_errors.yml b/tests/tests_disk_errors.yml index fab86937..7112f6ed 100644 --- a/tests/tests_disk_errors.yml +++ b/tests/tests_disk_errors.yml @@ -73,7 +73,6 @@ include_role: name: storage vars: - storage_safe_mode: true storage_volumes: - name: test1 type: disk @@ -101,6 +100,61 @@ not blivet_output.changed" msg: "Unexpected behavior w/ existing data on specified disks" + - name: Unmount file system + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: none + + - name: Test for correct handling of safe_mode with unmounted filesystem + block: + - name: Try to replace the file system on disk in safe mode + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: "{{ unused_disks }}" + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! 
needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Remount file system + include_role: + name: storage + vars: + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext4' + disks: "{{ unused_disks }}" + mount_point: "{{ mount_location }}" + - name: stat the file stat: path: "{{ testfile }}" @@ -118,7 +172,6 @@ include_role: name: storage vars: - storage_safe_mode: true storage_pools: - name: foo disks: "{{ unused_disks }}" @@ -145,6 +198,38 @@ not blivet_output.changed" msg: "Unexpected behavior w/ existing data on specified disks" + - name: Test for correct handling of safe_mode with existing filesystem + block: + - name: Try to create LVM pool on disk that already belongs to an existing filesystem + include_role: + name: storage + vars: + storage_pools: + - name: foo + disks: "{{ unused_disks }}" + type: lvm + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + - name: Verify the output + assert: + that: "{{ blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting and/or devices on disk.*in safe mode') and + not blivet_output.changed }}" + msg: "Unexpected behavior w/ existing data on specified disks" + - name: stat the file stat: path: "{{ testfile }}" @@ -169,15 +254,15 @@ - name: Verify the output assert: that: not blivet_output.failed - msg: "failed to create partition pool over existing file system using default value of safe_mode" + msg: "failed to create partition pool over existing file system w/o safe_mode" - name: Clean up include_role: name: storage vars: storage_safe_mode: false - storage_volumes: - - name: test1 - type: disk + storage_pools: + - name: foo + type: partition disks: "{{ unused_disks }}" - present: false + state: absent diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml index adcbb905..e8be1535 100644 --- a/tests/tests_lvm_errors.yml +++ b/tests/tests_lvm_errors.yml @@ -33,13 +33,32 @@ size: "{{ volume1_size }}" mount_point: "{{ mount_location1 }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ non-existent pool disk" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! 
needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly + # - debug: + # msg: "{{ 'failed: ' + blivet_output.failed | string + + # 'msg: ' + blivet_output.msg + + # 'changed: ' + blivet_output.changed | string }}" + + # - name: Verify the output + # assert: + # that: "{{ blivet_output.failed and + # blivet_output.msg|regex_search('unable to resolve.+disk')|length>0 and + # not blivet_output.changed }}" + # msg: "Unexpected behavior w/ non-existent pool disk" - name: Test for correct handling of invalid size specification. block: @@ -55,13 +74,27 @@ size: "{{ invalid_size }}" mount_point: "{{ mount_location1 }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('invalid size.+for volume') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ invalid volume size" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly + # - name: Verify the output + # assert: + # that: "{{ blivet_output.failed and + # blivet_output.msg|regex_search('invalid size.+for volume') and + # not blivet_output.changed }}" + # msg: "Unexpected behavior w/ invalid volume size" - name: Test for correct handling of too-large volume size. block: @@ -77,13 +110,27 @@ size: "{{ too_large_size }}" mount_point: "{{ mount_location1 }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('size.+exceeds.+space in pool') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ too-large volume size" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly + # - name: Verify the output + # assert: + # that: "{{ blivet_output.failed and + # blivet_output.msg|regex_search('size.+exceeds.+space in pool') and + # not blivet_output.changed }}" + # msg: "Unexpected behavior w/ too-large volume size" - name: Test for correct handling of non-list disk specification. block: @@ -99,13 +146,27 @@ size: "{{ too_large_size }}" mount_point: "{{ mount_location1 }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('disk.+list') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ disks not in list form" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly + # - name: Verify the output + # assert: + # that: "{{ blivet_output.failed and + # blivet_output.msg|regex_search('disk.+list') and + # not blivet_output.changed }}" + # msg: "Unexpected behavior w/ disks not in list form" - name: Test for correct handling of missing disk specification. 
block: @@ -121,13 +182,27 @@ size: "{{ too_large_size }}" mount_point: "{{ mount_location1 }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('no disks.+pool') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ no disks specified" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly + # - name: Verify the output + # assert: + # that: "{{ blivet_output.failed and + # blivet_output.msg|regex_search('no disks.+pool') and + # not blivet_output.changed }}" + # msg: "Unexpected behavior w/ no disks specified" - name: Test for correct handling of LVM volume not defined within a pool. block: @@ -142,34 +217,47 @@ size: "{{ volume1_size }}" mount_point: "{{ mount_location1 }}" - - name: Verify the output + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role assert: - that: "{{ blivet_output.failed and - blivet_output.msg|regex_search('failed to find pool .+ for volume') and - not blivet_output.changed }}" - msg: "Unexpected behavior w/ LVM volume defined outside of any pool" - ignore_errors: yes + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + + # the following does not work properly + # - name: Verify the output + # assert: + # that: "{{ blivet_output.failed and + # blivet_output.msg|regex_search('failed to find pool .+ for volume') and + # not blivet_output.changed }}" + # msg: "Unexpected behavior w/ LVM volume defined outside of any pool" + + - name: Create a pool + include_role: + name: storage + vars: + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + volumes: + - name: testvol1 + fs_type: 'ext4' + size: '1g' - name: Test for correct handling of safe_mode block: - - name: Create a pool - include_role: - name: storage - vars: - storage_pools: - - name: testpool1 - type: lvm - disks: "{{ unused_disks }}" - volumes: - - name: testvol1 - fs_type: 'ext4' - size: '1g' - - name: Try to replace file system in safe mode include_role: name: storage vars: - storage_safe_mode: true storage_pools: - name: testpool1 type: lvm @@ -179,6 +267,20 @@ fs_type: 'ext3' size: '1g' + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! 
needed to expand ansible_failed_task + storage_provider: blivet + - name: Verify the output assert: that: "{{ blivet_output.failed and @@ -186,11 +288,12 @@ not blivet_output.changed }}" msg: "Unexpected behavior w/ existing data on specified disks" + - name: Test for correct handling of safe_mode with resize + block: - name: Try to resize in safe mode include_role: name: storage vars: - storage_safe_mode: true storage_pools: - name: testpool1 type: lvm @@ -205,16 +308,33 @@ that: "{{ not blivet_output.failed and blivet_output.changed }}" msg: "Unexpected behavior w/ existing data on specified disks" + when: false + + - name: Test for correct handling of safe_mode with existing pool + block: - name: Try to create LVM pool on disks that already belong to an existing pool include_role: name: storage vars: - storage_safe_mode: true storage_pools: - name: foo disks: "{{ unused_disks }}" type: lvm + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet + - name: Verify the output assert: that: "{{ blivet_output.failed and @@ -222,14 +342,54 @@ not blivet_output.changed }}" msg: "Unexpected behavior w/ existing data on specified disks" - - name: Clean up + - name: Test for correct handling of safe_mode + block: + - name: Try to replace a pool by a file system on disk in safe mode include_role: name: storage vars: - storage_pools: - - name: testpool1 - type: lvm - disks: "{{ unused_disks }}" - state: absent + storage_volumes: + - name: test1 + type: disk + fs_type: 'ext3' + disks: + - "{{ unused_disks[0] }}" + + - name: UNREACH + fail: + msg: "this should be unreachable" + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_task.name != 'UNREACH' + msg: "Role has not failed when it should have" + vars: + # Ugh! needed to expand ansible_failed_task + storage_provider: blivet - ignore_errors: yes + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Verify the output + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('cannot remove existing formatting on volume.*in safe mode') and + not blivet_output.changed" + msg: "Unexpected behavior w/ existing data on specified disks" + + - name: Clean up + include_role: + name: storage + vars: + storage_safe_mode: false + storage_pools: + - name: testpool1 + type: lvm + disks: "{{ unused_disks }}" + state: absent From f9ce0dd91ebe1cfee854ae2a4f40eb6afd571e87 Mon Sep 17 00:00:00 2001 From: David Lehman Date: Fri, 8 Nov 2019 14:13:59 -0500 Subject: [PATCH 7/7] Don't raise safe_mode failures when packages_only is True. 
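
The role apparently invokes the module twice: once with packages_only=true merely to compute the list of required packages, and again to make the actual changes. A sketch of the first invocation, using only parameters visible in run_module() above (the pools parameter and the exact package-gathering task in tasks/main-blivet.yml are not shown in this series, so this is an approximation):

    - name: Get required packages
      blivet:
        volumes: "{{ _storage_volumes }}"
        use_partitions: "{{ storage_use_partitions }}"
        disklabel_type: "{{ storage_disklabel_type }}"
        safe_mode: "{{ storage_safe_mode }}"
        packages_only: true  # with this patch, safe_mode checks no longer fire here

Without this change, the package-gathering pass itself raised the safe-mode error whenever existing formatting was present, before the real run ever started.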
--- library/blivet.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/library/blivet.py b/library/blivet.py index 1b500466..858ca2f9 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -218,7 +218,8 @@ def _reformat(self): if self._device.format.type == fmt.type: return - if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name): + if safe_mode and (self._device.format.type is not None or self._device.format.name != get_format(None).name) and \ + not packages_only: raise BlivetAnsibleError("cannot remove existing formatting on volume '%s' in safe mode" % self._volume['name']) if self._device.format.status and not packages_only: @@ -438,7 +439,7 @@ def _create_members(self): members = list() for disk in self._disks: if not disk.isleaf or disk.format.type is not None: - if safe_mode: + if safe_mode and not packages_only: raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' (pool '%s') in safe mode" % (disk.name, self._pool['name'])) else: self._blivet.devicetree.recursive_remove(disk) @@ -501,7 +502,7 @@ def _look_up_device(self): def _create(self): if self._device.format.type != "disklabel" or \ self._device.format.label_type != disklabel_type: - if safe_mode: + if safe_mode and not packages_only: raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' " "(pool '%s') in safe mode" % (self._device.name, self._pool['name'])) else:
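
Taken together, the series makes destructive operations opt-in: with storage_safe_mode left at its default of true, the role refuses to reformat or remove anything that already exists, and packages_only runs are exempt from that check. A sketch of the resulting user-facing behavior (disk name hypothetical):

    - hosts: all
      become: true
      tasks:
        # Fails with "cannot remove existing formatting on volume ... in safe mode"
        # if /dev/sdx already carries a different filesystem.
        - include_role:
            name: storage
          vars:
            storage_volumes:
              - name: test1
                type: disk
                fs_type: ext4
                disks: ['/dev/sdx']

        # Succeeds: the destructive reformat is explicitly requested.
        - include_role:
            name: storage
          vars:
            storage_safe_mode: false
            storage_volumes:
              - name: test1
                type: disk
                fs_type: ext4
                disks: ['/dev/sdx']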