From 23fd34216249564eba241825a2ca44272fb46e79 Mon Sep 17 00:00:00 2001
From: Jan Pokorny
Date: Tue, 30 Apr 2024 16:00:00 +0200
Subject: [PATCH 1/2] test: Library checking script for tests

Storage role development often relies on the blivet library and on
changes in it. Whether or not a specific feature is supported by blivet
is usually determined by its version. That is a cumbersome process,
especially when the feature has not yet been added to blivet and the
version has to be guessed.

The added script verifies the existence of a feature by using Python
introspection, checking for the presence of a specific item in the
library (e.g. 'blivet.formats.lvmpv.LVMPhysicalVolume.grow_to_fill').

The script is meant to be used by the tests only.
---
 tests/scripts/does_library_support.py | 62 +++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100755 tests/scripts/does_library_support.py

diff --git a/tests/scripts/does_library_support.py b/tests/scripts/does_library_support.py
new file mode 100755
index 00000000..2044a87e
--- /dev/null
+++ b/tests/scripts/does_library_support.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# This script checks for blivet compatibility by trying to access the
+# blivet part specified by the given dotted-path parameter.
+# Prints True if the part is found, False otherwise.
+
+# The script is meant to be a supporting tool for the storage role tests.
+import sys
+import importlib
+
+
+def is_supported(var):
+
+    parts = var.split('.')
+    imports = ''
+    obj = sys.modules[__name__]
+
+    try:
+        # create a variable named parts[0] so the subsequent imports work
+        globals()[parts[0]] = importlib.import_module(parts[0])
+    except ImportError:
+        return False
+
+    # try to import each part
+    while parts:
+        part = parts.pop(0)
+        imports += part + '.'
+
+        try:
+            importlib.import_module(imports.rstrip('.'))
+        except ImportError:
+            break
+
+        # keep a reference to the object for later attribute lookups
+        obj = getattr(obj, part)
+
+    else:
+        # no break happened in the loop,
+        # i.e. the whole string was importable
+        return True
+
+    # part of the string was not importable; the rest can be attributes
+
+    # put 'part' back into 'parts' to simplify the following loop
+    parts = [part] + parts
+
+    while parts:
+        part = parts.pop(0)
+        obj = getattr(obj, part, None)
+
+        if obj is None:
+            return False
+
+    return True
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 2:
+        print("Usage: python %s <dotted.path.to.check>" % sys.argv[0])
+        sys.exit(-1)
+
+    print(is_supported(sys.argv[1]))

From 0572eca9b2c86ceb4638c78ad20bca9e0f39983b Mon Sep 17 00:00:00 2001
From: Jan Pokorny
Date: Tue, 30 Apr 2024 16:26:38 +0200
Subject: [PATCH 2/2] feat: PV resize support

There is a use case where the physical device size can change (e.g. on
a VM). We need to be able to change the size of the LVM PV to
accommodate that.

This adds a new pool parameter 'grow_to_fill'. When set, the pool PVs
will try to take all available space on their respective devices.
Defaults to false. Requires a blivet version that supports this
feature; for the tests this is checked using the 'does_library_support'
script.
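An illustrative pool definition using the new parameter (a sketch only;
the disk, size, and mount point below are placeholders, not part of
this patch):

    storage_pools:
      - name: foo
        disks: [sdb]
        grow_to_fill: true
        volumes:
          - name: test1
            size: 2g
            mount_point: /opt/test1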
---
 README.md                           |   5 ++
 defaults/main.yml                   |   1 +
 library/blivet.py                   |  26 +++++++-
 tests/test-verify-pool-members.yml  |  19 ++++++
 tests/tests_lvm_pool_pv_grow.yml    | 100 ++++++++++++++++++++++++++++
 tests/verify-pool-member-pvsize.yml |  24 +++++++
 6 files changed, 173 insertions(+), 2 deletions(-)
 create mode 100644 tests/tests_lvm_pool_pv_grow.yml
 create mode 100644 tests/verify-pool-member-pvsize.yml

diff --git a/README.md b/README.md
index 5398d30c..e13d2d42 100644
--- a/README.md
+++ b/README.md
@@ -48,6 +48,11 @@ keys:
   This specifies the type of pool to manage.
   Valid values for `type`: `lvm`.

+- `grow_to_fill`
+
+  When set to `true`, the pool physical volumes are resized to match their respective
+  device sizes (e.g. after a virtual machine disk size increase). Defaults to `false`.
+
 - `shared`

   If set to `true`, the role creates or manages a shared volume group. Requires lvmlockd and

diff --git a/defaults/main.yml b/defaults/main.yml
index 755364ae..f57a4a94 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -13,6 +13,7 @@ storage_pool_defaults:
   type: lvm
   disks: []
   volumes: []
+  grow_to_fill: false

   encryption: false
   encryption_password: null

diff --git a/library/blivet.py b/library/blivet.py
index 78eb5aef..e784d90e 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -60,6 +60,9 @@
             encryption_tang_thumbprint:
                 description: encryption_tang_thumbprint
                 type: str
+            grow_to_fill:
+                description: grow_to_fill
+                type: bool
             name:
                 description: name
                 type: str
@@ -379,7 +382,7 @@
     from blivet3.callbacks import callbacks
     from blivet3 import devicelibs
     from blivet3 import devices
-    from blivet3.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember
+    from blivet3.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember
     from blivet3.devicefactory import DEFAULT_THPOOL_RESERVE
     from blivet3.flags import flags as blivet_flags
     from blivet3.formats import fslib, get_format
@@ -395,7 +398,7 @@
     from blivet.callbacks import callbacks
     from blivet import devicelibs
     from blivet import devices
-    from blivet.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember
+    from blivet.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember
     from blivet.devicefactory import DEFAULT_THPOOL_RESERVE
     from blivet.flags import flags as blivet_flags
     from blivet.formats import fslib, get_format
@@ -421,6 +424,7 @@ def __getattr__(self, val):
 blivet_flags.allow_online_fs_resize = True
 blivet_flags.gfs2 = True
 set_up_logging()
+log = logging.getLogger(BLIVET_PACKAGE + ".ansible")

 # XXX add support for LVM RAID raid0 level
@@ -1839,6 +1843,22 @@ def _manage_members(self):
         add_disks = [d for d in self._disks if d not in self._device.ancestors]
         remove_disks = [pv for pv in self._device.pvs if not any(d in pv.ancestors for d in self._disks)]

+        if self._pool['grow_to_fill']:
+            grow_pv_candidates = [pv for pv in self._device.pvs if pv not in remove_disks and pv not in add_disks]
+
+            for pv in grow_pv_candidates:
+                if abs(self._device.size - self._device.current_size) < 2 * self._device.pe_size:
+                    continue
+
+                pv.format.update_size_info()  # set pv to be resizable
+
+                if pv.format.resizable:
+                    pv.grow_to_fill = True
+                    ac = ActionResizeFormat(pv, self._device.size)
+                    self._blivet.devicetree.actions.add(ac)
+                else:
+                    log.warning("cannot grow/resize PV '%s', format is not resizable", pv.name)
+
         if not (add_disks or remove_disks):
             return
@@ -2329,6 +2349,7 @@ def run_module():
             encryption_clevis_pin=dict(type='str'),
             encryption_tang_url=dict(type='str'),
             encryption_tang_thumbprint=dict(type='str'),
+            grow_to_fill=dict(type='bool'),
             name=dict(type='str'),
             raid_level=dict(type='str'),
             raid_device_count=dict(type='int'),
@@ -2473,6 +2494,7 @@ def action_dict(action):
     # execute the scheduled actions, committing changes to disk
     callbacks.action_executed.add(record_action)
     callbacks.action_executed.add(ensure_udev_update)
+
     try:
         b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
     except Exception as e:

diff --git a/tests/test-verify-pool-members.yml b/tests/test-verify-pool-members.yml
index 649078e5..1994c9a2 100644
--- a/tests/test-verify-pool-members.yml
+++ b/tests/test-verify-pool-members.yml
@@ -70,6 +70,25 @@
     loop_var: pv
   when: storage_test_pool.type == 'lvm'

+- name: Check that blivet supports PV grow to fill
+  ansible.builtin.script: >-
+    scripts/does_library_support.py
+    blivet.formats.lvmpv.LVMPhysicalVolume.grow_to_fill
+  args:
+    executable: "{{ ansible_python.executable }}"
+  register: grow_supported
+  changed_when: false
+
+- name: Verify that PVs fill their whole devices when they should
+  include_tasks: verify-pool-member-pvsize.yml
+  loop: "{{ _storage_test_pool_pvs | default([]) }}"
+  loop_control:
+    loop_var: st_pool_pv
+  when:
+    - grow_supported.stdout | trim == 'True'
+    - storage_test_pool.type == "lvm"
+    - storage_test_pool.grow_to_fill | bool
+
 - name: Check MD RAID
   include_tasks: verify-pool-md.yml

diff --git a/tests/tests_lvm_pool_pv_grow.yml b/tests/tests_lvm_pool_pv_grow.yml
new file mode 100644
index 00000000..b0543060
--- /dev/null
+++ b/tests/tests_lvm_pool_pv_grow.yml
@@ -0,0 +1,100 @@
+---
+- name: Test LVM pool PV grow to fill
+  hosts: all
+  become: true
+  vars:
+    storage_safe_mode: false
+    mount_location1: '/opt/test1'
+    mount_location2: '/opt/test2'
+    pv_size: '8g'
+    volume1_size: '2g'
+    volume2_size: '3g'
+  tags:
+    - tests::lvm
+
+  tasks:
+    - name: Run the role
+      include_role:
+        name: linux-system-roles.storage
+
+    - name: Mark tasks to be skipped
+      set_fact:
+        storage_skip_checks:
+          - blivet_available
+          - service_facts
+          - "{{ (lookup('env',
+                'SYSTEM_ROLES_REMOVE_CLOUD_INIT') in ['', 'false']) |
+                ternary('packages_installed', '') }}"
+
+    - name: Get unused disks
+      include_tasks: get_unused_disk.yml
+      vars:
+        max_return: 1
+        min_size: "10g"
+
+    - name: Create PV with space to grow
+      command: "pvcreate --setphysicalvolumesize {{ pv_size }} /dev/{{ unused_disks[0] }}"
+      register: pvcreate_output
+      changed_when: pvcreate_output.rc != 0
+
+    # The VG has to be present; otherwise the role automatically reformats
+    # an empty PV, taking all available space
+    - name: Create VG
+      command: "vgcreate foo /dev/{{ unused_disks[0] }}"
+      register: vgcreate_output
+      changed_when: vgcreate_output.rc != 0
+
+    - name: Create LVM
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            grow_to_fill: true
+            state: present
+            volumes:
+              - name: test1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+              - name: test2
+                size: "{{ volume2_size }}"
+                mount_point: "{{ mount_location2 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: Rerun the role to verify idempotence
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            grow_to_fill: true
+            state: present
+            volumes:
+              - name: test1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+              - name: test2
+                size: "{{ volume2_size }}"
+                mount_point: "{{ mount_location2 }}"
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml
+
+    - name: Remove 'foo' pool created above
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            state: "absent"
+            volumes:
+              - name: test1
+              - name: test2
+
+    - name: Verify role results
+      include_tasks: verify-role-results.yml

diff --git a/tests/verify-pool-member-pvsize.yml b/tests/verify-pool-member-pvsize.yml
new file mode 100644
index 00000000..d156e164
--- /dev/null
+++ b/tests/verify-pool-member-pvsize.yml
@@ -0,0 +1,24 @@
+---
+- name: Get actual PV size
+  command: "pvs --noheadings --nosuffix --units b -o SIZE {{ st_pool_pv }}"
+  register: actual_pv_size
+  changed_when: false
+
+- name: Convert blkinfo size to bytes
+  bsize:
+    size: "{{ storage_test_blkinfo.info[st_pool_pv]['size'] }}"
+  register: dev_size
+
+- name: Verify each PV size
+  assert:
+    that: (dev_size.bytes - actual_pv_size.stdout | int) |
+      abs / actual_pv_size.stdout | int < 0.04
+    msg: >-
+      PV resize failure; size difference too big
+      (device size: {{ dev_size.bytes }})
+      (actual PV size: {{ actual_pv_size.stdout }})
+
+- name: Clean up test variables
+  set_fact:
+    actual_pv_size: null
+    dev_size: null