From b63c88a94a0ac8cb902fc5cb30789523a6d1d68d Mon Sep 17 00:00:00 2001 From: Rich Megginson Date: Wed, 31 Jan 2024 17:44:00 -0700 Subject: [PATCH] fix: ensure user linger is enabled and disabled correctly Cause: The role was not always enabling user lingering before creating resources, and not always canceling lingering after removing resources. Consequence: The role would give errors if attempting to create a secret or other resource requiring lingering, or would leave lingering enabled after removing resources. Fix: Centralize linger handling and keep track of users which may need linger canceling. Ensure linger is canceled for all users if all of that user's resources are removed and linger is no longer needed. Result: Resources for rootless users are always created properly. Lingering is always canceled when no longer needed. Fix issue with toml.j2 - ensure non-string values are written as non-strings. Fix idempotency issue where you could not clean up twice. Allow testing rootless quadlet on EL8 by configuring settings and kernel parameters and rebooting. Fix several cleanup issues, and dump journal if there are test errors. Construct the __params dict to pass to `podman_secret` to fix the JSON string issue with `data` on both Ansible 2.9 and later. 
Signed-off-by: Rich Megginson --- defaults/main.yml | 5 + tasks/cancel_linger.yml | 62 ++++ tasks/cleanup_kube_spec.yml | 28 +- tasks/cleanup_quadlet_spec.yml | 45 +-- tasks/create_update_kube_spec.yml | 11 +- tasks/create_update_quadlet_spec.yml | 11 +- tasks/handle_secret.yml | 55 ++-- tasks/main.yml | 10 + tasks/manage_linger.yml | 29 ++ templates/toml.j2 | 2 + tests/tests_basic.yml | 11 +- tests/tests_config_files.yml | 7 + tests/tests_include_vars_from_parent.yml | 100 +++--- tests/tests_quadlet_basic.yml | 388 ++++++++++++++--------- tests/tests_quadlet_demo.yml | 256 ++++++++------- 15 files changed, 626 insertions(+), 394 deletions(-) create mode 100644 tasks/cancel_linger.yml create mode 100644 tasks/manage_linger.yml diff --git a/defaults/main.yml b/defaults/main.yml index bb44e487..92e4eb84 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -104,3 +104,8 @@ podman_pull_image: true # running the role. # You can do this on a per-spec basis using continue_if_pull_fails podman_continue_if_pull_fails: false + +# Retry failed pulls +# If true, if a pull attempt fails, it will be retried according +# to the default Ansible `until` behavior. 
+podman_pull_retry: false diff --git a/tasks/cancel_linger.yml b/tasks/cancel_linger.yml new file mode 100644 index 00000000..761778b9 --- /dev/null +++ b/tasks/cancel_linger.yml @@ -0,0 +1,62 @@ +--- +# Input: +# * __podman_linger_user - username +- name: Get user information + getent: + database: passwd + key: "{{ __podman_linger_user }}" + fail_key: true + when: "'getent_passwd' not in ansible_facts or + __podman_linger_user not in ansible_facts['getent_passwd']" + +- name: Set cancel linger vars + set_fact: + __podman_xdg_runtime_dir: >- + /run/user/{{ ansible_facts["getent_passwd"][__podman_linger_user][1] }} + +- name: Stat XDG_RUNTIME_DIR + stat: + path: "{{ __podman_xdg_runtime_dir }}" + register: __podman_xdg_stat + +- name: Gather facts for containers + containers.podman.podman_container_info: + environment: + XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" + become: true + become_user: "{{ __podman_linger_user }}" + when: __podman_xdg_stat.stat.exists + register: __podman_container_info + +- name: Gather facts for networks + command: podman network ls -q + register: __podman_networks + changed_when: false + environment: + XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" + become: true + become_user: "{{ __podman_linger_user }}" + when: __podman_xdg_stat.stat.exists + +- name: Gather secrets + command: podman secret ls -n -q + register: __podman_linger_secrets + changed_when: false + environment: + XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" + become: true + become_user: "{{ __podman_linger_user }}" + when: __podman_xdg_stat.stat.exists + +- name: Cancel linger if no more resources are in use + command: loginctl disable-linger {{ __podman_linger_user }} + when: + - __podman_xdg_stat.stat.exists + - __podman_container_info.containers | length == 0 + - __podman_networks.stdout_lines | reject("match", "^podman$") | + reject("match", "^podman-default-kube-network$") | + list | length == 0 + - __podman_linger_secrets.stdout == "" + changed_when: true 
+ args: + removes: /var/lib/systemd/linger/{{ __podman_linger_user }} diff --git a/tasks/cleanup_kube_spec.yml b/tasks/cleanup_kube_spec.yml index 17e4705e..c8641796 100644 --- a/tasks/cleanup_kube_spec.yml +++ b/tasks/cleanup_kube_spec.yml @@ -1,4 +1,12 @@ --- +- name: Stat XDG_RUNTIME_DIR + stat: + path: "{{ __podman_xdg_runtime_dir }}" + register: __podman_xdg_stat + when: + - __podman_rootless | bool + - __podman_xdg_runtime_dir | d("") | length > 0 + - name: Stop and disable service systemd: name: "{{ __podman_service_name.stdout }}" @@ -10,6 +18,7 @@ environment: XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" register: __podman_service_status + when: not __podman_rootless or __podman_xdg_stat.stat.exists failed_when: - __podman_service_status is failed - not __podman_service_status.stdout is search(__service_error) @@ -24,6 +33,7 @@ become: "{{ __podman_rootless | ternary(true, omit) }}" become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" register: __podman_removed + when: not __podman_rootless or __podman_xdg_stat.stat.exists - name: Remove kubernetes yaml file file: @@ -39,17 +49,7 @@ when: __podman_removed is changed # noqa no-handler changed_when: true -- name: Gather facts for all containers - containers.podman.podman_container_info: - environment: - XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" - become: "{{ __podman_rootless | ternary(true, omit) }}" - become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" - register: __podman_container_info - -- name: Cancel linger if no more containers are running - command: loginctl disable-linger {{ __podman_user }} - when: - - __podman_rootless | bool - - __podman_container_info.containers | length == 0 - changed_when: true +- name: Manage linger + include_tasks: manage_linger.yml + vars: + __podman_item_state: absent diff --git a/tasks/cleanup_quadlet_spec.yml b/tasks/cleanup_quadlet_spec.yml index 4cbcae10..ba68771e 100644 --- a/tasks/cleanup_quadlet_spec.yml +++ 
b/tasks/cleanup_quadlet_spec.yml @@ -1,18 +1,29 @@ --- # NOTE: Stopping, disabling, and removing units should also stop # and remove any pods and containers as well. +- name: Stat XDG_RUNTIME_DIR + stat: + path: "{{ __podman_xdg_runtime_dir }}" + register: __podman_xdg_stat + when: + - __podman_rootless | bool + - __podman_xdg_runtime_dir | d("") | length > 0 + - name: Stop and disable service systemd: name: "{{ __podman_service_name }}" scope: "{{ __podman_systemd_scope }}" state: stopped enabled: false + force: true become: "{{ __podman_rootless | ternary(true, omit) }}" become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" environment: XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" register: __podman_service_status - when: __podman_service_name | length > 0 + when: + - __podman_service_name | length > 0 + - not __podman_rootless or __podman_xdg_stat.stat.exists failed_when: - __podman_service_status is failed - not __podman_service_status.msg is search(__service_error) @@ -25,6 +36,11 @@ state: absent register: __podman_file_removed +- name: Manage linger + include_tasks: manage_linger.yml + vars: + __podman_item_state: absent + - name: Cleanup container resources when: __podman_file_removed is changed # noqa no-handler block: @@ -53,30 +69,3 @@ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" become: "{{ __podman_rootless | ternary(true, omit) }}" become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" - - - name: Gather facts for all containers - containers.podman.podman_container_info: - environment: - XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" - become: "{{ __podman_rootless | ternary(true, omit) }}" - become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" - register: __podman_container_info - no_log: true - - - name: Gather facts for networks - command: podman network ls -q - register: __podman_networks - changed_when: false - environment: - XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" - become: 
"{{ __podman_rootless | ternary(true, omit) }}" - become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" - - - name: Cancel linger if no more resources are in use - command: loginctl disable-linger {{ __podman_user }} - when: - - __podman_rootless | bool - - __podman_container_info.containers | length == 0 - - __podman_networks.stdout_lines | reject('match', '^podman$') | - list | length == 0 - changed_when: true diff --git a/tasks/create_update_kube_spec.yml b/tasks/create_update_kube_spec.yml index fca64981..95d7d35b 100644 --- a/tasks/create_update_kube_spec.yml +++ b/tasks/create_update_kube_spec.yml @@ -1,9 +1,8 @@ --- -- name: Enable lingering if needed - command: loginctl enable-linger {{ __podman_user }} - when: __podman_rootless | bool - args: - creates: /var/lib/systemd/linger/{{ __podman_user }} +- name: Manage linger + include_tasks: manage_linger.yml + vars: + __podman_item_state: present - name: Get the host mount volumes set_fact: @@ -47,6 +46,8 @@ password: "{{ container_image_password | default(omit) }}" register: __podman_image_updated when: __podman_pull_image | bool + until: __podman_image_updated is success + retries: "{{ podman_pull_retry | ternary(3, 0) }}" failed_when: - __podman_image_updated is failed - not __podman_continue_if_pull_fails diff --git a/tasks/create_update_quadlet_spec.yml b/tasks/create_update_quadlet_spec.yml index f76f232c..c3e00950 100644 --- a/tasks/create_update_quadlet_spec.yml +++ b/tasks/create_update_quadlet_spec.yml @@ -1,9 +1,8 @@ --- -- name: Enable lingering if needed - command: loginctl enable-linger {{ __podman_user }} - when: __podman_rootless | bool - args: - creates: /var/lib/systemd/linger/{{ __podman_user }} +- name: Manage linger + include_tasks: manage_linger.yml + vars: + __podman_item_state: present - name: Create host directories file: "{{ __defaults | combine(podman_host_directories[__hostitem]) @@ -31,6 +30,8 @@ password: "{{ container_image_password | default(omit) }}" register: 
__podman_image_updated when: __podman_pull_image | bool + until: __podman_image_updated is success + retries: "{{ podman_pull_retry | ternary(3, 0) }}" failed_when: - __podman_image_updated is failed - not __podman_continue_if_pull_fails diff --git a/tasks/handle_secret.yml b/tasks/handle_secret.yml index ecbe604f..b3677ef9 100644 --- a/tasks/handle_secret.yml +++ b/tasks/handle_secret.yml @@ -1,14 +1,5 @@ # SPDX-License-Identifier: MIT --- -- name: Set variables part 0 - set_fact: - __podman_secret: "{{ __podman_secret_item | - dict2items | rejectattr('key', 'match', __del_params) | - list | items2dict }}" - vars: - __del_params: "^(run_as_user)$" - no_log: true - - name: Set variables part 1 set_fact: __podman_user: "{{ __podman_secret_item['run_as_user'] | @@ -20,19 +11,43 @@ __podman_xdg_runtime_dir: >- /run/user/{{ ansible_facts["getent_passwd"][__podman_user][1] }} +- name: Manage linger + include_tasks: manage_linger.yml + vars: + __podman_item_state: "{{ __podman_secret_item.state | d('present') }}" + +- name: Stat XDG_RUNTIME_DIR + stat: + path: "{{ __podman_xdg_runtime_dir }}" + register: __podman_xdg_stat + when: + - __podman_rootless | bool + - __podman_xdg_runtime_dir | d("") | length > 0 + +# if XDG_RUNTIME_DIR does not exist, this means linger +# was already canceled, which means the user is attempting +# to remove more than once +# We use __params here because the Ansible module code will convert a `data` +# parameter string that looks like JSON e.g. 
{"test": "string"} to a dict or + list - there seems to be no way to prevent that - but if we construct the + parameter dict to pass to podman_secret, it seems to preserve the original + data types - name: Manage each secret - containers.podman.podman_secret: - data: "{{ __podman_secret.data | string - if 'data' in __podman_secret else omit }}" - driver: "{{ __podman_secret.driver | d(omit) }}" - driver_opts: "{{ __podman_secret.driver_opts | d(omit) }}" - executable: "{{ __podman_secret.executable | d(omit) }}" - force: "{{ __podman_secret.force | d(omit) }}" - name: "{{ __podman_secret.name }}" - skip_existing: "{{ __podman_secret.skip_existing | d(omit) }}" - state: "{{ __podman_secret.state | d(omit) }}" + containers.podman.podman_secret: "{{ __params }}" environment: XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}" become: "{{ __podman_rootless | ternary(true, omit) }}" become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}" - no_log: true + when: not __podman_rootless or __podman_xdg_stat.stat.exists + no_log: true + vars: + __params: | + {% set rc = {} %} + {% set supported_params = ['data', 'driver', 'driver_opts', 'executable', + 'force', 'name', 'skip_existing', 'state'] %} + {% for key in supported_params %} + {% if key in __podman_secret_item %} + {% set _ = rc.__setitem__(key, __podman_secret_item[key]) %} + {% endif %} + {% endfor %} + {{ rc }} diff --git a/tasks/main.yml b/tasks/main.yml index a34068f7..1b9ca4ab 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -106,6 +106,10 @@ selinux_ports: "{{ podman_selinux_ports }}" when: podman_selinux_ports | length > 0 +- name: Keep track of users that need to cancel linger + set_fact: + __podman_cancel_user_linger: [] + - name: Handle secrets include_tasks: handle_secret.yml loop: "{{ podman_secrets }}" @@ -124,3 +128,9 @@ loop: "{{ podman_quadlet_specs }}" loop_control: loop_var: __podman_quadlet_spec_item + +- name: Cancel linger + include_tasks: cancel_linger.yml + loop: "{{ 
__podman_cancel_user_linger }}" + loop_control: + loop_var: __podman_linger_user diff --git a/tasks/manage_linger.yml b/tasks/manage_linger.yml new file mode 100644 index 00000000..b506b703 --- /dev/null +++ b/tasks/manage_linger.yml @@ -0,0 +1,29 @@ +--- +# Input: +# * __podman_rootless - true or false +# * __podman_user - name of user +# * __podman_item_state - present or absent +# Globals: __podman_cancel_user_linger +- name: Enable linger if needed + when: + - __podman_rootless | bool + - __podman_item_state | d('present') != 'absent' + block: + - name: Enable linger if needed + command: loginctl enable-linger {{ __podman_user }} + when: __podman_rootless | bool + args: + creates: /var/lib/systemd/linger/{{ __podman_user }} + + - name: Mark user as not yet needing to cancel linger + set_fact: + __podman_cancel_user_linger: "{{ __podman_cancel_user_linger | + difference([__podman_user]) }}" + +- name: Mark user for possible linger cancel + set_fact: + __podman_cancel_user_linger: "{{ __podman_cancel_user_linger | + union([__podman_user]) }}" + when: + - __podman_rootless | bool + - __podman_item_state | d('present') == 'absent' diff --git a/templates/toml.j2 b/templates/toml.j2 index 2f38bd1c..59991263 100644 --- a/templates/toml.j2 +++ b/templates/toml.j2 @@ -5,6 +5,8 @@ {{ key }}={{ value }} {% elif value is mapping and value is not string %} {{ key }} = [{%- for k in value %} "{{k}}={{value[k]}}", {%- endfor %}] +{% elif value is not string %} +{{ key }} = {{ value }} {% else %} {{ key }}="{{ value }}" {% endif %} diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml index acc01c08..a9f01c9c 100644 --- a/tests/tests_basic.yml +++ b/tests/tests_basic.yml @@ -234,7 +234,8 @@ spec: containers: - name: bogus - image: this_is_a_bogus_image + image: >- + quay.io/linux-system-roles/this_is_a_bogus_image:latest rescue: - name: Verify image not pulled @@ -273,6 +274,7 @@ vars: podman_kube_specs: "{{ __podman_kube_specs | union([__podman_use_kube_file]) | list }}" 
+ podman_pull_retry: true - name: Check if pods are running command: podman pod inspect {{ item[0] }} --format {{ __fmt | quote }} @@ -407,6 +409,12 @@ ^[ ]*podman-kube@.+-{{ item[0] }}[.]yml[.]service[ ]+loaded[ ]+active + rescue: + - name: Dump journal + command: journalctl -ex + changed_when: false + failed_when: true + always: # have to clean up storage.conf - otherwise, get this message: # A storage.conf file exists at /etc/containers/storage.conf @@ -430,3 +438,4 @@ file: path: "{{ __kube_file_src.path }}" state: absent + delegate_to: localhost diff --git a/tests/tests_config_files.yml b/tests/tests_config_files.yml index bd25f896..28eef41b 100644 --- a/tests/tests_config_files.yml +++ b/tests/tests_config_files.yml @@ -140,6 +140,13 @@ loop_var: __file vars: __fingerprint: "system_role:podman" + + rescue: + - name: Dump journal + command: journalctl -ex + changed_when: false + failed_when: true + always: - name: Remove test config files file: diff --git a/tests/tests_include_vars_from_parent.yml b/tests/tests_include_vars_from_parent.yml index 3431cf17..a6821743 100644 --- a/tests/tests_include_vars_from_parent.yml +++ b/tests/tests_include_vars_from_parent.yml @@ -3,53 +3,57 @@ hosts: all gather_facts: true tasks: - - name: Create var file in caller that can override the one in called role - delegate_to: localhost - copy: - # usually the fake file will cause the called role to crash of - # overriding happens, but if not, set a variable that will - # allow to detect the bug - content: "__caller_override: true" - # XXX ugly, self-modifying code - changes the "caller" role on - # the controller - dest: "{{ playbook_dir }}/roles/caller/vars/{{ item }}.yml" - mode: preserve - loop: "{{ varfiles | unique }}" - # In case the playbook is executed against multiple hosts, use - # only the first one. Otherwise the hosts would stomp on each - # other since they are changing files on the controller. 
- when: inventory_hostname == ansible_play_hosts_all[0] - vars: - # change to hostvars['localhost']['ansible_facts'] to use the - # information for localhost - facts: "{{ ansible_facts }}" - versions: - - "{{ facts['distribution_version'] }}" - - "{{ facts['distribution_major_version'] }}" - separators: ["-", "_"] - # create all variants like CentOS, CentOS_8.1, CentOS-8.1, - # CentOS-8, CentOS-8.1 - # more formally: - # {{ ansible_distribution }}-{{ ansible_distribution_version }} - # {{ ansible_distribution }}-{{ ansible_distribution_major_version }} - # {{ ansible_distribution }} - # {{ ansible_os_family }} - # and the same for _ as separator. - varfiles: "{{ [facts['distribution']] | product(separators) | - map('join') | product(versions) | map('join') | list + - [facts['distribution'], facts['os_family']] }}" - register: __varfiles_created + - name: Run test + block: + - name: >- + Create var file in caller that can override the one in called role + delegate_to: localhost + copy: + # usually the fake file will cause the called role to crash if + # overriding happens, but if not, set a variable that will + # allow to detect the bug + content: "__caller_override: true" + # XXX ugly, self-modifying code - changes the "caller" role on + # the controller + dest: "{{ playbook_dir }}/roles/caller/vars/{{ item }}.yml" + mode: preserve + loop: "{{ varfiles | unique }}" + # In case the playbook is executed against multiple hosts, use + # only the first one. Otherwise the hosts would stomp on each + # other since they are changing files on the controller. 
+ when: inventory_hostname == ansible_play_hosts_all[0] + vars: + # change to hostvars['localhost']['ansible_facts'] to use the + # information for localhost + facts: "{{ ansible_facts }}" + versions: + - "{{ facts['distribution_version'] }}" + - "{{ facts['distribution_major_version'] }}" + separators: ["-", "_"] + # create all variants like CentOS, CentOS_8.1, CentOS-8.1, + # CentOS-8, CentOS-8.1 + varfiles: "{{ [facts['distribution']] | product(separators) | + map('join') | product(versions) | map('join') | list + + [facts['distribution'], facts['os_family']] }}" + register: __varfiles_created - - name: Import role - import_role: - name: caller - vars: - roletoinclude: linux-system-roles.podman + - name: Import role + import_role: + name: caller + vars: + roletoinclude: linux-system-roles.podman - - name: Cleanup - file: - path: "{{ item.dest }}" - state: absent - loop: "{{ __varfiles_created.results }}" - delegate_to: localhost - when: inventory_hostname == ansible_play_hosts_all[0] + rescue: + - name: Dump journal + command: journalctl -ex + changed_when: false + failed_when: true + + always: + - name: Cleanup + file: + path: "{{ item.dest }}" + state: absent + loop: "{{ __varfiles_created.results }}" + delegate_to: localhost + when: inventory_hostname == ansible_play_hosts_all[0] diff --git a/tests/tests_quadlet_basic.yml b/tests/tests_quadlet_basic.yml index 94c1b66c..a7f8811a 100644 --- a/tests/tests_quadlet_basic.yml +++ b/tests/tests_quadlet_basic.yml @@ -7,13 +7,16 @@ vars: podman_use_copr: false # disable copr for CI testing podman_fail_if_too_old: false + __json_secret_data: '{"test": "json"}' __secret_password_env: "{{ lookup('env', 'SYSTEM_ROLES_PODMAN_PASSWORD') }}" __podman_secrets: - name: mysql_container_root_password state: present - skip_existing: true data: "{{ (__secret_password_env | length > 0) | ternary(__secret_password_env, mysql_container_root_password) }}" + - name: json_secret + state: present + data: "{{ __json_secret_data | string 
}}" __podman_quadlet_specs: - name: quadlet-basic type: network @@ -34,168 +37,253 @@ Volume: quadlet-basic-mysql.volume:/var/lib/mysql Network: quadlet-basic.network # Once 4.5 is released change this line to use the quadlet Secret key - PodmanArgs: "--secret=mysql_container_root_password,type=env,\ - target=MYSQL_ROOT_PASSWORD" + PodmanArgs: >- + --secret=mysql_container_root_password,type=env,target=MYSQL_ROOT_PASSWORD + --secret=json_secret,type=mount,target=/tmp/test.json Environment: - FOO=/bin/busybox-extras - BAZ=test tasks: - - name: See if not pulling images fails + - name: Run test block: - - name: Run role - do not pull images + - name: See if not pulling images fails + block: + - name: Run role - do not pull images + include_role: + name: linux-system-roles.podman + vars: + podman_quadlet_specs: + - name: nopull + type: container + state: created + pull_image: false + activate_systemd_unit: false + Install: + WantedBy: default.target + Container: + Image: "{{ test_image }}" + ContainerName: nopull + + - name: Verify image not pulled + assert: + that: __podman_image_updated.results[0] is skipped + + - name: Run role - try to pull bogus image + include_role: + name: linux-system-roles.podman + vars: + podman_quadlet_specs: + - name: bogus + type: container + state: created + continue_if_pull_fails: true + activate_systemd_unit: false + Install: + WantedBy: default.target + Container: + Image: this_is_a_bogus_image + ContainerName: bogus + + - name: Verify image not pulled and no error + assert: + that: + - not __podman_image_updated.results[0] is changed + - not __podman_image_updated.results[0] is skipped + + always: + - name: Cleanup + include_role: + name: linux-system-roles.podman + vars: + podman_quadlet_specs: + - state: absent + name: "{{ item }}" + type: container + loop: + - nopull + - bogus + + - name: Create user for testing + user: + name: user_quadlet_basic + uid: 1111 + + # try to workaround the rootless containers error + # Error: + # 
mkdir /sys/fs/cgroup/devices/user.slice/runtime: permission denied + - name: Enable EL8 system to support rootless quadlets + when: + - ansible_facts["os_family"] == "RedHat" + - ansible_facts["distribution_version"] is version("9", "<") + block: + - name: Get local machine ID + slurp: + path: /etc/machine-id + register: __local_mach_id_enc + delegate_to: localhost + + - name: Skip test if cannot reboot + meta: end_host + when: ansible_facts["machine_id"] == __local_mac_id + vars: + __local_mac_id: "{{ __local_mach_id_enc.content | b64decode | + trim }}" + + - name: Enable cgroup controllers + changed_when: true + shell: | + set -euxo pipefail + cat > /etc/systemd/system/user-0.slice < /etc/systemd/system/user@.service.d/delegate.conf < /etc/systemd/system/user-.slice.d/override.conf <- + grubby --update-kernel=ALL + --args=systemd.unified_cgroup_hierarchy=1 + changed_when: true + + - name: Reboot + reboot: + + - name: Run the role - user include_role: name: linux-system-roles.podman vars: - podman_quadlet_specs: - - name: nopull - type: container - state: created - pull_image: false - activate_systemd_unit: false - Install: - WantedBy: default.target - Container: - Image: "{{ test_image }}" - ContainerName: nopull - - - name: Verify image not pulled - assert: - that: __podman_image_updated.results[0] is skipped - - - name: Run role - try to pull bogus image + podman_run_as_user: user_quadlet_basic + podman_secrets: "{{ __podman_secrets }}" + podman_quadlet_specs: "{{ __podman_quadlet_specs }}" + podman_pull_retry: true + + - name: Check files + command: cat {{ __dir }}/{{ item }} + changed_when: false + vars: + __dir: /home/user_quadlet_basic/.config/containers/systemd + loop: + - quadlet-basic-mysql.container + - quadlet-basic.network + - quadlet-basic-mysql.volume + + - name: Ensure linger + stat: + path: /var/lib/systemd/linger/user_quadlet_basic + register: __stat + failed_when: not __stat.stat.exists + + # must clean up networks last - cannot remove a network 
+ # in use by a container + - name: Cleanup user include_role: name: linux-system-roles.podman vars: - podman_quadlet_specs: - - name: bogus - type: container - state: created - continue_if_pull_fails: true - activate_systemd_unit: false - Install: - WantedBy: default.target - Container: - Image: this_is_a_bogus_image - ContainerName: bogus - - - name: Verify image not pulled and no error - assert: - that: - - not __podman_image_updated.results[0] is changed - - not __podman_image_updated.results[0] is skipped + podman_run_as_user: user_quadlet_basic + __absent: {"state":"absent"} + podman_secrets: "{{ __podman_secrets | map('combine', __absent) | + list }}" + podman_quadlet_specs: "{{ ((__podman_quadlet_specs | + rejectattr('type', 'match', '^network$') | list) + + (__podman_quadlet_specs | + selectattr('type', 'match', '^network$') | list)) | + map('combine', __absent) | list }}" - always: - - name: Cleanup + - name: Ensure no linger + stat: + path: /var/lib/systemd/linger/user_quadlet_basic + register: __stat + failed_when: __stat.stat.exists + + - name: Run the role - root include_role: name: linux-system-roles.podman vars: - podman_quadlet_specs: - - state: absent - name: "{{ item }}" - type: container + podman_secrets: "{{ __podman_secrets }}" + podman_quadlet_specs: "{{ __podman_quadlet_specs }}" + + - name: Check files + command: cat {{ __dir }}/{{ item }} + changed_when: false + vars: + __dir: /etc/containers/systemd loop: - - nopull - - bogus - - - name: Create user for testing - user: - name: user_quadlet_basic - uid: 1111 - - - name: Run the role - user - include_role: - name: linux-system-roles.podman - vars: - podman_run_as_user: user_quadlet_basic - podman_secrets: "{{ __podman_secrets }}" - podman_quadlet_specs: "{{ __podman_quadlet_specs }}" - - - name: Check files - command: cat {{ __dir }}/{{ item }} - changed_when: false - vars: - __dir: /home/user_quadlet_basic/.config/containers/systemd - loop: - - quadlet-basic-mysql.container - - 
quadlet-basic.network - - quadlet-basic-mysql.volume - - # must clean up networks last - cannot remove a network - # in use by a container - - name: Cleanup user - include_role: - name: linux-system-roles.podman - vars: - podman_run_as_user: user_quadlet_basic - __absent: {"state":"absent"} - podman_secrets: "{{ __podman_secrets | map('combine', __absent) | - list }}" - podman_quadlet_specs: "{{ ((__podman_quadlet_specs | - rejectattr('type', 'match', '^network$') | list) + - (__podman_quadlet_specs | selectattr('type', 'match', '^network$') | - list)) | map('combine', __absent) | list }}" - - - name: Set secret var for root testing - set_fact: - __root_podman_secrets: "{{ __podman_secrets + __json_secret }}" - __root_json_data: '{"test": "json"}' - vars: - __json_secret: - - name: json_secret - state: present - data: '{"test": "json"}' - no_log: true - - - name: Set container vars for root testing - set_fact: - __root_podman_quadlet_specs: "{{ __podman_quadlet_specs + - __json_container }}" - vars: - __json_container: - - name: json_container - type: container - Install: - WantedBy: default.target - Container: - Image: "{{ mysql_image }}" - ContainerName: json_container - # Once 4.5 is released change this line to use the quadlet Secret - PodmanArgs: "--secret=mysql_container_root_password,type=env,\ - target=MYSQL_ROOT_PASSWORD --secret=json_secret,type=mount,\ - target=/tmp/test.json" - - - name: Run the role - root - include_role: - name: linux-system-roles.podman - vars: - podman_secrets: "{{ __root_podman_secrets }}" - podman_quadlet_specs: "{{ __root_podman_quadlet_specs }}" - - - name: Check files - command: cat {{ __dir }}/{{ item }} - changed_when: false - vars: - __dir: /etc/containers/systemd - loop: - - quadlet-basic-mysql.container - - quadlet-basic.network - - quadlet-basic-mysql.volume - - - name: Check JSON - command: podman exec json_container cat /tmp/test.json - register: __result - failed_when: __result.stdout != __root_json_data - 
changed_when: false - - - name: Cleanup system - root - include_role: - name: linux-system-roles.podman - vars: - __absent: {"state":"absent"} - podman_secrets: "{{ __root_podman_secrets | map('combine', __absent) | - list }}" - podman_quadlet_specs: "{{ ((__root_podman_quadlet_specs | - rejectattr('type', 'match', '^network$') | list) + - (__root_podman_quadlet_specs | - selectattr('type', 'match', '^network$') | list)) | - map('combine', __absent) | list }}" + - quadlet-basic-mysql.container + - quadlet-basic.network + - quadlet-basic-mysql.volume + + - name: Check JSON + command: podman exec quadlet-basic-mysql cat /tmp/test.json + register: __result + failed_when: __result.stdout != __json_secret_data + changed_when: false + + rescue: + - name: Dump journal + command: journalctl -ex + changed_when: false + failed_when: true + + always: + - name: Cleanup + block: + - name: Cleanup user + include_role: + name: linux-system-roles.podman + vars: + podman_run_as_user: user_quadlet_basic + __absent: {"state":"absent"} + podman_secrets: "{{ __podman_secrets | + map('combine', __absent) | list }}" + podman_quadlet_specs: "{{ ((__podman_quadlet_specs | + rejectattr('type', 'match', '^network$') | list) + + (__podman_quadlet_specs | + selectattr('type', 'match', '^network$') | list)) | + map('combine', __absent) | list }}" + + - name: Remove test user + user: + name: user_quadlet_basic + uid: 1111 + state: absent + + - name: Cleanup system - root + include_role: + name: linux-system-roles.podman + vars: + __absent: {"state":"absent"} + podman_secrets: "{{ __podman_secrets | + map('combine', __absent) | list }}" + podman_quadlet_specs: "{{ ((__podman_quadlet_specs | + rejectattr('type', 'match', '^network$') | list) + + (__podman_quadlet_specs | + selectattr('type', 'match', '^network$') | list)) | + map('combine', __absent) | list }}" + + rescue: + - name: Dump journal + command: journalctl -ex + changed_when: false + failed_when: true diff --git 
a/tests/tests_quadlet_demo.yml b/tests/tests_quadlet_demo.yml index c388a311..a719f9c4 100644 --- a/tests/tests_quadlet_demo.yml +++ b/tests/tests_quadlet_demo.yml @@ -28,140 +28,150 @@ "/tmp/quadlet_demo": mode: "0777" tasks: - - name: Generate certificates - include_role: - name: fedora.linux_system_roles.certificate - vars: - certificate_requests: - - name: "{{ __test_cert_name }}" - dns: ["localhost"] - ca: self-sign - certificate_test_mode: true - certificate_test_remove_files: true + - name: Run tests + block: + - name: Generate certificates + include_role: + name: fedora.linux_system_roles.certificate + vars: + certificate_requests: + - name: "{{ __test_cert_name }}" + dns: ["localhost"] + ca: self-sign + certificate_test_mode: true + certificate_test_remove_files: true - - name: Run the role - include_role: - name: linux-system-roles.podman - vars: - podman_secrets: - - name: mysql-root-password-container - state: present - skip_existing: true - data: "{{ mysql_container_root_password }}" - - name: mysql-root-password-kube - state: present - skip_existing: true - data: | - apiVersion: v1 - data: - password: "{{ mysql_container_root_password | b64encode }}" - kind: Secret - metadata: - name: mysql-root-password-kube - - name: envoy-certificates - state: present - skip_existing: true - data: | - apiVersion: v1 - data: - certificate.key: {{ - certificate_test_certs[__test_cert_name]['key_content'] | - b64encode }} - certificate.pem: {{ - certificate_test_certs[__test_cert_name]['cert_content'] | - b64encode }} - kind: Secret - metadata: - name: envoy-certificates + - name: Run the role + include_role: + name: linux-system-roles.podman + vars: + podman_pull_retry: true + podman_secrets: + - name: mysql-root-password-container + state: present + data: "{{ mysql_container_root_password }}" + - name: mysql-root-password-kube + state: present + data: | + apiVersion: v1 + data: + password: "{{ mysql_container_root_password | b64encode }}" + kind: Secret + metadata: 
+ name: mysql-root-password-kube + - name: envoy-certificates + state: present + data: | + apiVersion: v1 + data: + certificate.key: {{ + certificate_test_certs[__test_cert_name]['key_content'] | + b64encode }} + certificate.pem: {{ + certificate_test_certs[__test_cert_name]['cert_content'] | + b64encode }} + kind: Secret + metadata: + name: envoy-certificates - - name: Check - command: ls -alrtF /etc/containers/systemd - changed_when: false + - name: Check quadlet files + command: ls -alrtF /etc/containers/systemd + changed_when: false - - name: Check containers - command: podman ps -a - changed_when: false - failed_when: false + - name: Check containers + command: podman ps -a + changed_when: false + failed_when: false - - name: Check pods - command: podman pod ps --ctr-ids --ctr-names --ctr-status - changed_when: false - failed_when: false + - name: Check pods + command: podman pod ps --ctr-ids --ctr-names --ctr-status + changed_when: false + failed_when: false - - name: Check systemd - # noqa command-instead-of-module - shell: set -euo pipefail; systemctl list-units | grep quadlet - changed_when: false - failed_when: false + - name: Check systemd + # noqa command-instead-of-module + shell: set -euo pipefail; systemctl list-units | grep quadlet + changed_when: false + failed_when: false - - name: Check web - get_url: - url: https://localhost:8000 - dest: /run/out - mode: 0600 - validate_certs: false - register: __web_status - until: __web_status is success - retries: 6 - delay: 5 + - name: Check web + get_url: + url: https://localhost:8000 + dest: /run/out + mode: 0600 + validate_certs: false + register: __web_status + until: __web_status is success + retries: 6 + delay: 5 - - name: Show web - command: cat /run/out - changed_when: false - when: __web_status is success + - name: Show web + command: cat /run/out + changed_when: false + when: __web_status is success - - name: Show errors - command: journalctl -xe - changed_when: false - when: __web_status is failed 
+ - name: Error + fail: + when: __web_status is failed - - name: Exit - fail: - when: __web_status is failed + rescue: + - name: Dump journal + command: journalctl -ex + changed_when: false + failed_when: true - - name: Cleanup - include_role: - name: linux-system-roles.podman - vars: - podman_quadlet_specs: - - file_src: quadlet-demo-mysql.volume - state: absent - - template_src: quadlet-demo-mysql.container.j2 - state: absent - - file_src: envoy-proxy-configmap.yml - state: absent - - template_src: quadlet-demo.yml.j2 - state: absent - - file_src: quadlet-demo.kube - state: absent - - file_src: quadlet-demo.network - state: absent - podman_secrets: - - name: mysql-root-password-container - state: absent - - name: mysql-root-password-kube - state: absent - - name: envoy-certificates - state: absent + always: + - name: Check + command: podman ps -a + changed_when: false - - name: Check - command: podman ps -a - changed_when: false + - name: Check pods + command: podman pod ps --ctr-ids --ctr-names --ctr-status + changed_when: false + failed_when: false - - name: Check pods - command: podman pod ps --ctr-ids --ctr-names --ctr-status - changed_when: false - failed_when: false + - name: Check systemd + # noqa command-instead-of-module + shell: >- + set -euo pipefail; + systemctl list-units --all | grep quadlet + changed_when: false + failed_when: false - - name: Check systemd - # noqa command-instead-of-module - shell: >- - set -euo pipefail; - systemctl list-units --all --with-dependencies | grep quadlet - changed_when: false - failed_when: false + - name: LS + command: ls -alrtF /etc/systemd/system + changed_when: false + failed_when: false - - name: LS - command: ls -alrtF /etc/systemd/system - changed_when: false - failed_when: false + - name: Cleanup + block: + - name: Cleanup + include_role: + name: linux-system-roles.podman + vars: + podman_quadlet_specs: + - template_src: quadlet-demo-mysql.container.j2 + state: absent + - file_src: quadlet-demo-mysql.volume + 
state: absent + - file_src: envoy-proxy-configmap.yml + state: absent + - file_src: quadlet-demo.kube + state: absent + - template_src: quadlet-demo.yml.j2 + state: absent + - file_src: quadlet-demo.network + state: absent + podman_secrets: + - name: mysql-root-password-container + state: absent + - name: mysql-root-password-kube + state: absent + - name: envoy-certificates + state: absent + + rescue: + - name: Get journald + command: journalctl -ex + changed_when: false + failed_when: true