From fcdfcc7bef37cf457fb3273273dca0d4ca2a1bbd Mon Sep 17 00:00:00 2001 From: Dave Demlow Date: Wed, 28 Feb 2024 10:49:27 -0500 Subject: [PATCH] in process upload sync --- simple_nfs_vm_create.yml | 113 +++++++++++++++ simple_vm_create_kairos.yml | 261 ++++++++++++++++++++++++++++++++++ simple_vm_deploy_avassa.yml | 5 +- simple_vm_deploy_ecs.yml | 101 +++++++++++++ simple_vm_deploy_microk8s.yml | 1 - vm_snapshot_clone_older.yml | 72 ++++++++++ vm_snapshot_info.yml | 14 ++ 7 files changed, 564 insertions(+), 3 deletions(-) create mode 100644 simple_nfs_vm_create.yml create mode 100644 simple_vm_create_kairos.yml create mode 100644 simple_vm_deploy_ecs.yml create mode 100644 vm_snapshot_clone_older.yml diff --git a/simple_nfs_vm_create.yml b/simple_nfs_vm_create.yml new file mode 100644 index 0000000..9ef0588 --- /dev/null +++ b/simple_nfs_vm_create.yml @@ -0,0 +1,113 @@ +--- +- name: Simple vm deploy #edit vmname variable - use -l filter to specify cluster vs. full inventory + hosts: edge + vars: + - vmname: "nfs-{{site_name}}" + connection: local + gather_facts: false + strategy: host_pinned # free #allows each cluster to start next task before all clusters have finished current task + environment: #if set here - hypercore modules will automatically use this for each remote cluster - avoiding need to specify cluster_instance for each test + SC_HOST: "https://{{ inventory_hostname }}" + SC_USERNAME: "{{ scale_user }}" + SC_PASSWORD: "{{ scale_pass }}" + SC_TIMEOUT: 60 + + tasks: + - name: Ubuntu20_04 template - Ubuntu 20.04 - import if not present # used to clone and cloud-init target VM + scale_computing.hypercore.vm_import: + cluster_instance: + host: "https://{{inventory_hostname }}" + username: "{{scale_user}}" + password: "{{scale_pass}}" + vm_name: ubuntu20_04 + http_uri: + path: 'https://github.com/ddemlow/RestAPIExamples/raw/master/ubuntu20_04-cloud-init' + file_name: ubuntu20_04-cloud-init.xml +# until: ubuntu20_04.msg is search("import complete") #doesnt' check if vm already exists + retries: 5 + delay: 1 + ignore_errors: false # import errors are not uncommon depdending on network connection to smb / https source + register: ubuntu20_04 + + + - name: Clone and configure ad hoc "{{ vmname }}" + scale_computing.hypercore.vm_clone: + cluster_instance: + host: "https://{{inventory_hostname }}" + username: "{{scale_user}}" + password: "{{scale_pass}}" + vm_name: "{{vmname}}" + source_vm_name: ubuntu20_04 + cloud_init: + user_data: | + #cloud-config + password: "password" + chpasswd: { expire: False } + ssh_pwauth: True + ssh_authorized_keys: # Add your ssh public key for publickey authentication + - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDihWWhjoPj8KVLtdLDwNJQ71zi9An0iUFjefRWu2Eju ddemlow@scalecomputing.com + disable_root: false # allow ssh root login + ssh_import_id: gh:ddemlow + package_update: true + package_upgrade: true + packages: [nfs-kernel-server, rpcbind, nfs-common, qemu-guest-agent, unzip] + runcmd: + # Ensure rpcbind is enabled and started + - systemctl enable rpcbind + - systemctl start rpcbind + # Create a directory to share via NFS + - mkdir -p /export/data + - chmod 777 /export/data + - chown nobody:nogroup /export/data + + # Configure NFS exports + - echo "/export/data *(rw,sync,no_subtree_check)" >> /etc/exports + + # Apply the NFS export configuration + - exportfs -a + + # Enable and start the NFS server + - systemctl enable nfs-kernel-server + - systemctl start nfs-kernel-server + - [ systemctl, restart, --no-block, qemu-guest-agent ] + final_message: "The NFS server 
is ready!" + bootcmd: + - [ sh, -c, 'sudo echo GRUB_CMDLINE_LINUX="nomodeset" >> /etc/default/grub' ] + - [ sh, -c, 'sudo echo GRUB_GFXPAYLOAD_LINUX="1024x768" >> /etc/default/grub' ] + - [ sh, -c, 'sudo echo GRUB_DISABLE_LINUX_UUID=true >> /etc/default/grub' ] + - [ sh, -c, 'sudo update-grub' ] + write_files: + - content: "{{ inventory_hostname }}" + path: /clusterip.txt + meta_data: | + dsmode: local + local-hostname: "{{ vmname }}" + + - name: Disk desired configuration for "{{ vmname }}" + scale_computing.hypercore.vm_disk: + cluster_instance: + host: "https://{{inventory_hostname }}" + username: "{{scale_user}}" + password: "{{scale_pass}}" + vm_name: "{{ vmname }}" + items: + - disk_slot: 0 + type: virtio_disk + size: "{{ '400 GB' | human_to_bytes }}" # 50GB | human to bytes results in 53.7GB VSD in Hypercore + state: present + + - name: Vm desired configuration and state for "{{ vmname }}" + scale_computing.hypercore.vm_params: + cluster_instance: + host: "https://{{inventory_hostname }}" + username: "{{scale_user}}" + password: "{{scale_pass}}" + vm_name: "{{vmname}}" + memory: "{{ '4 GB' | human_to_bytes }}" + description: + tags: + - ansible + - "{{ site_name }}" + - ansible_group__"{{vmname}}" # this will create tag used by hypercore inventory plugin when executing towards VM hosts + vcpu: 4 + power_state: start diff --git a/simple_vm_create_kairos.yml b/simple_vm_create_kairos.yml new file mode 100644 index 0000000..8be546f --- /dev/null +++ b/simple_vm_create_kairos.yml @@ -0,0 +1,261 @@ +--- +- name: Simple vm deploy #edit vmname variable - use -l filter to specify cluster vs. full inventory + hosts: all + vars: + - vmname: kairos-243 + connection: ansible.builtin.local + gather_facts: false + strategy: host_pinned # free #allows each cluster to start next task before all clusters have finished current task + environment: #if set here - hypercore modules will automatically use this for each remote cluster - avoiding need to specify cluster_instance for each test + SC_HOST: "https://{{ inventory_hostname }}" + SC_USERNAME: "{{ scale_user }}" + SC_PASSWORD: "{{ scale_pass }}" + + tasks: + # - name: Create project directory /tmp (if it doesn't exist already) + # file: state=directory path=~/tmp + + # - name: Download ISO image from http://tinycorelinux.net/13.x/x86/release/TinyCore-current.iso and save it into /tmp/TinyCore-vm-integration.iso + # get_url: url=http://tinycorelinux.net/13.x/x86/release/TinyCore-current.iso dest=~/tmp/TinyCore-vm-integration.iso + + # - name: Upload ISO image TinyCore-vm-integration.iso to HyperCore API + # scale_computing.hypercore.iso: + # cluster_instance: + # host: "https://{{inventory_hostname }}" + # username: "{{scale_user}}" + # password: "{{scale_pass}}" + # name: "TinyCore-current.iso" + # source: "/Users/davedemlow/Downloads/TinyCore-current.iso" + # state: present + # register: result + + - name: Create and start the VM with disks, nics and boot devices set. Attach ISO onto the VM. 
Add cloud init data + scale_computing.hypercore.vm: + cluster_instance: + host: "https://{{inventory_hostname }}" + username: "{{scale_user}}" + password: "{{scale_pass}}" + vm_name: "{{ vmname }}" + description: kairos test + state: present + tags: + - kairos + memory: "{{ '6 GB' | human_to_bytes }}" + vcpu: 4 + power_state: start + disks: + # - type: ide_cdrom + # disk_slot: 0 + # iso_name: "" + - type: ide_cdrom + disk_slot: 0 + iso_name: "kairos-opensuse-leap-15.5-standard-amd64-generic-v2.4.3-k3sv1.26.9+k3s1.iso" + - type: virtio_disk + disk_slot: 0 + size: "{{ '50 GB' | human_to_bytes }}" + - type: nvram + disk_slot: 1 + nics: + - vlan: 0 + type: virtio + boot_devices: + - type: virtio_disk + disk_slot: 0 + - type: ide_cdrom + disk_slot: 0 + cloud_init: + user_data: | + #cloud-config + install: + device: "/dev/vda" + reboot: true + poweroff: false + auto: true + encrypted_partitions: + - COS_PERSISTENT + users: + - name: "kairos" + passwd: "kairos" + ssh_authorized_keys: + - github:ddemlow + - name: "testuser" + passwd: "testuser" + ssh_authorized_keys: + - github:ddemlow + groups: + - "admin" + k3s: + enabled: true + hostname: "{{ vmname }}" + bundles: + - targets: + - run://quay.io/kairos/community-bundles:system-upgrade-controller_latest + name: "Deploy fleet out of the box" + stages: + boot: + - name: "Copy fleet deployment files" + files: + - path: /var/lib/rancher/k3s/server/manifests/fleet-config.yaml + content: | + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: fleet-crd + namespace: cattle-system + spec: + chart: https://github.com/rancher/fleet/releases/download/v0.3.8/fleet-crd-0.3.8.tgz + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: fleet + namespace: cattle-system + spec: + chart: https://github.com/rancher/fleet/releases/download/v0.3.8/fleet-0.3.8.tgz + - path: /var/lib/rancher/k3s/server/manifests/portainer-agent-edge-k8s.yaml + content: | + apiVersion: v1 + kind: Namespace + metadata: + name: portainer + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: portainer-sa-clusteradmin + namespace: portainer + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: portainer-crb-clusteradmin + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer + # Optional: can be added to expose the agent port 80 to associate an Edge key. 
+ # --- + # apiVersion: v1 + # kind: Service + # metadata: + # name: portainer-agent + # namespace: portainer + # spec: + # type: LoadBalancer + # selector: + # app: portainer-agent + # ports: + # - name: http + # protocol: TCP + # port: 80 + # targetPort: 80 + --- + apiVersion: v1 + kind: Service + metadata: + name: portainer-agent + namespace: portainer + spec: + clusterIP: None + selector: + app: portainer-agent + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: portainer-agent + namespace: portainer + spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.19.1 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EDGE + value: "1" + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent" + - name: AGENT_SECRET + valueFrom: + configMapKeyRef: + name: portainer-agent-edge + key: EDGE_SECRET + optional: true + - name: EDGE_KEY + valueFrom: + secretKeyRef: + name: portainer-agent-edge-key + key: edge.key + envFrom: + - configMapRef: + name: portainer-agent-edge + ports: + - containerPort: 9001 + protocol: TCP + - containerPort: 80 + protocol: TCP + runcmd: + - export PORTAINER_EDGE_ID=$(hostname) + - curl https://downloads.portainer.io/ee2-19/portainer-edge-agent-setup.sh -o /oem/portainer-edge-agent-setup.sh + - curl https://downloads.portainer.io/ee2-19/portainer-edge-agent-setup.sh -o /usr/local/portainer-edge-agent-setup.sh + - /bin/bash /oem/portainer-edge-agent-setup.sh "{{ vmname }}" "aHR0cHM6Ly8yMC44OC4yMi4yMjc6OTQ0M3wyMC44OC4yMi4yMjc6ODAwMHwydXByOUtuYTd6ZHBNWExNMm9meDNubHZEOHh2THpLVjN5WnlXM1lsWFdvPXww" "1" "" "" + - /bin/bash /usr/local/portainer-edge-agent-setup.sh "{{ vmname }}" "aHR0cHM6Ly8yMC44OC4yMi4yMjc6OTQ0M3wyMC44OC4yMi4yMjc6ODAwMHwydXByOUtuYTd6ZHBNWExNMm9meDNubHZEOHh2THpLVjN5WnlXM1lsWFdvPXww" "1" "" "" + # p2p: + # disable_dht: true + # auto: + # ha: + # # Enables HA control-plane + # enable: true + # # number of HA master node (beside the one used for init) for the control-plane + # master_nodes: 2 + # network_token: "b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MDAwCiAgICBrZXk6IFFJbVpQVkhoMXhXZVl4TDZRMnRSYTN5dGVNZlhhTzJIeHJRZzdRNmZ3UGgKICAgIGxlbmd0aDogNDMKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTAwMAogICAga2V5OiBEQUhZblI0WFExWjMwanN2OW04bklzU2JVU0gzamo3REY0dUpEU3ZRWkw2CiAgICBsZW5ndGg6IDQzCnJvb206IEpkMmZTN21NblhwOXluQlNZN1RZMzJYSm9sZUV1c3NVOGFYaU9DdEcyS1IKcmVuZGV6dm91czogMzFXeGpSQk1SM2JSalNKMFpSSHZoOW1rNGNybkpTZElGRDdMMlNJd3pvVwptZG5zOiA4MGc1S1dTTk5pQXRCT1ZvRGJYMXpESGVHMnI3MEFvZmlQYVVhdHEwWHZFCm1heF9tZXNzYWdlX3NpemU6IDIwOTcxNTIwCg==" + machine_type: BIOS + operating_system: os_other + register: vm_created + + + # - name: Disk desired configuration for "{{ vmname }}" + # scale_computing.hypercore.vm_disk: + # vm_name: "{{ vmname }}" + # items: + # - disk_slot: 0 + # type: virtio_disk + # size: "{{ '100 GB' | human_to_bytes }}" # 50GB | human to bytes results in 53.7GB VSD in Hypercore + # state: present + + # - name: Vm desired configuration and state for "{{ vmname }}" + # scale_computing.hypercore.vm_params: + # vm_name: "{{vmname}}" + # memory: "{{ '4 GB' | human_to_bytes }}" + # description: + # tags: + # - "{{vmname}}" + # - ansible + # - "{{ site_name }}" + # - ansible_group__"{{vmname}}" # this will create tag used by hypercore inventory plugin when executing towards VM hosts + # vcpu: 4 + 
+    # power_state: start
diff --git a/simple_vm_deploy_avassa.yml b/simple_vm_deploy_avassa.yml
index f1c067d..f72caae 100644
--- a/simple_vm_deploy_avassa.yml
+++ b/simple_vm_deploy_avassa.yml
@@ -53,7 +53,7 @@
       loop: "{{ image_url }}"
       ignore_errors: false
 
-    #TODO - could use a handler to force update virtual disk attached to template only if there is a new download or upload?
+    # TODO #2 - could use a handler to force update virtual disk attached to template only if there is a new download or upload?
 
     - name: Get info about template VM {{ image_name }}
       scale_computing.hypercore.vm_info:
@@ -83,7 +83,8 @@
       when: vm_info_result.records | length == 0 #only create VM if it doesn't already exist - else would delete existing template disk
       register: template
 
-    - name: Attach uploaded virtual disk to "{{ image_name }}" template # this will attach latest image every time - should there be way to only attach if not exist?
+#TODO - may want a way to force update of virtual disk IN TEMPLATE vm - maybe as simple as delete template if the image is updated?
+    - name: Attach uploaded virtual disk to "{{ image_name }}" template # this will NOT attach / update latest image
       scale_computing.hypercore.virtual_disk_attach:
         name: "{{ image_name }}"
         vm_name: "{{ image_name }}"
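Note on the TODO markers in the avassa change above: the usual Ansible pattern for "only refresh the template's disk when a new image was actually downloaded or uploaded" is notify/handlers, since a handler only runs when the notifying task reports changed. The following is only a minimal sketch and is not part of this patch; the task names, the image_path variable, and the exact module arguments are placeholders that would be taken from simple_vm_deploy_avassa.yml itself.

  tasks:
    - name: Upload latest image as a virtual disk              # stand-in for the existing upload task
      scale_computing.hypercore.virtual_disk:
        name: "{{ image_name }}"
        source: "{{ image_path }}"                             # placeholder variable
        state: present
      notify: Reattach template virtual disk                   # fires only when the upload reports "changed"

  handlers:
    - name: Reattach template virtual disk
      scale_computing.hypercore.virtual_disk_attach:
        name: "{{ image_name }}"
        vm_name: "{{ image_name }}"
        # remaining arguments same as the attach task in the playbook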
diff --git a/simple_vm_deploy_ecs.yml b/simple_vm_deploy_ecs.yml
new file mode 100644
index 0000000..d5794f4
--- /dev/null
+++ b/simple_vm_deploy_ecs.yml
@@ -0,0 +1,101 @@
+---
+- name: Simple vm deploy # edit vmname variable - use -l filter to specify cluster vs. full inventory
+  hosts: edge
+  vars:
+    - vmname: ubuntu-ecs10
+  connection: local
+  gather_facts: false
+  strategy: host_pinned # free # allows each cluster to start next task before all clusters have finished current task
+  environment: # if set here - hypercore modules will automatically use this for each remote cluster - avoiding need to specify cluster_instance for each test
+    SC_HOST: "https://{{ inventory_hostname }}"
+    SC_USERNAME: "{{ scale_user }}"
+    SC_PASSWORD: "{{ scale_pass }}"
+    SC_TIMEOUT: 60
+
+  tasks:
+    - name: Ubuntu20_04 template - Ubuntu 20.04 - import if not present # used to clone and cloud-init target VM
+      scale_computing.hypercore.vm_import:
+        cluster_instance:
+          host: "https://{{ inventory_hostname }}"
+          username: "{{ scale_user }}"
+          password: "{{ scale_pass }}"
+        vm_name: ubuntu20_04
+        http_uri:
+          path: 'https://github.com/ddemlow/RestAPIExamples/raw/master/ubuntu20_04-cloud-init'
+          file_name: ubuntu20_04-cloud-init.xml
+#     until: ubuntu20_04.msg is search("import complete") # doesn't check if vm already exists
+      retries: 5
+      delay: 1
+      ignore_errors: false # import errors are not uncommon depending on network connection to smb / https source
+      register: ubuntu20_04
+
+
+    - name: Clone and configure ad hoc "{{ vmname }}"
+      scale_computing.hypercore.vm_clone:
+        cluster_instance:
+          host: "https://{{ inventory_hostname }}"
+          username: "{{ scale_user }}"
+          password: "{{ scale_pass }}"
+        vm_name: "{{ vmname }}"
+        source_vm_name: ubuntu20_04
+        cloud_init:
+          user_data: |
+            #cloud-config
+            password: "password"
+            chpasswd: { expire: False }
+            ssh_pwauth: True
+            ssh_authorized_keys: # Add your ssh public key for publickey authentication
+              - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDihWWhjoPj8KVLtdLDwNJQ71zi9An0iUFjefRWu2Eju ddemlow@scalecomputing.com
+            disable_root: false # allow ssh root login
+            ssh_import_id: gh:ddemlow
+            apt: {sources: {docker.list: {source: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable', keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88}}}
+            package_update: true
+            package_upgrade: true
+            packages: [qemu-guest-agent, docker-ce, docker-ce-cli, docker-compose, ansible, git, unzip]
+            bootcmd:
+              - [ sh, -c, 'sudo echo GRUB_CMDLINE_LINUX="nomodeset" >> /etc/default/grub' ]
+              - [ sh, -c, 'sudo echo GRUB_GFXPAYLOAD_LINUX="1024x768" >> /etc/default/grub' ]
+              - [ sh, -c, 'sudo echo GRUB_DISABLE_LINUX_UUID=true >> /etc/default/grub' ]
+              - [ sh, -c, 'sudo update-grub' ]
+            runcmd:
+              - [ systemctl, restart, --no-block, qemu-guest-agent ]
+              - curl --proto "https" -o "/tmp/ecs-anywhere-install.sh" "https://amazon-ecs-agent.s3.amazonaws.com/ecs-anywhere-install-latest.sh" && bash /tmp/ecs-anywhere-install.sh --region "us-east-2" --cluster "ecs-westfield1" --activation-id "f7a24624-973e-4651-8d88-5ed3a344846f" --activation-code "HUTTHch9a2e7qE3Atlrf"
+            write_files:
+              # configure docker daemon to be accessible remotely via TCP on socket 2375
+              - content: |
+                  [Service]
+                  ExecStart=
+                  ExecStart=/usr/bin/dockerd -H unix:// -H tcp://0.0.0.0:2375
+                path: /etc/systemd/system/docker.service.d/options.conf
+          meta_data: |
+            dsmode: local
+            local-hostname: "{{ vmname }}"
+
+    - name: Disk desired configuration for "{{ vmname }}"
+      scale_computing.hypercore.vm_disk:
+        cluster_instance:
+          host: "https://{{ inventory_hostname }}"
+          username: "{{ scale_user }}"
+          password: "{{ scale_pass }}"
+        vm_name: "{{ vmname }}"
+        items:
+          - disk_slot: 0
+            type: virtio_disk
+            size: "{{ '400 GB' | human_to_bytes }}" # 50GB | human_to_bytes results in 53.7GB VSD in Hypercore
+        state: present
+
+    - name: Vm desired configuration and state for "{{ vmname }}"
+      scale_computing.hypercore.vm_params:
+        cluster_instance:
+          host: "https://{{ inventory_hostname }}"
+          username: "{{ scale_user }}"
+          password: "{{ scale_pass }}"
+        vm_name: "{{ vmname }}"
+        memory: "{{ '4 GB' | human_to_bytes }}"
+        description:
+        tags:
+          - ansible
+          - "{{ site_name }}"
+          - ansible_group__ecs # this will create a tag used by the hypercore inventory plugin when executing against VM hosts
+        vcpu: 4
+        power_state: start
diff --git a/simple_vm_deploy_microk8s.yml b/simple_vm_deploy_microk8s.yml
index 5ee75ef..885ff7f 100644
--- a/simple_vm_deploy_microk8s.yml
+++ b/simple_vm_deploy_microk8s.yml
@@ -55,7 +55,6 @@
           disable_root: false # allow ssh root login
           ssh_pwauth: True
           ssh_import_id: gh:ddemlow
-          apt: {sources: {docker.list: {source: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable', keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88}}}
           packages: [snapd, qemu-guest-agent, ansible, git, unzip]
           snap:
             commands:
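The vm_snapshot_clone_older.yml playbook added below filters snapshots by converting use_date to an epoch value and then looping over every record with set_fact. The same filtered_snapshots list can be built in a single step with selectattr; this is only an optional sketch, not part of the patch, and assumes each record exposes an integer timestamp field, as the loop-based version below does.

    - name: Build filtered_snapshots without a loop
      ansible.builtin.set_fact:
        filtered_snapshots: "{{ snapshot_results.records
                                | selectattr('timestamp', 'lt', (use_date | to_datetime).strftime('%s') | int)
                                | list }}"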
diff --git a/vm_snapshot_clone_older.yml b/vm_snapshot_clone_older.yml
new file mode 100644
index 0000000..e0f2c97
--- /dev/null
+++ b/vm_snapshot_clone_older.yml
@@ -0,0 +1,72 @@
+---
+- name: clone all snapshots older than use_date
+  hosts: all
+  become: false
+  gather_facts: false
+  connection: ansible.builtin.local
+  strategy: host_pinned
+  environment: # if set here - hypercore modules will automatically use this for each remote cluster - avoiding need to specify cluster_instance for each test
+    SC_HOST: "https://{{ inventory_hostname }}"
+    SC_USERNAME: "{{ scale_user }}"
+    SC_PASSWORD: "{{ scale_pass }}"
+    SC_TIMEOUT: 600
+  vars:
+    # Format: 'YYYY-MM-DD hh:mm:ss'
+    # All snapshots older than this date will be cloned.
+    # use_date timezone should match the Scale cluster timezone
+    use_date: '2022-01-01 12:52:00'
+
+  tasks:
+    # ------------------------------------------------------
+    - name: List all snapshots
+      scale_computing.hypercore.vm_snapshot_info:
+      register: snapshot_results
+
+    - name: Convert date to unix timestamp 'epoch'
+      ansible.builtin.set_fact:
+        epoch_timestamp: "{{ (use_date | to_datetime).strftime('%s') }}"
+
+    - name: Show epoch_timestamp
+      ansible.builtin.debug:
+        var: epoch_timestamp
+
+    - name: Create filtered_snapshots list
+      ansible.builtin.set_fact:
+        filtered_snapshots: []
+
+    - name: Loop through snapshots and add snapshots that are older than 'use_date'
+      ansible.builtin.set_fact:
+        filtered_snapshots: "{{ filtered_snapshots + [item] }}"
+      when: item.timestamp < epoch_timestamp | int
+      loop: "{{ snapshot_results.records }}"
+      no_log: true
+
+    - name: Show only snapshots that are older than 'use_date'
+      ansible.builtin.debug:
+        var: filtered_snapshots
+
+    # We could reuse "filtered_snapshots" here instead of "snapshot_results" and avoid the "when" statement.
+    # But leaving it as is for example purposes.
+    # Since this is the only mandatory task of the playbook, it can be copy-pasted and reused as a standalone task.
+    - name: Loop through list of snapshots and clone all older than the 'use_date' to vm_name+serial_number
+      scale_computing.hypercore.vm_clone:
+        vm_name: "{{ item.vm.name }}-{{ item.vm.snapshot_serial_number }}"
+        source_vm_name: "{{ item.vm.name }}"
+        source_snapshot_uuid: "{{ item.snapshot_uuid }}"
+      when: item.timestamp < epoch_timestamp | int
+      loop: "{{ snapshot_results.records }}"
+
+    # - name: Create filtered_snapshots list - second time
+    #   ansible.builtin.set_fact:
+    #     filtered_snapshots: []
+
+    # - name: Loop through snapshots and add snapshots that are older than 'use_date' - second time
+    #   ansible.builtin.set_fact:
+    #     filtered_snapshots: "{{ filtered_snapshots + [item] }}"
+    #   when: item.timestamp < epoch_timestamp | int
+    #   loop: "{{ snapshot_results.records }}"
+    #   no_log: true
+
+    # - name: Show only snapshots that are older than 'use_date' - second time
+    #   ansible.builtin.debug:
+    #     var: filtered_snapshots
diff --git a/vm_snapshot_info.yml b/vm_snapshot_info.yml
index 3c99dc4..01c2714 100644
--- a/vm_snapshot_info.yml
+++ b/vm_snapshot_info.yml
@@ -19,3 +19,17 @@
     - name: debug
       debug:
         var: snapshot
+
+    - name: Convert snapshot data to one JSON record per line # note: this is JSON Lines output, not true comma-separated values
+      set_fact:
+        snapshot_csv: "{{ snapshot.records | map('json_query', '{label: label, vm_name: vm_name, snapshot_uuid: snapshot_uuid}') | list | map('to_json') | join('\n') }}"
+
+    - name: Write snapshot data to a CSV file
+      copy:
+        content: "{{ snapshot_csv }}"
+        dest: "snapshot_info.csv"
+
+    - name: Write snapshot data to a JSON file
+      copy:
+        content: "{{ snapshot.records | to_nice_json }}"
+        dest: "snapshot_info.json"
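All of these playbooks read the cluster address from inventory_hostname and expect scale_user, scale_pass, and (in some plays) site_name to come from inventory or group_vars, with plays targeting groups such as edge or all. The patch itself ships no inventory, so the layout below is only an illustrative sketch; host names, credentials, and site names are placeholders.

# inventory.yml (illustrative only)
all:
  children:
    edge:
      hosts:
        cluster1.example.com:
          site_name: site1
        cluster2.example.com:
          site_name: site2
      vars:
        scale_user: admin
        scale_pass: "{{ vault_scale_pass }}"   # keep the real password in Ansible Vault
# example run against a single cluster, matching the "-l filter" comments in the playbooks:
#   ansible-playbook -i inventory.yml simple_vm_deploy_ecs.yml -l cluster1.example.com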