Commit 3685de5

Add a test for the StorageClass

mvalsecc committed Apr 16, 2024
1 parent b66ce1e commit 3685de5

Showing 6 changed files with 129 additions and 20 deletions.
63 changes: 63 additions & 0 deletions README.md
@@ -63,6 +63,67 @@ $ cp vars/config.yml.sample vars/config.yml
$ cp inventory/hosts.sample inventory/hosts
```

### How to use Persistent Volume Claims

Make sure the storage provisioning feature flag is set before running the playbook (a quick check is sketched after this list):

- `vars/vm_setting.yml`
  - `ff_provision_storage`: `true`
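
One simple way to confirm the flag is enabled, as a minimal sketch (the file path and variable name come from this repository's `vars/vm_setting.yml`):

```bash
# Expected output: "ff_provision_storage: true" (set it to true before running the playbook)
grep 'ff_provision_storage' vars/vm_setting.yml
```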

Confirm that the `nfs-subdir-external-provisioner` plugin is working correctly:

```bash
$ oc get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-client cluster.local/nfs-subdir-external-provisioner Retain Immediate true 26m <=== the nfs-client class must be there

$ oc get all -n nfs-subdir-external-provisioner
NAME READY STATUS RESTARTS AGE
pod/nfs-subdir-external-provisioner-8b5cd6b78-nw4zf 1/1 Running 0 14s <==== This pod must be running

NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nfs-subdir-external-provisioner 1/1 1 1 19s

NAME DESIRED CURRENT READY AGE
replicaset.apps/nfs-subdir-external-provisioner-8b5cd6b78 1 1 1 14s
replicaset.apps/nfs-subdir-external-provisioner-f64f74cfc 0 0 0 19s


```
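
If the provisioner pod is not `Running`, its logs and the recent events in the namespace are the first place to look. A minimal troubleshooting sketch (the deployment and namespace names come from the output above):

```bash
# Optional: inspect the provisioner if the pod is not coming up
oc -n nfs-subdir-external-provisioner logs deployment/nfs-subdir-external-provisioner --tail=50
oc -n nfs-subdir-external-provisioner get events --sort-by=.lastTimestamp | tail -n 20
```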

Deploy an application requiring a Persistent Volume, and confirm that it works:

```bash
$ oc get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-client cluster.local/nfs-subdir-external-provisioner Retain Immediate true 26m

$ oc apply -f tests/persistent-busybox.yml [-n <your-namespace>]

$ oc get pv -o wide [-n <your-namespace>]
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
pvc-6142a868-ed9d-455e-b9f2-12badeceb9bc 1Mi RWX Retain Bound nfs-subdir-external-provisioner/test-claim nfs-client 13m Filesystem

$ oc get pvc [-n <your-namespace>]
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-claim Bound pvc-6142a868-ed9d-455e-b9f2-12badeceb9bc 1Mi RWX nfs-client 15m

$ oc get pod demo-busybox-789874f84c-fzbm6 [-n <your-namespace>]
NAME READY STATUS RESTARTS AGE
demo-busybox-789874f84c-fzbm6 1/1 Running 0 17m

$ oc get pod demo-busybox-789874f84c-fzbm6 [-n <your-namespace>] -o yaml | grep volume -A3
volumeMounts:
- mountPath: /data/db
name: voldb1
[...]
--
volumes:
- name: voldb1
persistentVolumeClaim:
claimName: test-claim
```
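
Because the Deployment runs two replicas against a `ReadWriteMany` claim, you can optionally check that both pods see the same NFS-backed volume. A minimal sketch (pods are resolved via the `app=demo` label from the test manifest, the test file name is illustrative, and you may need to add `-n <your-namespace>`):

```bash
# Write a file from the first replica and read it back from the second one
POD1=$(oc get pods -l app=demo -o jsonpath='{.items[0].metadata.name}')
POD2=$(oc get pods -l app=demo -o jsonpath='{.items[1].metadata.name}')
oc exec "$POD1" -- sh -c 'echo hello-from-pod1 > /data/db/test.txt'
oc exec "$POD2" -- cat /data/db/test.txt   # expected output: hello-from-pod1
```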

### Run

```bash
@@ -101,6 +162,8 @@ $ ansible-galaxy collection install community.general
$ ansible-galaxy collection install community.libvirt
```

## Special Thanks

This project started with inspiration from openshift-fast-install. ( [the original version](https://github.com/konono/openshift-fast-install) and [the forked version](https://github.com/masaki-furuta/openshift-fast-install) )
10 changes: 5 additions & 5 deletions tasks/kvm_host/cleanup-all.yaml
@@ -82,18 +82,18 @@
- chk_file.stat.exists == true
- cleanup.conf == true

- name: check rocky files dir
- name: Check for rocky files folder existence
file:
path: "{{ files.rocky }}"
register: chk_rocky

- name: remove rocky dir # TODO: fix me
- name: Remove rocky files folder entirely
file:
path: "{{ files.rocky }}"
state: absent
when:
- chk_rocky.stat.exists == true
- cleanup.conf == true
- chk_rocky.state == "directory"
- cleanup.images == true

- name: check old hosts.openshift for dnsmasq
stat:
@@ -142,7 +142,7 @@
- kubectl
- oc
- openshift-install
- helm # TODO: actually move helm to /root/bin inside get_materials.yaml
- helm
when:
- cleanup.bin == true
ignore_errors: yes
17 changes: 11 additions & 6 deletions tasks/kvm_host/install-storage.yml
@@ -2,7 +2,6 @@
virt:
command: list_vms
register: all_vms
tags: nfs

# TODO: move to using the virt module...
- name: destroy old storage
@@ -11,7 +10,6 @@
virsh undefine --remove-all-storage {{ item.name }}
when: 'item.name in all_vms.list_vms'
with_items: "{{ storage }}"
tags: nfs

- name: Set libvirt image base
set_fact:
@@ -115,9 +113,8 @@
--serial pty
--cdrom /var/lib/libvirt/images/my-seed.iso
with_items: "{{ storage }}"
tags: nfs

- name: Wait for vm {{ item.name }} to get an IP
- name: Wait for storage-0 VM to get an IP
become: yes
shell: >
virsh domifaddr {{ item.name }} | awk '/ipv4/ {split($NF,a,"/"); print a[1]}'
@@ -126,9 +123,17 @@
retries: 10
delay: 5
with_items: "{{ storage }}"
tags: nfs

- name: Display NFS server login details
debug:
msg: "The NFS server is now ready!: You can log in into it with cloud@{{ IPinfo.results[0].stdout }}"
tags: nfs

# Display the kubeadmin password again, as the storage tasks have likely hidden it in the output
- name: check kubeadmin-password
slurp:
src: "{{ files.kvm }}/bare-metal/auth/kubeadmin-password"
register: kubeadmin_password

- name: show kubeadmin-password
debug:
msg: "{{ kubeadmin_password.content | b64decode }}"
Original file line number Diff line number Diff line change
@@ -3,15 +3,15 @@
# https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner#without-helm
- name: Install nfs-subdir-external-provisioner
shell: >-
/root/bin/helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
/root/bin/helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/ 2>/dev/null;
/root/bin/oc create namespace nfs-subdir-external-provisioner 2>/dev/null
/root/bin/oc create namespace nfs-subdir-external-provisioner 2>/dev/null;
/root/bin/helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
--set nfs.server=172.16.0.106 --set nfs.path=/export --set storageClass.reclaimPolicy=Retain -n nfs-subdir-external-provisioner
--set nfs.server=172.16.0.106 --set nfs.path=/export --set storageClass.reclaimPolicy=Retain -n nfs-subdir-external-provisioner;
/root/bin/oc adm policy add-scc-to-user privileged -z nfs-subdir-external-provisioner -n nfs-subdir-external-provisioner
/root/bin/oc adm policy add-scc-to-user hostmount-anyuid -z nfs-subdir-external-provisioner -n nfs-subdir-external-provisioner
/root/bin/oc rollout restart deployment nfs-subdir-external-provisioner -n nfs-subdir-external-provisioner
/root/bin/oc adm policy add-scc-to-user privileged -z nfs-subdir-external-provisioner -n nfs-subdir-external-provisioner;
/root/bin/oc adm policy add-scc-to-user hostmount-anyuid -z nfs-subdir-external-provisioner -n nfs-subdir-external-provisioner;
/root/bin/oc rollout restart deployment nfs-subdir-external-provisioner -n nfs-subdir-external-provisioner;
environment:
KUBECONFIG: "{{ files.kvm }}/bare-metal/auth/kubeconfig"
40 changes: 40 additions & 0 deletions tests/persistent-busybox.yml
@@ -0,0 +1,40 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  labels:
    app: demo
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-busybox
spec:
  replicas: 2 # Having two replicas allows us to test that the ReadWriteMany access mode is working as intended
  selector:
    matchLabels:
      app: demo
  template:
    metadata:
      labels:
        app: demo
    spec:
      volumes:
        - name: voldb1
          persistentVolumeClaim:
            claimName: test-claim
      containers:
        - name: busybox
          image: busybox
          command: ["/bin/sh"]
          args: ["-c", "sleep infinity"]
          volumeMounts:
            - name: voldb1
              mountPath: /data/db
7 changes: 4 additions & 3 deletions vars/vm_setting.yml
@@ -93,8 +93,11 @@ spec_worker:
disk: 128
disk_cache: unsafe

# Actually deploy the storage node, download the helm binary and install the plugin to use the NFS export as the backend for PVs
ff_provision_storage: true

# Currently we allow only 0 or 1 storage nodes. To prevent provisioning storage, set ff_provision_storage to false
spec_storage:
num_storage: 0
cpu: 2
ram: 4096
disk: 50 # Size of the disk that includes the NFS export
@@ -104,5 +107,3 @@
# If this value is "true", the bootstrap node will be preserved.
keep_bootstrap: false

# Actually deploy the storage node(s), download helm binary and install the plugin to use the NFS export as backend for PVs
ff_provision_storage: true
