diff --git a/test/regional-dr.yaml b/test/regional-dr.yaml
index 5c9d2e50fb..dcd33f5bec 100644
--- a/test/regional-dr.yaml
+++ b/test/regional-dr.yaml
@@ -11,12 +11,14 @@ profiles:
     disk_size: "50g"
     scripts:
       - file: olm/start
+      - file: rook/start
   - name: "dr2"
     network: default
     extra_disks: 1
     disk_size: "50g"
     scripts:
       - file: olm/start
+      - file: rook/start
   - name: "hub"
     network: default
     scripts:
diff --git a/test/rook/cluster-test-kustomization.yaml b/test/rook/cluster-test-kustomization.yaml
new file mode 100644
index 0000000000..929b68916b
--- /dev/null
+++ b/test/rook/cluster-test-kustomization.yaml
@@ -0,0 +1,24 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+---
+resources:
+  - ${rook_base_url}/cluster-test.yaml
+patchesJson6902:
+  - target:
+      kind: CephCluster
+      name: my-cluster
+      namespace: rook-ceph
+    patch: |-
+      # Minikube does not persist /var/lib/rook, but it persists /data/*
+      # https://minikube.sigs.k8s.io/docs/handbook/persistent_volumes/#a-note-on-mounts-persistence-and-minikube-hosts
+      - op: replace
+        path: /spec/dataDirHostPath
+        value: /data/rook
+      # Enable host networking - ceph monitors will be available on the host
+      # network, exposed outside of the cluster.
+      - op: add
+        path: /spec/network
+        value:
+          provider: host
diff --git a/test/rook/operator-kustomization.yaml b/test/rook/operator-kustomization.yaml
new file mode 100644
index 0000000000..1a7623ed8e
--- /dev/null
+++ b/test/rook/operator-kustomization.yaml
@@ -0,0 +1,28 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+---
+resources:
+  - ${rook_base_url}/operator.yaml
+patchesJson6902:
+  - target:
+      kind: ConfigMap
+      name: rook-ceph-operator-config
+      namespace: rook-ceph
+    patch: |-
+      - op: add
+        path: /data/CSI_ENABLE_CSIADDONS
+        value: 'true'
+      - op: add
+        path: /data/ROOK_CSIADDONS_IMAGE
+        value: quay.io/csiaddons/k8s-sidecar:latest
+      - op: add
+        path: /data/CSI_ENABLE_OMAP_GENERATOR
+        value: 'true'
+      - op: add
+        path: /data/ROOK_CSI_ALLOW_UNSUPPORTED_VERSION
+        value: 'true'
+      - op: add
+        path: /data/ROOK_CSI_CEPH_IMAGE
+        value: quay.io/cephcsi/cephcsi:canary
diff --git a/test/rook/replica-pool.yaml b/test/rook/replica-pool.yaml
new file mode 100644
index 0000000000..18d50c64be
--- /dev/null
+++ b/test/rook/replica-pool.yaml
@@ -0,0 +1,20 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+---
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: replicapool
+  namespace: rook-ceph
+spec:
+  replicated:
+    size: 1
+    requireSafeReplicaSize: false
+  mirroring:
+    enabled: true
+    mode: image
+    snapshotSchedules:
+      - interval: 2m
+        startTime: 14:00:00-05:00
diff --git a/test/rook/start b/test/rook/start
new file mode 100755
index 0000000000..c9857b0f83
--- /dev/null
+++ b/test/rook/start
@@ -0,0 +1,156 @@
+#!/usr/bin/env -S python3 -u
+
+# SPDX-FileCopyrightText: The RamenDR authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+import drenv
+
+# Update this when upgrading rook.
+ROOK_BASE_URL = "https://raw.githubusercontent.com/rook/rook/release-1.10/deploy/examples"
+
+# Using main till a release is available with lastSyncTime.
+CSI_ADDON_BASE_URL = "https://raw.githubusercontent.com/csi-addons/kubernetes-csi-addons/main/deploy/controller"
+
+if len(sys.argv) != 2:
+    print(f"Usage: {sys.argv[0]} cluster")
+    sys.exit(1)
+
+cluster = sys.argv[1]
+
+drenv.log_progress("Deploying rook ceph crds")
+drenv.kubectl(
+    "apply",
+    "--filename", f"{ROOK_BASE_URL}/crds.yaml",
+    profile=cluster,
+)
+
+drenv.log_progress("Deploying rook common")
+drenv.kubectl(
+    "apply",
+    "--filename", f"{ROOK_BASE_URL}/common.yaml",
+    profile=cluster,
+)
+
+drenv.log_progress("Deploying csi addon for volume replication")
+drenv.kubectl(
+    "apply",
+    "--filename", f"{CSI_ADDON_BASE_URL}/crds.yaml",
+    profile=cluster,
+)
+drenv.kubectl(
+    "apply",
+    "--filename", f"{CSI_ADDON_BASE_URL}/rbac.yaml",
+    profile=cluster,
+)
+drenv.kubectl(
+    "apply",
+    "--filename", f"{CSI_ADDON_BASE_URL}/setup-controller.yaml",
+    profile=cluster,
+)
+
+drenv.log_progress("Deploying kustomized rook operator")
+with drenv.kustomization(
+    "rook/operator-kustomization.yaml",
+    rook_base_url=ROOK_BASE_URL,
+) as kustomization:
+    drenv.kubectl("apply", "--kustomize", kustomization, profile=cluster)
+
+drenv.log_progress("Waiting until rook ceph operator is rolled out")
+drenv.kubectl(
+    "rollout", "status", "deployment/rook-ceph-operator",
+    "--namespace", "rook-ceph",
+    "--timeout", "300s",
+    profile=cluster,
+)
+
+drenv.log_progress("Waiting until rook ceph operator is ready")
+drenv.kubectl(
+    "wait", "pod",
+    "--for", "condition=Ready",
+    "--namespace", "rook-ceph",
+    "--selector", "app=rook-ceph-operator",
+    "--timeout", "300s",
+    profile=cluster,
+)
+
+drenv.log_progress("Deploying kustomized rook ceph cluster")
+with drenv.kustomization(
+    "rook/cluster-test-kustomization.yaml",
+    rook_base_url=ROOK_BASE_URL,
+) as kustomization:
+    drenv.kubectl("apply", "--kustomize", kustomization, profile=cluster)
+
+drenv.log_progress("Creating a mirroring enabled RBD pool")
+drenv.kubectl(
+    "apply",
+    "--filename", "rook/replica-pool.yaml",
+    profile=cluster,
+)
+
+drenv.log_progress("Creating a storage class")
+drenv.kubectl(
+    "apply",
+    "--filename", "rook/storage-class.yaml",
+    profile=cluster,
+)
+
+drenv.wait_for(
+    "cephcluster/my-cluster",
+    output="jsonpath={.status.phase}",
+    namespace="rook-ceph",
+    timeout=60,
+    profile=cluster,
+)
+
+drenv.log_progress("Waiting until rook ceph cluster is ready")
+drenv.kubectl(
+    "wait", "CephCluster", "my-cluster",
+    "--for", "jsonpath={.status.phase}=Ready",
+    "--namespace", "rook-ceph",
+    "--timeout", "300s",
+    profile=cluster,
+)
+
+drenv.wait_for(
+    "cephblockpool/replicapool",
+    output="jsonpath={.status.phase}",
+    namespace="rook-ceph",
+    timeout=60,
+    profile=cluster,
+)
+
+drenv.log_progress("Waiting until ceph block pool is ready")
+drenv.kubectl(
+    "wait", "CephBlockPool", "replicapool",
+    "--for", "jsonpath={.status.phase}=Ready",
+    "--namespace", "rook-ceph",
+    "--timeout", "300s",
+    profile=cluster,
+)
+
+drenv.log_progress("Waiting for replica pool peer token")
+drenv.kubectl(
+    "wait", "CephBlockPool", "replicapool",
+    "--for", "jsonpath={.status.info.rbdMirrorBootstrapPeerSecretName}=pool-peer-token-replicapool",
+    "--namespace", "rook-ceph",
+    "--timeout", "300s",
+    profile=cluster,
+)
+
+drenv.log_progress("Deploying rook ceph toolbox")
+drenv.kubectl(
+    "apply",
+    "--filename", f"{ROOK_BASE_URL}/toolbox.yaml",
+    profile=cluster,
+)
+
+drenv.log_progress("Waiting until toolbox is rolled out")
+drenv.kubectl(
+    "rollout", "status", "deployment/rook-ceph-tools",
+    "--namespace", "rook-ceph",
+    "--timeout", "300s",
+    profile=cluster,
+)
diff --git a/test/rook/storage-class.yaml b/test/rook/storage-class.yaml
new file mode 100644
index 0000000000..0622a17f58
--- /dev/null
+++ b/test/rook/storage-class.yaml
@@ -0,0 +1,23 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-block
+provisioner: rook-ceph.rbd.csi.ceph.com
+parameters:
+  clusterID: rook-ceph
+  pool: replicapool
+  imageFormat: "2"
+  imageFeatures: layering
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/fstype: ext4
+reclaimPolicy: Delete