diff --git a/cluster-provision/k8s/1.31/bind_device_to_vfio.sh b/cluster-provision/k8s/1.31/bind_device_to_vfio.sh
deleted file mode 100755
index 599d979cbc..0000000000
--- a/cluster-provision/k8s/1.31/bind_device_to_vfio.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-if [ "$1" != "--vendor" ]; then
-    echo "No vendor provided"
-    exit 1
-fi
-vendor=$2
-
-function get_device_driver() {
-    local dev_driver=$(readlink $driver_path)
-    echo "${dev_driver##*/}"
-}
-
-# find the PCI address of the device by vendor_id:product_id
-pci_address=(`lspci -D -d ${vendor}`)
-pci_address="${pci_address[0]}"
-dev_sysfs_path="/sys/bus/pci/devices/$pci_address"
-
-if [[ ! -d $dev_sysfs_path ]]; then
-    echo "Error: PCI address ${pci_address} does not exist!" 1>&2
-    exit 1
-fi
-
-if [[ ! -d "$dev_sysfs_path/iommu/" ]]; then
-    echo "Error: No vIOMMU found in the VM" 1>&2
-    exit 1
-fi
-
-# set device driver path
-driver_path="${dev_sysfs_path}/driver"
-driver_override="${dev_sysfs_path}/driver_override"
-
-# load the vfio-pci module
-modprobe -i vfio-pci
-
-
-driver=$(get_device_driver)
-
-if [[ "$driver" != "vfio-pci" ]]; then
-
-    # unbind from the original device driver
-    echo ${pci_address} > "${driver_path}/unbind"
-    # bind the device to vfio-pci driver
-    echo "vfio-pci" > ${driver_override}
-    echo $pci_address > /sys/bus/pci/drivers/vfio-pci/bind
-fi
-
-# The device should now be using the vfio-pci driver
-new_driver=$(get_device_driver)
-if [[ $new_driver != "vfio-pci" ]]; then
-    echo "Error: Failed to bind to vfio-pci driver" 1>&2
-    exit 1
-fi
diff --git a/cluster-provision/k8s/1.31/nfs-csi.sh b/cluster-provision/k8s/1.31/nfs-csi.sh
deleted file mode 100644
index fb2304da89..0000000000
--- a/cluster-provision/k8s/1.31/nfs-csi.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-set -xe
-
-# Deploy NFS CSI manifests
-kubectl --kubeconfig /etc/kubernetes/admin.conf create ns nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/nfs-service.yaml -n nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/nfs-server.yaml -n nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/csi-nfs-controller-rbac.yaml -n nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/csi-nfs-driverinfo.yaml -n nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/csi-nfs-controller.yaml -n nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/csi-nfs-node.yaml -n nfs-csi
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/csi-nfs-sc.yaml -n nfs-csi
-
-# Deploy test PVC and wait for it to get bound
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nfs-csi/csi-nfs-test-pvc.yaml -n nfs-csi
-until kubectl --kubeconfig /etc/kubernetes/admin.conf get pvc -n nfs-csi pvc-nfs-dynamic -o jsonpath='{.status.phase}' | grep Bound; do
-    ((count++)) && ((count == 120)) && echo "NFS CSI test PVC not ready on time" && exit 1
-    if ! ((count % 6 )); then
-        kubectl --kubeconfig /etc/kubernetes/admin.conf describe pvc -n nfs-csi
-    fi
-    echo "Waiting for NFS CSI test PVC to be Bound, sleeping 5s and rechecking"
-    sleep 5
-done
-kubectl --kubeconfig /etc/kubernetes/admin.conf delete -f /tmp/nfs-csi/csi-nfs-test-pvc.yaml -n nfs-csi
diff --git a/cluster-provision/k8s/1.31/node01.sh b/cluster-provision/k8s/1.31/node01.sh
deleted file mode 100755
index d5798737b8..0000000000
--- a/cluster-provision/k8s/1.31/node01.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-kubeadm_conf="/etc/kubernetes/kubeadm.conf"
-cni_manifest="/provision/cni.yaml"
-if [ -f /home/vagrant/single_stack ]; then
-    kubeadm_conf="/etc/kubernetes/kubeadm_ipv6.conf"
-    cni_manifest="/provision/cni_ipv6.yaml"
-fi
-
-if [ -f /home/vagrant/enable_audit ]; then
-    apiVer=$(head -1 /etc/kubernetes/audit/adv-audit.yaml)
-    echo $apiVer > /etc/kubernetes/audit/adv-audit.yaml
-
-    cat <<EOF >> /etc/kubernetes/audit/adv-audit.yaml
-kind: Policy
-rules:
-- level: Metadata
-EOF
-fi
-
-timeout=30
-interval=5
-while ! hostnamectl |grep Transient ; do
-    echo "Waiting for dhclient to set the hostname from dnsmasq"
-    sleep $interval
-    timeout=$(( $timeout - $interval ))
-    if [ $timeout -le 0 ]; then
-        exit 1
-    fi
-done
-
-# Configure cgroup v2 settings
-if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
-    echo "Configuring cgroup v2"
-
-    CRIO_CONF_DIR=/etc/crio/crio.conf.d
-    mkdir -p ${CRIO_CONF_DIR}
-    cat << EOF > ${CRIO_CONF_DIR}/00-cgroupv2.conf
-[crio.runtime]
-conmon_cgroup = "pod"
-cgroup_manager = "systemd"
-EOF
-
-    systemctl stop kubelet
-    systemctl restart crio
-    systemctl start kubelet
-fi
-
-
-# Wait for crio, else network might not be ready yet
-while [[ `systemctl status crio | grep active | wc -l` -eq 0 ]]
-do
-    sleep 2
-done
-
-# Disable swap
-sudo swapoff -a
-
-until ip address show dev eth0 | grep global | grep inet6; do sleep 1; done
-
-# 1.23 has deprecated --experimental-patches /provision/kubeadm-patches/, we now mention the patch directory in kubeadm.conf
-kubeadm init --config "$kubeadm_conf" -v5
-
-kubectl --kubeconfig=/etc/kubernetes/admin.conf patch deployment coredns -n kube-system -p "$(cat /provision/kubeadm-patches/add-security-context-deployment-patch.yaml)"
-# cni manifest is already configured at provision stage.
-kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f "$cni_manifest"
-
-kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes node01 node-role.kubernetes.io/control-plane:NoSchedule-
-
-# Wait for api server to be up.
-kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes --no-headers
-kubectl_rc=$?
-retry_counter=0
-while [[ $retry_counter -lt 20 && $kubectl_rc -ne 0 ]]; do
-    sleep 10
-    echo "Waiting for api server to be available..."
-    kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes --no-headers
-    kubectl_rc=$?
-    retry_counter=$((retry_counter + 1))
-done
-
-echo "Printing kuberenetes version"
-kubectl --kubeconfig=/etc/kubernetes/admin.conf version
-
-
-local_volume_manifest="/provision/local-volume.yaml"
-kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f "$local_volume_manifest"
-
-# ceph mon permission
-mkdir -p /var/lib/rook
-chcon -t container_file_t /var/lib/rook
diff --git a/cluster-provision/k8s/1.31/nodes.sh b/cluster-provision/k8s/1.31/nodes.sh
deleted file mode 100755
index 7fb1f5a120..0000000000
--- a/cluster-provision/k8s/1.31/nodes.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-source /var/lib/kubevirtci/shared_vars.sh
-
-nodeip=
-control_ip=192.168.66.101
-if [ -f /home/vagrant/single_stack ]; then
-    nodeip="--node-ip=::"
-    control_ip=[fd00::101]
-fi
-
-timeout=30
-interval=5
-while ! hostnamectl |grep Transient ; do
-    echo "Waiting for dhclient to set the hostname from dnsmasq"
-    sleep $interval
-    timeout=$(( $timeout - $interval ))
-    if [ $timeout -le 0 ]; then
-        exit 1
-    fi
-done
-
-# Configure cgroup v2 settings
-if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
-    echo "Configuring cgroup v2"
-
-    CRIO_CONF_DIR=/etc/crio/crio.conf.d
-    mkdir -p ${CRIO_CONF_DIR}
-    cat << EOF > ${CRIO_CONF_DIR}/00-cgroupv2.conf
-[crio.runtime]
-conmon_cgroup = "pod"
-cgroup_manager = "systemd"
-EOF
-
-    # kubelet will be started later on
-    systemctl stop kubelet
-    systemctl restart crio
-fi
-
-# Wait for crio, else network might not be ready yet
-while [[ `systemctl status crio | grep active | wc -l` -eq 0 ]]
-do
-    sleep 2
-done
-
-if [ -f /etc/sysconfig/kubelet ]; then
-    # TODO use config file! this is deprecated
-    cat <<EOT >>/etc/sysconfig/kubelet
-KUBELET_EXTRA_ARGS=${KUBELET_CGROUP_ARGS} --fail-swap-on=false ${nodeip} --feature-gates=CPUManager=true,NodeSwap=true --cpu-manager-policy=static --kube-reserved=cpu=500m --system-reserved=cpu=500m
-EOT
-else
-    cat <<EOT >>/etc/systemd/system/kubelet.service.d/09-kubeadm.conf
-Environment="KUBELET_CPUMANAGER_ARGS=--fail-swap-on=false --feature-gates=CPUManager=true,NodeSwap=true ${nodeip} --cpu-manager-policy=static --kube-reserved=cpu=500m --system-reserved=cpu=500m"
-EOT
-sed -i 's/$KUBELET_EXTRA_ARGS/$KUBELET_EXTRA_ARGS $KUBELET_CPUMANAGER_ARGS/' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
-fi
-
-systemctl daemon-reload
-service kubelet restart
-kubelet_rc=$?
-if [[ $kubelet_rc -ne 0 ]]; then
-    rm -rf /var/lib/kubelet/cpu_manager_state
-    service kubelet restart
-fi
-
-# Disable swap
-sudo swapoff -a
-
-until ip address show dev eth0 | grep global | grep inet6; do sleep 1; done
-
-kubeadm join --token abcdef.1234567890123456 ${control_ip}:6443 --ignore-preflight-errors=all --discovery-token-unsafe-skip-ca-verification=true
-
-# ceph mon permission
-mkdir -p /var/lib/rook
-chcon -t container_file_t /var/lib/rook
diff --git a/cluster-provision/k8s/1.31/prometheus.sh b/cluster-provision/k8s/1.31/prometheus.sh
deleted file mode 100755
index 8564e7e998..0000000000
--- a/cluster-provision/k8s/1.31/prometheus.sh
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/bash
-# set -xe
-
-function usage()
-{
-    echo "Script to run an experiment"
-    echo ""
-    echo -e "\t-h --help"
-    echo -e "\t-a --alertmanager    deploy prometheus alertmanager (default false)"
-    echo -e "\t-g --grafana         deploy grafana with dashboards (default false)"
-    echo ""
-}
-
-ALERTMANAGER="false"
-GRAFANA="false"
-while [ "$1" != "" ]; do
-    PARAM=$1; shift
-    VALUE=$1; shift
-    case $PARAM in
-        -h | --help)
-            usage
-            exit 0
-            ;;
-        -a | --alertmanager)
-            ALERTMANAGER=$VALUE
-            ;;
-        -g | --grafana)
-            GRAFANA=$VALUE
-            ;;
-        *)
-            echo "ERROR: unknown parameter \"$PARAM\""
-            usage
-            exit 1
-            ;;
-    esac
-done
-
-# Deploy Prometheus operator
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/0namespace-namespace.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-clusterRoleBinding.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-clusterRole.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-serviceAccount.yaml
-### Prometheus operator CRDs
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0prometheusCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0podmonitorCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0probeCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0thanosrulerCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-0alertmanagerConfigCustomResourceDefinition.yaml
-### Prometheus operator deployment
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-service.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus-operator/prometheus-operator-deployment.yaml
-
-while [[ $(kubectl --kubeconfig /etc/kubernetes/admin.conf -n monitoring get pods -l app.kubernetes.io/name=prometheus-operator -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do
-    echo "Waiting for prometheus operator to be Ready, sleeping 20s and rechecking" && sleep 20;
-done
-
-# Deploy Prometheus
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-clusterRole.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-clusterRoleBinding.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-roleBindingConfig.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-roleBindingSpecificNamespaces.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-roleConfig.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-roleSpecificNamespaces.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-serviceAccount.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-podDisruptionBudget.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-service.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/prometheus/prometheus-prometheus.yaml
-
-# Deploy Monitors
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/kubernetes-serviceMonitorApiserver.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/kubernetes-serviceMonitorCoreDNS.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/kubernetes-serviceMonitorKubeControllerManager.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/kubernetes-serviceMonitorKubeScheduler.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/kubernetes-serviceMonitorKubelet.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/prometheus-operator-serviceMonitor.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/monitors/prometheus-serviceMonitor.yaml
-
-# Deploy kube-state-metrics
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-clusterRole.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-clusterRoleBinding.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-prometheusRule.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-serviceAccount.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-serviceMonitor.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-service.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/kube-state-metrics/kube-state-metrics-deployment.yaml
-
-# Deploy node-exporter
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-clusterRole.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-clusterRoleBinding.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-prometheusRule.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-serviceAccount.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-serviceMonitor.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-daemonset.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/node-exporter/node-exporter-service.yaml
-
-# Deploy alertmanager
-if [[ ($ALERTMANAGER != "false") && ($ALERTMANAGER != "FALSE") ]]; then
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager/alertmanager-secret.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager/alertmanager-serviceAccount.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager/alertmanager-serviceMonitor.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager/alertmanager-podDisruptionBudget.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager/alertmanager-service.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager/alertmanager-alertmanager.yaml
-
-    # Deploy alertmanager-rules
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager-rules/alertmanager-prometheusRule.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager-rules/kube-prometheus-prometheusRule.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager-rules/prometheus-operator-prometheusRule.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/alertmanager-rules/prometheus-prometheusRule.yaml
-fi
-
-# Deploy grafana
-if [[ ($GRAFANA != "false") && ($GRAFANA != "FALSE") ]]; then
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-dashboardDatasources.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-dashboardDefinitions.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-dashboardSources.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-deployment.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-service.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-serviceAccount.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-serviceMonitor.yaml
-    kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/prometheus/grafana/grafana-config.yaml
-fi
-
-# Deploy nodeports
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/nodeports/monitoring.yaml
diff --git a/cluster-provision/k8s/1.31/psa.sh b/cluster-provision/k8s/1.31/psa.sh
deleted file mode 100644
index 01a2ac60af..0000000000
--- a/cluster-provision/k8s/1.31/psa.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-set -xe
-
-rm /etc/kubernetes/psa.yaml
-cat > /etc/kubernetes/psa.yaml < /etc/sysctl.d/realtime.conf
-sysctl --system
diff --git a/cluster-provision/k8s/1.31/rook-ceph.sh b/cluster-provision/k8s/1.31/rook-ceph.sh
deleted file mode 100755
index 3d18087414..0000000000
--- a/cluster-provision/k8s/1.31/rook-ceph.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-set -xe
-
-# Deploy common snapshot controller and CRDs
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/snapshot.storage.k8s.io_volumesnapshots.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/rbac-snapshot-controller.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/setup-snapshot-controller.yaml
-
-# Deploy Rook/Ceph operator
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/common.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/crds.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/operator.yaml
-
-# Create cluster
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/cluster-test.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/pool-test.yaml
-
-until kubectl --kubeconfig /etc/kubernetes/admin.conf get cephblockpools -n rook-ceph replicapool -o jsonpath='{.status.phase}' | grep Ready; do
-    ((count++)) && ((count == 120)) && echo "Ceph not ready in time" && exit 1
-    if ! ((count % 6 )); then
-        kubectl --kubeconfig /etc/kubernetes/admin.conf get pods -n rook-ceph
-    fi
-    echo "Waiting for Ceph to be Ready, sleeping 5s and rechecking"
-    sleep 5
-done
-
-# k8s resources
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/storageclass-test.yaml
-kubectl --kubeconfig /etc/kubernetes/admin.conf create -f /tmp/ceph/snapshotclass.yaml
-
-# set default storageclass
-kubectl --kubeconfig /etc/kubernetes/admin.conf patch storageclass local -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
-kubectl --kubeconfig /etc/kubernetes/admin.conf patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'