Prompt when adding delete
Signed-off-by: Forest-L <[email protected]>
Forest-L committed Jan 14, 2021
1 parent e1b78e1 commit 3202346
Showing 1 changed file with 41 additions and 26 deletions.
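This commit makes the cleanup script both safer and quieter: it adds a delete_sure confirmation prompt so the script no longer begins tearing the cluster down the moment it is invoked, and it appends 2>/dev/null to the delete and uninstall commands so that errors from already-missing resources no longer clutter the output. A minimal sketch of the new interaction (the yes answer is illustrative):

    # The script now refuses to proceed until the operator types yes or no.
    ./scripts/kubesphere-delete.sh
    # Please reconfirm that you want to delete the KubeSphere cluster. (yes/no) yes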
scripts/kubesphere-delete.sh: 41 additions, 26 deletions
@@ -1,58 +1,73 @@
 #!/usr/bin/env bash
 
-# set -xe
+# set -x: Print commands and their arguments as they are executed.
+# set -e: Exit immediately if a command exits with a non-zero status.
+function delete_sure(){
+cat << eof
+$(echo -e "\033[1;36mNote:\033[0m")
+Delete the KubeSphere cluster, including the module kubesphere-system kubesphere-devops-system kubesphere-monitoring-system kubesphere-logging-system openpitrix-system.
+eof
+
+read -p "Please reconfirm that you want to delete the KubeSphere cluster. (yes/no) " ans
+while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
+read -p "Please reconfirm that you want to delete the KubeSphere cluster. (yes/no) " ans
+done
+
+if [[ "x"$ans == "xno" ]]; then
+exit
+fi
+}
+
+
+delete_sure
 
 # delete ks-install
-kubectl delete deploy ks-installer -n kubesphere-system
+kubectl delete deploy ks-installer -n kubesphere-system 2>/dev/null
 
 # delete helm
 for namespaces in kubesphere-system kubesphere-devops-system kubesphere-monitoring-system kubesphere-logging-system openpitrix-system
 do
-helm list -n $namespaces | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n $namespaces
+helm list -n $namespaces | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n $namespaces 2>/dev/null
 done
 
 # delete kubefed
 kubectl get cc -n kubesphere-system ks-installer -o jsonpath="{.status.multicluster}" | grep enable
 if [[ $? -eq 0 ]]; then
-helm uninstall -n kube-federation-system kubefed
-kubectl delete ns kube-federation-system
+helm uninstall -n kube-federation-system kubefed 2>/dev/null
+kubectl delete ns kube-federation-system 2>/dev/null
 fi
 
 
-helm uninstall -n kube-system snapshot-controller
+helm uninstall -n kube-system snapshot-controller 2>/dev/null
 
 # delete kubesphere deployment
-kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"`
+kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
 
 # delete monitor statefulset
-kubectl delete prometheus -n kubesphere-monitoring-system k8s
-kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"`
-kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n
+kubectl delete prometheus -n kubesphere-monitoring-system k8s 2>/dev/null
+kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
+kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n 2>/dev/null
 
 # delete pvc
 pvcs="kubesphere-system|openpitrix-system|kubesphere-devops-system|kubesphere-logging-system"
-kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n
+kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n 2>/dev/null
 
 
 # delete rolebindings
 delete_role_bindings() {
 for rolebinding in `kubectl -n $1 get rolebindings -l iam.kubesphere.io/user-ref -o jsonpath="{.items[*].metadata.name}"`
 do
-kubectl -n $1 delete rolebinding $rolebinding
+kubectl -n $1 delete rolebinding $rolebinding 2>/dev/null
 done
 }
 
 # delete roles
 delete_roles() {
-kubectl -n $1 delete role admin
-kubectl -n $1 delete role operator
-kubectl -n $1 delete role viewer
+kubectl -n $1 delete role admin 2>/dev/null
+kubectl -n $1 delete role operator 2>/dev/null
+kubectl -n $1 delete role viewer 2>/dev/null
 for role in `kubectl -n $1 get roles -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
 do
-kubectl -n $1 delete role $role
+kubectl -n $1 delete role $role 2>/dev/null
 done
 }
 
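The delete_sure function added above keeps re-prompting until it reads exactly yes or no; the "x"$ans != "xyes" form is a classic shell idiom that keeps the comparison well-formed even when $ans is empty. A condensed sketch of the same guard, with the prompt text shortened for illustration:

    # Loop until the reply is exactly "yes" or "no"; anything else re-prompts.
    read -p "Delete the cluster? (yes/no) " ans
    while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
        read -p "Delete the cluster? (yes/no) " ans
    done
    # "no" aborts before any resources are touched.
    if [[ "x"$ans == "xno" ]]; then exit; fi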
@@ -71,14 +86,14 @@ for cluster in `kubectl get clusters -o jsonpath="{.items[*].metadata.name}"`
 do
 kubectl patch cluster $cluster -p '{"metadata":{"finalizers":null}}' --type=merge
 done
-kubectl delete clusters --all
+kubectl delete clusters --all 2>/dev/null
 
 # delete workspaces
 for ws in `kubectl get workspaces -o jsonpath="{.items[*].metadata.name}"`
 do
 kubectl patch workspace $ws -p '{"metadata":{"finalizers":null}}' --type=merge
 done
-kubectl delete workspaces --all
+kubectl delete workspaces --all 2>/dev/null
 
 # delete devopsprojects
 for devopsproject in `kubectl get devopsprojects -o jsonpath="{.items[*].metadata.name}"`
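The loops in the hunk above clear metadata.finalizers before the bulk delete: once KubeSphere's controllers have been uninstalled, nothing is left to process those finalizers, and the objects would otherwise sit in Terminating indefinitely. The same two-step pattern, shown on a hypothetical workspace name:

    # Strip finalizers so deletion can complete without the owning controller...
    kubectl patch workspace demo-workspace -p '{"metadata":{"finalizers":null}}' --type=merge
    # ...then the delete goes through immediately.
    kubectl delete workspace demo-workspace 2>/dev/null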
@@ -91,37 +106,37 @@ do
 kubectl patch pipeline $pip -n `kubectl get pipeline -A | grep $pip | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge
 done
 
-kubectl delete devopsprojects --all
+kubectl delete devopsprojects --all 2>/dev/null
 
 
 # delete validatingwebhookconfigurations
 for webhook in ks-events-admission-validate users.iam.kubesphere.io validating-webhook-configuration
 do
-kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook
+kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
 done
 
 # delete mutatingwebhookconfigurations
 for webhook in ks-events-admission-mutate logsidecar-injector-admission-mutate mutating-webhook-configuration
 do
-kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook
+kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
 done
 
 # delete users
 for user in `kubectl get users -o jsonpath="{.items[*].metadata.name}"`
 do
 kubectl patch user $user -p '{"metadata":{"finalizers":null}}' --type=merge
 done
-kubectl delete users --all
+kubectl delete users --all 2>/dev/null
 
 # delete crds
 for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
 do
-if [[ $crd == *kubesphere.io ]]; then kubectl delete crd $crd; fi
+if [[ $crd == *kubesphere.io ]]; then kubectl delete crd $crd 2>/dev/null; fi
 done
 
 # delete relevance ns
 for ns in kubesphere-alerting-system kubesphere-controls-system kubesphere-devops-system kubesphere-logging-system kubesphere-monitoring-system openpitrix-system kubesphere-system
 do
-kubectl delete ns $ns
+kubectl delete ns $ns 2>/dev/null
 done
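A note on the 2>/dev/null suffix used throughout: it only discards stderr, so a command that fails because its target is already gone stays silent but still returns a non-zero exit status. For kubectl delete specifically, --ignore-not-found would suppress only the not-found case while letting real errors through; the redirect chosen here is broader and also quiets helm uninstall errors for releases that no longer exist. A small illustration with a hypothetical deployment name:

    # Silent whether or not the deployment exists; other errors are hidden too.
    kubectl delete deploy demo-deploy -n kubesphere-system 2>/dev/null
    # Alternative (not what this commit does): only the not-found case is ignored.
    kubectl delete deploy demo-deploy -n kubesphere-system --ignore-not-found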
