Manually scale down Confluence/Jira statefulsets (#435)
* Scale down sts before terraform destroys PVCs

* Fix label selector to get confluence sts too

* Add comment to function

---------

Co-authored-by: Yevhen Ivantsov <[email protected]>
bianchi2 and Yevhen Ivantsov authored Oct 17, 2024
1 parent 59af825 commit 4f7a1f4
Showing 2 changed files with 82 additions and 0 deletions.
32 changes: 32 additions & 0 deletions install.sh
@@ -393,6 +393,35 @@ set_current_context_k8s() {
fi
}

# This function calls an external script that checks whether the existing StatefulSet replica count exceeds the
# desired replica count (as set in tfvars) and, if so, scales down to the desired count to prevent Terraform
# from deleting PVCs attached to running pods
scale_down() {
set +e
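# Parse the products list from the tfvars file, stripping comments, whitespace, brackets, quotes and commas;
# e.g. products = ["jira","confluence"] becomes PRODUCTS_ARRAY=(jira confluence)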
PRODUCTS=$(grep -o '^[^#]*' "${CONFIG_ABS_PATH}" | grep "products" | sed 's/ //g')
PRODUCTS="${PRODUCTS#*=}"
PRODUCTS_ARRAY=($(echo $PRODUCTS | sed 's/\[//g' | sed 's/\]//g' | sed 's/,/ /g' | sed 's/"//g'))
if echo "$PRODUCTS" | grep -qE 'jira|confluence'; then
SNAPSHOTS_JSON_FILE_PATH=$(get_variable 'snapshots_json_file_path' "${CONFIG_ABS_PATH}")
if [ "${PATH}" ]; then
local EKS_PREFIX="atlas-"
local EKS_SUFFIX="-cluster"
local EKS_CLUSTER_NAME=${EKS_PREFIX}${ENVIRONMENT_NAME}${EKS_SUFFIX}
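# Truncate the derived name to the 38-character cap applied to cluster names here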
local EKS_CLUSTER="${EKS_CLUSTER_NAME:0:38}"
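# Point kubectl at the target EKS cluster; if the update fails (e.g. the cluster does not exist yet), skip scaling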
aws eks update-kubeconfig --name "${EKS_CLUSTER}" --region "${REGION}" &> /dev/null
if [ $? -eq 0 ]; then
for PRODUCT in "${PRODUCTS_ARRAY[@]}"; do
REPLICAS_VAR="${PRODUCT}_replica_count"
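# Read the desired replica count from tfvars, e.g. a line like jira_replica_count = 2 yields 2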
DESIRED_REPLICAS=$(grep -E "^\s*${REPLICAS_VAR}\s*=" "${CONFIG_ABS_PATH}" | awk -F= '{gsub(/ /, "", $2); print $2}')
./scripts/scale.sh "${PRODUCT}" "${DESIRED_REPLICAS}"
done
fi
fi
fi
set -e
}


resume_bamboo_server() {
# Please note that if you import a dataset, make sure the admin credentials in the config file (config.tfvars)
# match the admin info stored in the dataset you import.
@@ -498,6 +527,9 @@ generate_terraform_backend_variables
# Create S3 bucket and dynamodb table to keep state
create_tfstate_resources

# Check if this is a scale-down event and, if so, scale down manually (Jira and Confluence only)
scale_down | tee -a "${LOG_FILE}"

# Deploy the infrastructure
create_update_infrastructure

50 changes: 50 additions & 0 deletions scripts/scale.sh
@@ -0,0 +1,50 @@
#!/usr/bin/env bash

source "./scripts/common.sh"

NAMESPACE="atlassian"
TIMEOUT=120
INTERVAL=5
PRODUCT=$1
DESIRED_REPLICAS=$2

# If no StatefulSets exist yet (initial release), there is nothing to scale down
STS=$(kubectl get sts -l=app.kubernetes.io/name="${PRODUCT}" -n "${NAMESPACE}" -o jsonpath='{.items[*].metadata.name}')
if [ -z "$STS" ]; then
log "No StatefulSets found"
exit 0
fi

# Get the current replica count of the existing StatefulSet
STS_REPLICAS=$(kubectl get sts $STS -n "${NAMESPACE}" -o jsonpath='{.spec.replicas}')
if [ -z "${STS_REPLICAS}" ]; then
log "Failed to get StatefulSets replicas"
exit 0
fi
# Check if DESIRED_REPLICAS is less than STS_REPLICAS and manually scale down
# before Terraform attempts to destroy the local-home PVC, PV, and EBS volume
if [ "${DESIRED_REPLICAS}" -lt "${STS_REPLICAS}" ]; then
log "Scaling down ${PRODUCT} StatefulSet to ${DESIRED_REPLICAS} replicas"
kubectl scale sts "${PRODUCT}" -n "${NAMESPACE}" --replicas="${DESIRED_REPLICAS}"

# make sure pods are gone
START_TIME=$(date +%s)
while true; do
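# Poll every INTERVAL seconds until no pods report Terminating, giving up after TIMEOUT seconds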
TERMINATING_PODS=$(kubectl get pods -n "${NAMESPACE}" | grep Terminating)
if [ -z "${TERMINATING_PODS}" ]; then
log "No pods in Terminating state"
exit 0
else
log "Terminating pods found: ${TERMINATING_PODS}"
fi
CURRENT_TIME=$(date +%s)
ELAPSED_TIME=$((CURRENT_TIME - START_TIME))
if [ $ELAPSED_TIME -ge $TIMEOUT ]; then
log "ERROR" "Timeout reached. Pods are still in Terminating state."
exit 1
fi
sleep ${INTERVAL}
done
else
log "No need to scale down"
fi

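For reference, install.sh invokes this script once per product during a scale-down. A minimal manual run, with an illustrative product name and replica count, would look like this (run from the repository root so that ./scripts/common.sh resolves):

./scripts/scale.sh jira 1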