Manually scale down Confluence/Jira statefulsets #435
```diff
@@ -393,6 +393,32 @@ set_current_context_k8s() {
   fi
 }
 
+scale_down() {
+  set +e
+  PRODUCTS=$(grep -o '^[^#]*' "${CONFIG_ABS_PATH}" | grep "products" | sed 's/ //g')
+  PRODUCTS="${PRODUCTS#*=}"
+  PRODUCTS_ARRAY=($(echo $PRODUCTS | sed 's/\[//g' | sed 's/\]//g' | sed 's/,/ /g' | sed 's/"//g'))
+  if echo "$PRODUCTS" | grep -qE 'jira|confluence'; then
+    SNAPSHOTS_JSON_FILE_PATH=$(get_variable 'snapshots_json_file_path' "${CONFIG_ABS_PATH}")
+    if [ "${SNAPSHOTS_JSON_FILE_PATH}" ]; then
+      local EKS_PREFIX="atlas-"
+      local EKS_SUFFIX="-cluster"
+      local EKS_CLUSTER_NAME=${EKS_PREFIX}${ENVIRONMENT_NAME}${EKS_SUFFIX}
+      local EKS_CLUSTER="${EKS_CLUSTER_NAME:0:38}"
+      aws eks update-kubeconfig --name "${EKS_CLUSTER}" --region "${REGION}" &> /dev/null
+      if [ $? -eq 0 ]; then
+        for PRODUCT in "${PRODUCTS_ARRAY[@]}"; do
+          REPLICAS_VAR=${PRODUCT}'_replica_count'
+          DESIRED_REPLICAS=$(grep -E "^\s*${REPLICAS_VAR}\s*=" "${CONFIG_ABS_PATH}" | awk -F= '{gsub(/ /, "", $2); print $2}')
+          ./scripts/scale.sh "${PRODUCT}" "${DESIRED_REPLICAS}"
+        done
+      fi
+    fi
+  fi
+  set -e
+}
+
 resume_bamboo_server() {
   # Please note that if you import a dataset, make sure the admin credentials in the
   # config file (config.tfvars) match the admin info stored in the dataset you import.
```

**Reviewer** (on `EKS_PREFIX` / `EKS_SUFFIX`): Are these fixed constants? Any chance we can refer to them rather than magic strings?

**Author:** The cluster name is built on top of `environment_name`; I have reused a chunk of existing code from install.sh :)
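For illustration, here is a minimal sketch of how the parsing and naming above behave. The config file contents below are hypothetical; only the keys (`products`, `<product>_replica_count`) and the `atlas-…-cluster` naming come from the diff:

```bash
#!/usr/bin/env bash
# Hypothetical stand-in for config.tfvars (CONFIG_ABS_PATH in install.sh)
CONFIG_ABS_PATH=$(mktemp)
cat > "${CONFIG_ABS_PATH}" <<'EOF'
products = ["jira", "confluence"]
jira_replica_count = 2
confluence_replica_count = 1
EOF

# Same pipeline as scale_down(): strip comments, grab the products line,
# then drop spaces, brackets, quotes and commas to build a bash array.
PRODUCTS=$(grep -o '^[^#]*' "${CONFIG_ABS_PATH}" | grep "products" | sed 's/ //g')
PRODUCTS="${PRODUCTS#*=}"
PRODUCTS_ARRAY=($(echo $PRODUCTS | sed 's/\[//g' | sed 's/\]//g' | sed 's/,/ /g' | sed 's/"//g'))
echo "${PRODUCTS_ARRAY[@]}"    # -> jira confluence

# Per-product replica lookup, as in the loop above:
for PRODUCT in "${PRODUCTS_ARRAY[@]}"; do
  REPLICAS_VAR=${PRODUCT}'_replica_count'
  grep -E "^\s*${REPLICAS_VAR}\s*=" "${CONFIG_ABS_PATH}" | awk -F= '{gsub(/ /, "", $2); print $2}'
done                           # -> 2, then 1

# Cluster naming: atlas-<environment_name>-cluster, capped at 38 characters
ENVIRONMENT_NAME="a-hypothetical-environment-name"
EKS_CLUSTER_NAME="atlas-${ENVIRONMENT_NAME}-cluster"
echo "${EKS_CLUSTER_NAME:0:38}"   # truncated to the 38-character limit used above
```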
```diff
@@ -498,6 +524,9 @@ generate_terraform_backend_variables
 # Create S3 bucket and dynamodb table to keep state
 create_tfstate_resources
 
+# Check if this is a scale-down event and scale down manually (Jira and Confluence only)
+scale_down | tee -a "${LOG_FILE}"
+
 # Deploy the infrastructure
 create_update_infrastructure
```
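One call-site behavior worth noting, shown as a minimal generic sketch (nothing below is from the PR): when a command is piped into `tee`, the pipeline's exit status is `tee`'s unless `pipefail` is set, so a non-zero exit from `scale_down` (for example, the timeout `exit 1` in scale.sh) would not by itself stop the script:

```bash
# /tmp/example.log is just a scratch path for this demo.
# Without pipefail the pipeline reports tee's (successful) status:
( exit 1 ) | tee -a /tmp/example.log
echo "status: $?"    # -> status: 0

# With pipefail the failing producer wins:
set -o pipefail
( exit 1 ) | tee -a /tmp/example.log
echo "status: $?"    # -> status: 1
```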
New file (invoked above as `./scripts/scale.sh`), `@@ -0,0 +1,50 @@`:

```bash
#!/usr/bin/env bash

source "./scripts/common.sh"

NAMESPACE="atlassian"
TIMEOUT=120   # seconds to wait for terminating pods
INTERVAL=5    # polling interval in seconds
PRODUCT=$1
DESIRED_REPLICAS=$2

# Check if this is the initial release and exit early if no StatefulSet exists yet
STS=$(kubectl get sts -l=app.kubernetes.io/name="${PRODUCT}" -n "${NAMESPACE}" -o jsonpath='{.items[*].metadata.name}')
if [ -z "${STS}" ]; then
  log "No StatefulSets found"
  exit 0
fi

# Get the existing StatefulSet replica count
STS_REPLICAS=$(kubectl get sts ${STS} -n "${NAMESPACE}" -o jsonpath='{.spec.replicas}')
if [ -z "${STS_REPLICAS}" ]; then
  log "Failed to get StatefulSet replicas"
  exit 0
fi

# If DESIRED_REPLICAS is less than STS_REPLICAS, scale down manually
# before Terraform attempts to destroy the local-home PVC, PV and EBS volume
if [ "${DESIRED_REPLICAS}" -lt "${STS_REPLICAS}" ]; then
  log "Scaling down ${PRODUCT} StatefulSet to ${DESIRED_REPLICAS} replicas"
  kubectl scale sts "${PRODUCT}" -n "${NAMESPACE}" --replicas="${DESIRED_REPLICAS}"

  # Make sure the evicted pods are gone before returning
  START_TIME=$(date +%s)
  while true; do
    TERMINATING_PODS=$(kubectl get pods -n "${NAMESPACE}" | grep Terminating)
    if [ -z "${TERMINATING_PODS}" ]; then
      log "No pods in Terminating state"
      exit 0
    else
      log "Terminating pods found: ${TERMINATING_PODS}"
    fi
    CURRENT_TIME=$(date +%s)
    ELAPSED_TIME=$((CURRENT_TIME - START_TIME))
    if [ ${ELAPSED_TIME} -ge ${TIMEOUT} ]; then
      log "ERROR" "Timeout reached. Pods are still in Terminating state."
      exit 1
    fi
    sleep ${INTERVAL}
  done
else
  log "No need to scale down"
fi
```
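A usage sketch for the script above, assuming the kubeconfig already points at the target cluster (`jira` and `1` are example arguments):

```bash
# Invoked from install.sh as:
./scripts/scale.sh jira 1
# -> scales the "jira" StatefulSet in the "atlassian" namespace down to 1 replica,
#    then polls every INTERVAL=5s (up to TIMEOUT=120s) until no pod in the
#    namespace is in the Terminating state.
```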
**Reviewer:** This is not the easiest read; it would be useful to have some comments describing at least the flow.

**Author:** Thanks, added a comment.
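As an aside, not part of the PR: since StatefulSet pods are ordinal, the pods removed when scaling from `STS_REPLICAS` down to `DESIRED_REPLICAS` are exactly the ones with ordinals `DESIRED_REPLICAS` through `STS_REPLICAS - 1`, so the polling loop could arguably be replaced with `kubectl wait` on those specific pods. A sketch under that assumption, reusing the script's variables (note that how `--for=delete` treats an already-missing pod varies across kubectl versions):

```bash
# Hypothetical alternative to the manual Terminating-state polling loop:
# wait for each evicted ordinal pod to be deleted, e.g. jira-1 when scaling 2 -> 1.
for i in $(seq "${DESIRED_REPLICAS}" $((STS_REPLICAS - 1))); do
  kubectl wait "pod/${PRODUCT}-${i}" -n "${NAMESPACE}" \
    --for=delete --timeout="${TIMEOUT}s"
done
```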