# --- GitHub web UI residue (not part of the workflow definition) ---
# Skip to content
# Generated the kubeconfig_data dynamically #41
# Generated the kubeconfig_data dynamically
# Generated the kubeconfig_data dynamically #41
# Workflow file for this run

# CI/CD pipeline: build + push a Docker image to ECR, then deploy it to EKS.
# Both jobs authenticate to AWS via OIDC (no long-lived keys).
name: CI/CD Pipeline

on:
  push:
    branches:
      - main

jobs:
  build:
    permissions:
      id-token: write  # allows creation of an OIDC token
      contents: read
    runs-on: ubuntu-latest
    # Job-level outputs consumed by the deploy job via needs.build.outputs.image.
    outputs:
      image: ${{ steps.image-details.outputs.image }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Authenticate with OIDC
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/eks-federated-deployer
          role-session-name: federated-role-temporal-session
          aws-region: ${{ secrets.AWS_REGION }}
          mask-aws-account-id: true

      - name: Set up Docker Build
        uses: docker/setup-buildx-action@v3

      - name: Log in to Amazon ECR
        id: login-ecr  # step id; later steps read steps.login-ecr.outputs.registry
        uses: aws-actions/amazon-ecr-login@v2

      - name: Build, tag, and push image to ECR
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}  # registry output of the login-ecr step
          ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }}
          IMAGE_TAG: ${{ github.sha }}
        run: |
          # Build the Docker image
          docker build -t "$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" .
          # Push the image to Amazon ECR
          docker push "$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG"

      - name: Output Image URI for downstream jobs
        id: image-details
        # env: blocks are step-scoped, so these must be repeated here — the
        # previous step's env is NOT visible in this step.
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }}
          IMAGE_TAG: ${{ github.sha }}
        # Step outputs must be written to $GITHUB_OUTPUT. $GITHUB_ENV only sets
        # environment variables for later steps in the SAME job and is never
        # surfaced through steps.*.outputs / needs.*.outputs.
        run: echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> "$GITHUB_OUTPUT"

  deploy:
    needs: build
    permissions:
      id-token: write  # allows creation of an OIDC token
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set image URI
        run: echo "IMAGE_URI=${{ needs.build.outputs.image }}" >> "$GITHUB_ENV"

      - name: Set up AWS CLI
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/eks-federated-deployer
          role-session-name: eks-deployer-temporal-session
          aws-region: ${{ secrets.AWS_REGION }}
          mask-aws-account-id: true

      - name: Generate kubeconfig for EKS
        env:
          CLUSTER_NAME: my-eks-cluster  # Replace with your EKS cluster name
          AWS_REGION: ${{ secrets.AWS_REGION }}
        run: |
          aws eks update-kubeconfig --name "$CLUSTER_NAME" --region "$AWS_REGION" --kubeconfig ./kubeconfig
          base64 ./kubeconfig > kubeconfig_base64.txt

      - name: Decode and set kubeconfig
        run: |
          # $HOME/.kube does not exist on a fresh runner — create it first,
          # otherwise the redirect below fails with "No such file or directory".
          mkdir -p "$HOME/.kube"
          base64 --decode < kubeconfig_base64.txt > "$HOME/.kube/config"
        shell: bash

      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl
          sudo mv kubectl /usr/local/bin/
          # --client avoids a server round-trip; this is only an install check.
          kubectl version --client

      - name: Validate AWS Configuration
        run: |
          which aws
          aws --version
          aws sts get-caller-identity

      - name: Replace environment variables in Kubernetes manifest
        run: |
          export AWS_ACCOUNT_ID=${{ secrets.AWS_ACCOUNT_ID }}
          export AWS_REGION=${{ secrets.AWS_REGION }}
          export ECR_REPOSITORY=${{ secrets.ECR_REPOSITORY }}
          export IMAGE_TAG=${{ github.sha }}
          envsubst < k8s/deployment.yaml | tee k8s/deployment-resolved.yaml

      - name: Deploy Deployment
        run: kubectl apply --validate=false -f k8s/deployment-resolved.yaml

      - name: Deploy Service
        run: kubectl apply --validate=false -f k8s/service.yaml

      # Renamed from a duplicate "Deploy Service" — this applies the Fargate
      # test pod manifest, not the service.
      - name: Deploy Fargate test pod
        run: kubectl apply --validate=false -f k8s/fargate-test-pod-deployment.yaml

      - name: Check Installation
        run: |
          kubectl get pods -l app=marco-nico-app
          kubectl describe deployment marco-nico

      - name: Wait for LoadBalancer IP
        id: get_lb
        run: |
          echo "Waiting for LoadBalancer to be provisioned..."
          while true; do
            LB_HOSTNAME=$(kubectl get svc marco-nico-service -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
            if [ ! -z "$LB_HOSTNAME" ]; then
              echo "LB_HOSTNAME=$LB_HOSTNAME" >> "$GITHUB_ENV"
              break
            fi
            echo "Still waiting..."
            sleep 10
          done
          echo "Application is accessible at http://$LB_HOSTNAME"

      - name: Output LoadBalancer Hostname
        run: |
          echo "LoadBalancer Hostname: ${{ env.LB_HOSTNAME }}"