temp using eks-node-role name for testing #52
Workflow file for this run

name: CI/CD Pipeline
on:
  push:
    branches:
      - main
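# Two jobs: `build` pushes the container image to ECR, then `deploy`
# (gated by `needs: build`) rolls it out to the EKS cluster using the
# image URI handed over through job outputs.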
jobs:
  build:
    permissions:
      id-token: write # allows creation of an OIDC token
      contents: read
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
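      # `id-token: write` lets this job request a GitHub OIDC token;
      # configure-aws-credentials exchanges it for short-lived AWS
      # credentials via AssumeRoleWithWebIdentity, so no static keys
      # are stored in the repository.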
      - name: Authenticate with OIDC
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/eks-node-role
          role-session-name: federated-role-temporal-session
          aws-region: ${{ secrets.AWS_REGION }}
          mask-aws-account-id: true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
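      # amazon-ecr-login authenticates the Docker client against the
      # account's ECR registry and exposes the registry hostname as a
      # step output, consumed below via steps.login-ecr.outputs.registry.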
      - name: Log in to Amazon ECR
        id: login-ecr # step ID referenced by later steps, not the job ID
        uses: aws-actions/amazon-ecr-login@v2
      - name: Build, tag, and push image to ECR
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }}
          IMAGE_TAG: ${{ github.sha }}
        run: |
          # Build the Docker image
          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
          # Tag the image also as 'latest'
          docker tag $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG $ECR_REGISTRY/$ECR_REPOSITORY:latest
          # Push both the SHA-tagged image and the 'latest' image to ECR
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:latest
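      # Values written to $GITHUB_OUTPUT become step outputs readable as
      # steps.<id>.outputs.<name>; the job-level `outputs:` block below
      # forwards the image URI to the deploy job.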
      - name: Output Image URI for downstream jobs
        id: image-details
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }}
          IMAGE_TAG: ${{ github.sha }}
        run: echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> "$GITHUB_OUTPUT"
    outputs:
      image: ${{ steps.image-details.outputs.image }}
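  # Permissions are scoped per job once declared, so `deploy` must request
  # its own `id-token: write` to authenticate with AWS again.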
  deploy:
    needs: build
    permissions:
      id-token: write # allows creation of an OIDC token
      contents: read # required for actions/checkout once permissions are restricted
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set image URI
        run: echo "IMAGE_URI=${{ needs.build.outputs.image }}" >> "$GITHUB_ENV"
      - name: Set up AWS CLI
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/eks-node-role
          role-session-name: eks-deployer-temporal-session
          aws-region: ${{ secrets.AWS_REGION }}
          mask-aws-account-id: true
      - name: Generate kubeconfig for EKS
        env:
          CLUSTER_NAME: my-cluster # Replace with your EKS cluster name
          AWS_REGION: ${{ secrets.AWS_REGION }}
        run: |
          aws eks update-kubeconfig --name "$CLUSTER_NAME" --region "$AWS_REGION" --kubeconfig ./kubeconfig
          base64 ./kubeconfig > kubeconfig_base64.txt
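      # update-kubeconfig writes a kubeconfig whose user entry invokes
      # `aws eks get-token` at request time, so the file contains no
      # long-lived credentials.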
      - name: Decode and set kubeconfig
        shell: bash
        run: |
          mkdir -p $HOME/.kube
          base64 --decode kubeconfig_base64.txt > $HOME/.kube/config
      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl
          sudo mv kubectl /usr/local/bin/
          kubectl version --client
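      # stable.txt resolves to the latest stable kubectl release tag, so
      # the pipeline always installs a current client binary.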
      - name: Replace environment variables in Kubernetes manifest
        env:
          AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
          AWS_REGION: ${{ secrets.AWS_REGION }}
          ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }}
          IMAGE_TAG: latest
        run: envsubst < infrastructure/k8s/deployment.yaml | tee infrastructure/k8s/deployment-resolved.yaml
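      # envsubst replaces ${AWS_ACCOUNT_ID}-style placeholders in the
      # manifest with the values above; tee prints the resolved manifest
      # to the log while saving it for the apply step.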
      - name: Deploy Deployment
        run: kubectl apply --validate=false -f infrastructure/k8s/deployment-resolved.yaml
      - name: Deploy Service
        run: kubectl apply --validate=false -f infrastructure/k8s/service.yaml
      - name: Deploy additional resources
        run: kubectl apply --validate=false -f infrastructure/k8s/fargate-test-pod-deployment.yaml
      - name: Check Installation
        run: |
          kubectl get pods -l app=marco-nico-app
          kubectl describe deployment marco-nico
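      # The Service's external hostname appears only after AWS finishes
      # provisioning the load balancer, so the next step polls
      # .status.loadBalancer.ingress[0].hostname every 10 seconds.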
      - name: Wait for LoadBalancer hostname
        id: get_lb
        run: |
          echo "Waiting for LoadBalancer to be provisioned..."
          while true; do
            LB_HOSTNAME=$(kubectl get svc marco-nico-service -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
            if [ -n "$LB_HOSTNAME" ]; then
              echo "LB_HOSTNAME=$LB_HOSTNAME" >> "$GITHUB_ENV"
              break
            fi
            echo "Still waiting..."
            sleep 10
          done
          echo "Application is accessible at http://$LB_HOSTNAME"
      - name: Output LoadBalancer Hostname
        run: |
          echo "LoadBalancer Hostname: ${{ env.LB_HOSTNAME }}"