Skip to content

Commit

Permalink
Migrate Windows helm-chart tests. (#32)
Browse files Browse the repository at this point in the history
Migrate Windows helm-chart tests.
  • Loading branch information
musa-asad authored and mitali-salvi committed May 8, 2024
1 parent 6d463b3 commit 05b76a5
Show file tree
Hide file tree
Showing 8 changed files with 442 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -78,4 +78,116 @@ jobs:
retry_wait_seconds: 5
command: |
cd integration-tests/amazon-cloudwatch-observability/terraform/helm
terraform destroy --auto-approve
# Deploys an EKS cluster with a Windows Server 2022 node group via the
# helm-windows Terraform module, runs the validator, and always tears down.
HelmChartsIntegrationTestWindows-2022:
  name: HelmChartsIntegrationTestWindows-2022
  runs-on: ubuntu-latest
  strategy:
    fail-fast: false
  steps:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0

    # Unique per-run id so concurrent runs do not collide on shared AWS resources.
    - name: Generate testing id
      run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV

    - name: Configure AWS Credentials
      uses: aws-actions/configure-aws-credentials@v2
      with:
        role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }}
        aws-region: ${{ env.AWS_DEFAULT_REGION }}

    # Local directory to store the kubernetes config
    - name: Create kubeconfig directory
      run: mkdir -p ${{ github.workspace }}/../../../.kube

    - name: Set KUBECONFIG environment variable
      run: echo KUBECONFIG="${{ github.workspace }}/../../../.kube/config" >> $GITHUB_ENV

    - name: Verify Terraform version
      run: terraform --version

    # Apply, then destroy regardless of the apply/test outcome; propagate
    # failure via exit 1 so the job is still marked red.
    - name: Terraform apply
      uses: nick-fields/retry@v2
      with:
        max_attempts: 1
        timeout_minutes: 60 # EKS takes about 20 minutes to spin up a cluster and service on the cluster
        retry_wait_seconds: 5
        command: |
          cd integration-tests/amazon-cloudwatch-observability/terraform/helm-windows
          terraform init
          if terraform apply -auto-approve \
            -var="windows_os_version=WINDOWS_CORE_2022_x86_64" -var="kube_dir=${{ github.workspace }}/../../../.kube"; then
            terraform destroy -auto-approve
          else
            terraform destroy -auto-approve && exit 1
          fi

    # Best-effort cleanup when the job was cancelled or the step above failed.
    - name: Terraform destroy
      if: ${{ cancelled() || failure() }}
      uses: nick-fields/retry@v2
      with:
        max_attempts: 3
        timeout_minutes: 8
        retry_wait_seconds: 5
        command: |
          cd integration-tests/amazon-cloudwatch-observability/terraform/helm-windows
          terraform destroy --auto-approve
# Same flow as the 2022 job but provisions a Windows Server 2019 node group
# (only the windows_os_version Terraform variable differs).
HelmChartsIntegrationTestWindows-2019:
  name: HelmChartsIntegrationTestWindows-2019
  runs-on: ubuntu-latest
  strategy:
    fail-fast: false
  steps:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0

    # Unique per-run id so concurrent runs do not collide on shared AWS resources.
    - name: Generate testing id
      run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV

    - name: Configure AWS Credentials
      uses: aws-actions/configure-aws-credentials@v2
      with:
        role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }}
        aws-region: ${{ env.AWS_DEFAULT_REGION }}

    # Local directory to store the kubernetes config
    - name: Create kubeconfig directory
      run: mkdir -p ${{ github.workspace }}/../../../.kube

    - name: Set KUBECONFIG environment variable
      run: echo KUBECONFIG="${{ github.workspace }}/../../../.kube/config" >> $GITHUB_ENV

    - name: Verify Terraform version
      run: terraform --version

    # Apply, then destroy regardless of the apply/test outcome; propagate
    # failure via exit 1 so the job is still marked red.
    - name: Terraform apply
      uses: nick-fields/retry@v2
      with:
        max_attempts: 1
        timeout_minutes: 60 # EKS takes about 20 minutes to spin up a cluster and service on the cluster
        retry_wait_seconds: 5
        command: |
          cd integration-tests/amazon-cloudwatch-observability/terraform/helm-windows
          terraform init
          if terraform apply -auto-approve \
            -var="windows_os_version=WINDOWS_CORE_2019_x86_64" -var="kube_dir=${{ github.workspace }}/../../../.kube"; then
            terraform destroy -auto-approve
          else
            terraform destroy -auto-approve && exit 1
          fi

    # Best-effort cleanup when the job was cancelled or the step above failed.
    - name: Terraform destroy
      if: ${{ cancelled() || failure() }}
      uses: nick-fields/retry@v2
      with:
        max_attempts: 3
        timeout_minutes: 8
        retry_wait_seconds: 5
        command: |
          cd integration-tests/amazon-cloudwatch-observability/terraform/helm-windows
          terraform destroy --auto-approve
Original file line number Diff line number Diff line change
@@ -0,0 +1,239 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT

# Shared test helpers; supplies the per-run testing_id used to suffix
# resource names below.
module "common" {
source = "../common"
}

# Shared networking/IAM building blocks: subnets, security group, and the
# cluster role_arn referenced by aws_eks_cluster.this.
module "basic_components" {
source = "../basic_components"
}

locals {
# Base AWS CLI invocation pinned to the target region.
aws_eks = "aws eks --region ${var.region}"
# Fall back to a default name when var.cluster_name is left empty.
cluster_name = var.cluster_name != "" ? var.cluster_name : "cwagent-helm-chart-integ"
}

# Short-lived auth token for the cluster; consumed by the kubernetes
# provider in providers.tf.
data "aws_eks_cluster_auth" "this" {
name = aws_eks_cluster.this.name
}

# Current account id, used to build the worker-role ARNs in the aws-auth
# ConfigMap below.
data "aws_caller_identity" "account_id" {}

# Cluster CA certificate; consumed by the kubernetes provider in providers.tf.
data "aws_eks_cluster" "eks_windows_cluster_ca" {
name = aws_eks_cluster.this.name
}

output "account_id" {
value = data.aws_caller_identity.account_id.account_id
}

# The EKS control plane for the integration test; name is suffixed with the
# testing_id so parallel CI runs do not clash.
resource "aws_eks_cluster" "this" {
name = "${local.cluster_name}-${module.common.testing_id}"
role_arn = module.basic_components.role_arn
version = var.k8s_version
vpc_config {
subnet_ids = module.basic_components.public_subnet_ids
security_group_ids = [module.basic_components.security_group]
}
}

## EKS Cluster Addon

# Installs the vpc-cni addon, which the amazon-vpc-cni ConfigMap patch below
# depends on.
resource "aws_eks_addon" "eks_windows_addon" {
cluster_name = aws_eks_cluster.this.name
addon_name = "vpc-cni"
}

## Enable VPC CNI Windows Support

# Sets enable-windows-ipam=true on the amazon-vpc-cni ConfigMap so the VPC
# CNI assigns IPs to Windows pods (required for the Windows node group).
resource "kubernetes_config_map_v1_data" "amazon_vpc_cni_windows" {
depends_on = [
aws_eks_cluster.this,
aws_eks_addon.eks_windows_addon
]
metadata {
name = "amazon-vpc-cni"
namespace = "kube-system"
}

# The ConfigMap already exists (created by the addon); force takes ownership
# of the field instead of failing on conflict.
force = true

data = {
enable-windows-ipam : "true"
}
}

## AWS CONFIGMAP

# aws-auth ConfigMap: maps the shared worker-node IAM role so both node
# groups can join the cluster. The second entry adds eks:kube-proxy-windows,
# which Windows nodes need in addition to the standard node groups.
resource "kubernetes_config_map" "configmap" {
data = {
"mapRoles" = <<EOT
- groups:
- system:bootstrappers
- system:nodes
rolearn: arn:aws:iam::${data.aws_caller_identity.account_id.account_id}:role/${local.cluster_name}-Worker-Role-${module.common.testing_id}
username: system:node:{{EC2PrivateDNSName}}
- groups:
- eks:kube-proxy-windows
- system:bootstrappers
- system:nodes
rolearn: arn:aws:iam::${data.aws_caller_identity.account_id.account_id}:role/${local.cluster_name}-Worker-Role-${module.common.testing_id}
username: system:node:{{EC2PrivateDNSName}}
EOT
}

metadata {
name = "aws-auth"
namespace = "kube-system"
}
}

# EKS Node Groups
# Single-node Linux (AL2) node group. NOTE(review): presumably hosts
# cluster-critical Linux-only pods (e.g. CoreDNS) alongside the Windows
# group — confirm against the chart's scheduling requirements.
resource "aws_eks_node_group" "this" {
cluster_name = aws_eks_cluster.this.name
node_group_name = "${local.cluster_name}-node"
node_role_arn = aws_iam_role.node_role.arn
subnet_ids = module.basic_components.public_subnet_ids

scaling_config {
desired_size = 1
max_size = 1
min_size = 1
}

ami_type = "AL2_x86_64"
capacity_type = "ON_DEMAND"
disk_size = 20
instance_types = ["t3a.medium"]

# Policies must be attached before nodes launch, or registration fails.
depends_on = [
aws_iam_role_policy_attachment.node_CloudWatchAgentServerPolicy,
aws_iam_role_policy_attachment.node_AmazonEC2ContainerRegistryReadOnly,
aws_iam_role_policy_attachment.node_AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.node_AmazonEKSWorkerNodePolicy
]
}

# EKS Windows Node Groups
# Single-node Windows node group; the AMI flavor (2019/2022 core) is injected
# by CI via var.windows_os_version. Larger disk/instance than the Linux group
# to accommodate Windows images.
resource "aws_eks_node_group" "node_group_windows" {
cluster_name = aws_eks_cluster.this.name
node_group_name = "${local.cluster_name}-windows-node"
node_role_arn = aws_iam_role.node_role.arn
subnet_ids = module.basic_components.public_subnet_ids

scaling_config {
desired_size = 1
max_size = 1
min_size = 1
}

ami_type = var.windows_os_version
capacity_type = "ON_DEMAND"
disk_size = 50
instance_types = ["t3.large"]

# Policies must be attached before nodes launch, or registration fails.
depends_on = [
aws_iam_role_policy_attachment.node_CloudWatchAgentServerPolicy,
aws_iam_role_policy_attachment.node_AmazonEC2ContainerRegistryReadOnly,
aws_iam_role_policy_attachment.node_AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.node_AmazonEKSWorkerNodePolicy
]
}

# EKS Node IAM Role
# Worker-node IAM role shared by both node groups; the name must match the
# rolearn entries written into the aws-auth ConfigMap above.
resource "aws_iam_role" "node_role" {
name = "${local.cluster_name}-Worker-Role-${module.common.testing_id}"

# Standard EC2 trust policy so node instances can assume the role.
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

# Managed policies required for EKS worker nodes, plus CloudWatchAgentServerPolicy
# for the CloudWatch agent under test.
resource "aws_iam_role_policy_attachment" "node_AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.node_role.name
}

resource "aws_iam_role_policy_attachment" "node_AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.node_role.name
}

resource "aws_iam_role_policy_attachment" "node_AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.node_role.name
}

resource "aws_iam_role_policy_attachment" "node_CloudWatchAgentServerPolicy" {
policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
role = aws_iam_role.node_role.name
}

# Writes the kubeconfig (consumed by the helm provider via var.kube_dir) once
# the cluster and both node groups exist; the list/describe calls are
# diagnostics echoed into the CI log.
resource "null_resource" "kubectl" {
depends_on = [
aws_eks_cluster.this,
aws_eks_node_group.this,
aws_eks_node_group.node_group_windows
]
provisioner "local-exec" {
command = <<-EOT
${local.aws_eks} update-kubeconfig --name ${aws_eks_cluster.this.name}
${local.aws_eks} list-clusters --output text
${local.aws_eks} describe-cluster --name ${aws_eks_cluster.this.name} --output text
EOT
}
}

# Installs the amazon-cloudwatch-observability chart from the local chart
# directory once the kubeconfig has been written by null_resource.kubectl.
resource "helm_release" "this" {
  depends_on = [
    null_resource.kubectl
  ]
  name             = "amazon-cloudwatch-observability"
  namespace        = "amazon-cloudwatch"
  create_namespace = true
  # Direct references instead of interpolation-only "${...}" strings, which
  # are deprecated since Terraform 0.12 and emit warnings.
  chart            = var.helm_dir
  # Chart values: target region and the cluster the agent should report for.
  set {
    name  = "region"
    value = var.region
  }
  set {
    name  = "clusterName"
    value = aws_eks_cluster.this.name
  }
}

# Downloads a pinned-to-stable kubectl and blocks (up to 20 min each) until
# the two Windows daemonsets deployed by the chart finish rolling out, so the
# validator below runs against a ready cluster.
resource "null_resource" "deployment_wait" {
depends_on = [
helm_release.this,
]
provisioner "local-exec" {
command = <<-EOT
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x kubectl
./kubectl rollout status daemonset fluent-bit-windows -n amazon-cloudwatch --timeout 1200s
./kubectl rollout status daemonset cloudwatch-agent-windows -n amazon-cloudwatch --timeout 1200s
EOT
}
}

# Runs the Go integration tests against the deployed chart. A non-zero test
# exit fails the terraform apply, which CI treats as test failure.
# NOTE(review): build tag "windowslinux" presumably selects the mixed
# Windows+Linux test set — confirm against the tags declared in var.test_dir.
resource "null_resource" "validator" {
depends_on = [
helm_release.this,
null_resource.deployment_wait
]
provisioner "local-exec" {
command = "go test ${var.test_dir} -v --tags=windowslinux"
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT

provider "aws" {
region = var.region
}

# Helm authenticates via the kubeconfig written into var.kube_dir by
# `aws eks update-kubeconfig` (null_resource.kubectl in main.tf).
provider "helm" {
kubernetes {
config_path = "${var.kube_dir}/config"
}
}

# The kubernetes provider talks to the cluster endpoint directly, using the
# cluster CA and a short-lived token from aws_eks_cluster_auth — no
# kubeconfig dependency.
provider "kubernetes" {
host = aws_eks_cluster.this.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_windows_cluster_ca.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.this.token
}
Loading

0 comments on commit 05b76a5

Please sign in to comment.