Add workflow to use loadbalancer as a tunnel to juju controller on k8s #8
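In short, the job enables MetalLB on MicroK8s, exposes the Juju controller's API port 17070 through a LoadBalancer Service in the controller namespace, and points the Terraform juju provider at the load balancer's external IP instead of the in-cluster endpoint. As a rough sanity check outside the workflow (a sketch only, assuming the same Service name and a bootstrapped controller on the same machine):

# minimal manual check, reusing the workflow's naming (controller-service-lb, port 17070)
CONTROLLER=$(juju whoami --format yaml | yq .controller)
# ask Kubernetes for the external IP MetalLB assigned to the tunnel Service
LB_IP=$(sudo microk8s.kubectl get svc -n controller-$CONTROLLER controller-service-lb \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
# the controller API serves HTTPS on 17070; any HTTP response proves the tunnel is reachable
curl --insecure --max-time 5 "https://$LB_IP:17070/" || true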

Workflow file for this run

name: Tunnel to Juju controller via load balancer on k8s
on:
  pull_request:
    paths-ignore:
      - "README.md"
      - "project-docs/**"
  push:
    branches:
      - "main"
    paths-ignore:
      - "README.md"
      - "project-docs/**"
# Testing only needs permissions to read the repository contents.
permissions:
  contents: read
jobs:
  # Ensure project builds before running testing matrix
  build:
    name: Build
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
      - run: go build -v .
  # Run acceptance tests in a matrix with Terraform CLI versions
  add-machine-test:
    name: Add Machine
    needs: build
    runs-on: ubuntu-latest
    env:
      ACTIONS_ALLOW_IPV6: false
    strategy:
      fail-fast: false
      matrix:
        # Only on microk8s
        cloud:
          - "microk8s"
        terraform:
          - "1.5.*"
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version-file: "go.mod"
          cache: true
      # set up terraform
      - uses: hashicorp/setup-terraform@v2
        with:
          terraform_version: ${{ matrix.terraform }}
          terraform_wrapper: false
      # set up snap, microk8s, tox, Juju, bootstrap a controller, etc.
      - name: Setup operator environment
        uses: charmed-kubernetes/actions-operator@main
        with:
          provider: ${{ matrix.cloud }}
          juju-channel: 2.9/stable
- name: "Set environment to configure provider"
# language=bash
run: |
CONTROLLER=$(juju whoami --format yaml | yq .controller)
echo "JUJU_CONTROLLER_ADDRESSES=$(juju show-controller | yq .$CONTROLLER.details.api-endpoints | yq -r '. | join(",")')" >> $GITHUB_ENV
echo "JUJU_USERNAME=$(juju show-controller | yq .$CONTROLLER.account.user)" >> $GITHUB_ENV
echo "JUJU_PASSWORD=$(cat ~/.local/share/juju/accounts.yaml | yq .controllers.$CONTROLLER.password)" >> $GITHUB_ENV
echo "JUJU_CA_CERT<<EOF" >> $GITHUB_ENV
juju show-controller | yq .$CONTROLLER.details.ca-cert >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
      - run: go mod download
      - run: |
          CONTROLLER=$(juju whoami --format yaml | yq .controller)
          # enable the ingress and metallb add-ons
          sudo microk8s enable ingress
          echo "Ingress enabled."
          # determine a subnet for metallb from the runner's primary interface
          subnet="$(ip route get 1 | head -n 1 | awk '{print $7}' | awk -F. '{print $1 "." $2 "." $3 ".240/24"}')"; echo "$subnet"
          # enable metallb
          sudo microk8s enable metallb:$subnet
          # Make sure we have sufficient permissions to access microk8s
          echo "Adding runner user to the microk8s group"
          sudo usermod -a -G microk8s runner
          sudo chown -R runner ~/.kube
          newgrp microk8s
          # Make sure the controller name is resolved
          echo "Controller name: $CONTROLLER"
          echo "Juju Username : $JUJU_USERNAME"
          echo "Services layout:"
          sudo microk8s.kubectl get services -n controller-$CONTROLLER controller-service
          # bring up the load balancer service
          sudo microk8s.kubectl apply -f - <<EOF
          apiVersion: v1
          kind: Service
          metadata:
            name: controller-service-lb
            namespace: controller-$CONTROLLER
          spec:
            ipFamilies:
              - IPv4
            ipFamilyPolicy: SingleStack
            ports:
              - name: api-server
                port: 17070
                protocol: TCP
                targetPort: 17070
            selector:
              app.kubernetes.io/name: controller
            sessionAffinity: None
            type: LoadBalancer
          EOF
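          # MetalLB should now allocate an address from $subnet to controller-service-lb;
          # the allocation is not instant, so a short wait/retry may be needed before
          # the external IP shows up in the service listing below.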
          # get the external IP of the load balancer service
          export LB_IP=$(sudo microk8s.kubectl get services -n controller-$CONTROLLER | grep -o 'controller-service-lb.*' | awk '{print $4}')
          # write a plan using this IP; the multi-line CA cert is embedded via an HCL heredoc
          cat <<EOF > ./terraform_config.tf
          provider "juju" {
            controller_addresses = "$LB_IP:17070"
            username = "$JUJU_USERNAME"
            password = "$JUJU_PASSWORD"
            ca_certificate = <<-CERT
          $JUJU_CA_CERT
          CERT
          }
          resource "juju_model" "testmodel" {
            name = "test-model"
          }
          resource "juju_application" "testapp" {
            name = "ubuntu"
            model = juju_model.testmodel.name
            charm {
              name = "ubuntu"
            }
          }
          EOF
          terraform init && terraform plan && terraform apply --auto-approve
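
Not exercised by this run, but a teardown sketch under the same assumptions (the generated terraform_config.tf and the controller-service-lb Service) would be roughly:

# drop the Terraform-managed application and model, then remove the tunnel Service
terraform destroy --auto-approve
CONTROLLER=$(juju whoami --format yaml | yq .controller)
sudo microk8s.kubectl delete service -n controller-$CONTROLLER controller-service-lb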