Skip to content

Commit

Permalink
ci for express
Browse files Browse the repository at this point in the history
  • Loading branch information
DanBezalelpx committed Jun 26, 2023
1 parent 9ad325e commit 072e114
Show file tree
Hide file tree
Showing 29 changed files with 1,882 additions and 0 deletions.
121 changes: 121 additions & 0 deletions .github/workflows/E2E_CI.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
# E2E pipeline: builds the sample-site image, spins up a local kind cluster,
# deploys the mock collector and the enforcer spec-test job via Helm, then
# waits for the test job and dumps its logs.
name: E2E Build

on:
  pull_request

jobs:
  # Reads px_metadata.json and exposes the enforcer's supported_features list
  # (joined with " or ") as a job output consumed by the CI job below.
  extract_metadata:
    runs-on: ubuntu-latest
    name: Extract supported_features
    outputs:
      supported-features: ${{ steps.supported-features.outputs.value }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18.x'
      - name: extract supported features
        id: supported-features
        # "?." + "|| ''" keeps the output empty (not "undefined") when the
        # supported_features key is missing from px_metadata.json.
        run: echo "value=$(node -p -e "require('./px_metadata.json').supported_features?.join(' or ') || ''")" >> "$GITHUB_OUTPUT"

  CI:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    needs:
      - extract_metadata

    steps:
      # checkout@v3 used consistently (the original mixed v2 and v3).
      - name: Checkout code
        uses: actions/checkout@v3

      # Creates a kind cluster plus a local registry at localhost:5001.
      - name: build local cluster
        run: ./ci_files/build_cluster.sh

      - name: Set up Docker
        uses: docker/setup-buildx-action@v1

      - name: Build Sample-site Docker image
        run: |
          docker build -t localhost:5001/node-sample-site:1.0.0 . && docker images && docker push localhost:5001/node-sample-site:1.0.0
        env:
          DOCKER_BUILDKIT: 1

      - name: install helm
        run: |
          curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
          sudo apt-get install apt-transport-https --yes
          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
          sudo apt-get update
          sudo apt-get install helm

      - name: Clone helm charts repo
        uses: actions/checkout@v3
        with:
          repository: PerimeterX/connect-helm-charts
          token: ${{ secrets.CONNECT_PULL_TOKEN }}
          ref: main
          # Checked out into a subdirectory so it does not clobber the
          # enforcer repo checked out at the workspace root.
          path: ./deploy_charts

      - name: deploy sample site
        run: |
          helm install sample-site ./deploy_charts/charts/sample-site --set image.name=localhost:5001/node-sample-site --set image.tag=1.0.0 --set imagePullPolicy=Always --set collectorURL=http://mock-collector-mock-collector:3001 --wait

      - name: Set up Google Cloud SDK
        id: 'auth'
        uses: 'google-github-actions/auth@v1'
        with:
          credentials_json: '${{ secrets.GCR_SA_KEY }}'

      - name: Configure Docker credentials
        run: |
          gcloud auth configure-docker gcr.io

      # Mirror the GCR images into the local kind registry so cluster nodes
      # can pull them without GCR credentials.
      - name: pull mock collector image
        run: |
          docker pull gcr.io/px-docker-repo/connecteam/mock-collector:1.0.2 && \
          docker tag gcr.io/px-docker-repo/connecteam/mock-collector:1.0.2 localhost:5001/mock-collector:1.0.2 && \
          docker push localhost:5001/mock-collector:1.0.2 && \
          docker images

      - name: deploy mock collector
        run: |
          helm install mock-collector ./deploy_charts/charts/mock-collector --set image.repository=localhost:5001/mock-collector --set image.tag=1.0.2 --set imagePullPolicy=Always --wait

      - run: kubectl get pods

      - name: pull enforcer tests image
        run: |
          docker pull gcr.io/px-docker-repo/connecteam/enforcer-specs-tests:1.1.0 && \
          docker tag gcr.io/px-docker-repo/connecteam/enforcer-specs-tests:1.1.0 localhost:5001/enforcer-spec-tests:1.1.0 && \
          docker push localhost:5001/enforcer-spec-tests:1.1.0 && \
          docker images

      - name: run enforcer tests
        run: |
          helm install enforcer-spec-tests ./deploy_charts/charts/enforcer-spec-tests --set image.repository=localhost:5001/enforcer-spec-tests --set image.tag=1.1.0 --set imagePullPolicy=Always \
          --set internalMockCollectorURL=http://mock-collector-mock-collector:3001 \
          --set appID=PXnEpdw6lS \
          --set siteURL=http://sample-site-sample-site:3000 \
          --set cookieSecret=${{ secrets.TEST_COOKIE_SECRET }} \
          --set supportedFeatures="${{ needs.extract_metadata.outputs.supported-features }}" \
          --set-file enforcerMetadataContent=./px_metadata.json

      - name: wait until test is over
        run: ./ci_files/wait-for-job.sh
        env:
          JOB_NAME: enforcer-spec-tests

      # The original had two duplicate-named steps: one on failure() and one
      # on the default success() condition — together they cover every
      # outcome, so they are merged into a single always() step.
      - name: get tests results
        if: ${{ always() }}
        run: kubectl logs job/enforcer-spec-tests
31 changes: 31 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# create static files and configs
FROM node:16-slim

WORKDIR /workspace
# Copy the demo-site config, build scripts, templates and shared utilities.
COPY ./demo-site/shared_config.json .
COPY ./demo-site/scripts scripts
COPY ./demo-site/templates templates
COPY ./demo-site/utils utils
# package.json is copied (and npm install run) before the server sources so
# the dependency layer is cached across source-only changes.
COPY ./demo-site/servers/nodejs/package.json servers/nodejs/package.json
RUN cd servers/nodejs && npm install
COPY ./demo-site/servers/nodejs servers/nodejs

# Generate the demo site's static assets and PerimeterX config files.
RUN node scripts/create_static_files.js && node scripts/create_px_configs.js

WORKDIR /workspace/servers/nodejs

# Install the enforcer under test from the build context (this repo) as a
# local npm package, so the sample site runs against the PR's code.
COPY ./ perimeterx-node-express
RUN npm install ./perimeterx-node-express

# Build-time defaults; all four can be overridden at `docker build` time
# and are re-exported as runtime environment variables below.
ARG ENABLE_TEST_ENDPOINTS=true
ARG PX_APP_ID=""
ARG PX_AUTH_TOKEN=""
ARG PX_COOKIE_SECRET=""

ENV ENABLE_TEST_ENDPOINTS=${ENABLE_TEST_ENDPOINTS}
ENV PX_APP_ID=${PX_APP_ID}
ENV PX_AUTH_TOKEN=${PX_AUTH_TOKEN}
ENV PX_COOKIE_SECRET=${PX_COOKIE_SECRET}

EXPOSE 3000
CMD ["node","app.js"]
66 changes: 66 additions & 0 deletions ci_files/build_cluster.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#!/bin/sh
# Creates a local kind cluster wired to a Docker registry container at
# localhost:5001, following https://kind.sigs.k8s.io/docs/user/local-registry/
set -o errexit

# 1. Download kind binary
# For AMD64 / x86_64
#[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.19.0/kind-linux-amd64
# For ARM64
#[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.19.0/kind-linux-arm64
#chmod +x ./kind
#sudo mv ./kind /usr/local/bin/kind


# 2. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
  docker run \
    -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \
    registry:2
fi

# 3. Create kind cluster with containerd registry config dir enabled
# NOTE: the heredoc body is YAML — the indentation below is significant and
# was restored here (the block scalar content must be indented under "- |-").
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".registry]
    config_path = "/etc/containerd/certs.d"
EOF

# 4. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
  docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
  cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done

# 5. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
  docker network connect "kind" "${reg_name}"
fi

# 6. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "localhost:${reg_port}"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
7 changes: 7 additions & 0 deletions ci_files/extract_features.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash

# Print px_metadata.json collapsed onto a single line with all newlines and
# spaces removed, for embedding in CI parameters.
#
# NOTE(review): `tr -d ' '` also strips spaces inside JSON string values
# (e.g. "a b" becomes "ab"); presumably px_metadata.json contains no such
# values — confirm before reusing this script elsewhere.
json_file_path="../px_metadata.json"
# Single tr invocation reading the file directly (no useless cat).
json_string=$(tr -d ' \n' < "$json_file_path")

echo "$json_string"
42 changes: 42 additions & 0 deletions ci_files/wait-for-job.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Block until the Kubernetes job named $JOB_NAME (in $NAMESPACE, default
# "default") completes. Exits 0 on job success, 1 on job failure or when the
# job does not exist / JOB_NAME is unset.

export job=$JOB_NAME
export ns="${NAMESPACE:-default}"

# Quoted test: the original unquoted `[ -z $job ]` evaluates to true-ish noise
# when $job is empty or contains spaces (ShellCheck SC2086).
if [ -z "$job" ]; then
  echo "JOB_NAME is required"
  exit 1
fi

echo "JOB_NAME = $job"
echo "NAMESPACE = $ns"


# Verify the job exists before polling; bail out otherwise.
kubectl get job -n "$ns" "$job"
job_exists=$?

if [ $job_exists -ne 0 ]
then
  exit 1
fi

# Poll every 5s: `kubectl wait --timeout=0s` returns 0 as soon as the given
# condition holds, so each iteration checks complete then failed.
while true;
do
  echo "checking for success"
  kubectl wait --for=condition=complete -n "$ns" "job/$job" --timeout=0s > /dev/null 2>&1
  success=$?
  if [ $success -eq 0 ]
  then
    exit 0;
  fi

  echo "checking for failure"
  kubectl wait --for=condition=failed -n "$ns" "job/$job" --timeout=0s > /dev/null 2>&1
  fail=$?
  if [ $fail -eq 0 ]
  then
    exit 1
  fi

  sleep 5
done
Loading

0 comments on commit 072e114

Please sign in to comment.