From 51f36646cddc898b2634448a5320719ca8455e97 Mon Sep 17 00:00:00 2001 From: Anurag Rajawat Date: Tue, 25 Jun 2024 17:59:19 +0530 Subject: [PATCH] fix(nimbus-k8tls): Remove ownerref from ns (#197) * fix(nimbus-k8tls): Remove ownerref from ns Signed-off-by: Anurag Rajawat * fix(CI): Fix e2e-test job Signed-off-by: Anurag Rajawat --------- Signed-off-by: Anurag Rajawat --- .github/workflows/pr-checks.yaml | 45 +++++++++++---------- pkg/adapter/nimbus-k8tls/main.go | 2 +- pkg/adapter/nimbus-k8tls/manager/cronjob.go | 4 +- pkg/adapter/nimbus-k8tls/manager/k8tls.go | 12 +++++- 4 files changed, 36 insertions(+), 27 deletions(-) diff --git a/.github/workflows/pr-checks.yaml b/.github/workflows/pr-checks.yaml index ad40930c..aba58988 100644 --- a/.github/workflows/pr-checks.yaml +++ b/.github/workflows/pr-checks.yaml @@ -153,13 +153,18 @@ jobs: kind load docker-image 5gsec/nimbus:latest --name=testing - name: Install Nimbus + working-directory: ./deployments/nimbus run: | - helm upgrade --install nimbus-operator deployments/nimbus -n nimbus --create-namespace --set image.pullPolicy=Never + helm upgrade --dependency-update --install nimbus-operator . \
-n nimbus --create-namespace \ + --set image.pullPolicy=Never \ + --set autoDeploy.kubearmor=false \ + --set autoDeploy.kyverno=false \ + --set autoDeploy.netpol=false - name: Wait for Nimbus to start run: | kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus - kubectl get pods -A + kubectl get pods -n nimbus - name: Run Tests run: make integration-test @@ -183,7 +188,7 @@ jobs: uses: helm/kind-action@v1 with: cluster_name: testing - + - name: Build nimbus image and load in the kind cluster run: | make docker-build @@ -200,30 +205,26 @@ jobs: run: | make docker-build kind load docker-image 5gsec/nimbus-kubearmor:latest --name=testing - + - name: Build nimbus-kyverno image and load in the kind cluster working-directory: ./pkg/adapter/nimbus-kyverno run: | make docker-build kind load docker-image 5gsec/nimbus-kyverno:latest --name=testing - - - name: Install Kubearmor CRDs - run: | - kubectl create -f https://raw.githubusercontent.com/kubearmor/KubeArmor/main/deployments/CRD/KubeArmorPolicy.yaml - - - name: Install Kyverno CRDs - run: | - kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/crds/kyverno/kyverno.io_clusterpolicies.yaml - kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/crds/kyverno/kyverno.io_policies.yaml - name: Install Nimbus + working-directory: ./deployments/nimbus run: | - helm upgrade --install nimbus-operator deployments/nimbus -n nimbus --create-namespace --set image.pullPolicy=Never + helm upgrade --dependency-update --install nimbus-operator . \
-n nimbus --create-namespace \ + --set image.pullPolicy=Never \ + --set autoDeploy.kubearmor=false \ + --set autoDeploy.kyverno=false \ + --set autoDeploy.netpol=false - name: Wait for Nimbus to start run: | kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus - kubectl get pods -A + kubectl get pods -n nimbus - name: Install nimbus-netpol working-directory: deployments/nimbus-netpol/ @@ -233,27 +234,27 @@ jobs: - name: Wait for nimbus-netpol to start run: | kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-netpol - kubectl get pods -A + kubectl get pods -n nimbus - name: Install nimbus-kubearmor working-directory: deployments/nimbus-kubearmor/ run: | - helm upgrade --install nimbus-kubearmor . -n nimbus --set image.pullPolicy=Never + helm upgrade --dependency-update --install nimbus-kubearmor . -n nimbus --set image.pullPolicy=Never - name: Wait for nimbus-kubearmor to start run: | kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-kubearmor - kubectl get pods -A + kubectl get pods -n nimbus - name: Install nimbus-kyverno working-directory: deployments/nimbus-kyverno/ run: | - helm upgrade --install nimbus-kyverno . -n nimbus --set image.pullPolicy=Never - + helm upgrade --dependency-update --install nimbus-kyverno . \
-n nimbus --set image.pullPolicy=Never + - name: Wait for nimbus-kyverno to start run: | kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-kyverno - kubectl get pods -A - + kubectl get pods -n nimbus + - name: Run Tests run: make e2e-test diff --git a/pkg/adapter/nimbus-k8tls/main.go b/pkg/adapter/nimbus-k8tls/main.go index 0574162b..ba7d46c7 100644 --- a/pkg/adapter/nimbus-k8tls/main.go +++ b/pkg/adapter/nimbus-k8tls/main.go @@ -16,7 +16,7 @@ import ( ) func main() { - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + ctrl.SetLogger(zap.New()) logger := ctrl.Log ctx, cancelFunc := context.WithCancel(context.Background()) diff --git a/pkg/adapter/nimbus-k8tls/manager/cronjob.go b/pkg/adapter/nimbus-k8tls/manager/cronjob.go index 88413ee6..4b8ccc13 100644 --- a/pkg/adapter/nimbus-k8tls/manager/cronjob.go +++ b/pkg/adapter/nimbus-k8tls/manager/cronjob.go @@ -54,7 +54,7 @@ func createOrUpdateCj(ctx context.Context, logger logr.Logger, cwnp v1alpha1.Clu logger.Info("configured Kubernetes CronJob", "CronJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace) } - if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnp.Name, false); err != nil { + if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, cronJob.Namespace+"/CronJob/"+cronJob.Name, cwnp.Name, false); err != nil { logger.Error(err, "failed to update ClusterNimbusPolicy status") } } @@ -67,7 +67,7 @@ func deleteCronJobs(ctx context.Context, logger logr.Logger, cwnpName string, cr continue } - if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnpName, true); err != nil { + if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, cronJob.Namespace+"/CronJob/"+cronJob.Name, cwnpName, true); err != nil { logger.Error(err, "failed to update ClusterNimbusPolicy status") } logger.Info("Dangling Kubernetes CronJob deleted", "CronJobJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace) diff --git a/pkg/adapter/nimbus-k8tls/manager/k8tls.go b/pkg/adapter/nimbus-k8tls/manager/k8tls.go index 225b960a..ee51746c 100644 --- a/pkg/adapter/nimbus-k8tls/manager/k8tls.go +++ b/pkg/adapter/nimbus-k8tls/manager/k8tls.go @@ -202,8 +202,16 @@ func setupK8tlsEnv(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy, schem objs := []client.Object{ns, cm, sa, clusterRole, clusterRoleBinding} for idx := range objs { objToCreate := objs[idx] - if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil { - return err + + // Don't set owner ref on namespace. In environments with configured Pod Security + // Standards labelling namespaces becomes a requirement. However, on deletion of + // CWNP a namespace with ownerReferences set also gets deleted. Since we need to + // keep the nimbus-k8tls-env namespace labeled, removing the ownerReferences + // prevents this deletion. + if idx != 0 { + if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil { + return err + } } var existingObj client.Object