diff --git a/pkg/adapter/nimbus-kubearmor/manager/manager.go b/pkg/adapter/nimbus-kubearmor/manager/manager.go index c2794a2a..7effd747 100644 --- a/pkg/adapter/nimbus-kubearmor/manager/manager.go +++ b/pkg/adapter/nimbus-kubearmor/manager/manager.go @@ -6,6 +6,7 @@ package manager import ( "context" "fmt" + "strings" "github.com/go-logr/logr" kubearmorv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1" @@ -89,9 +90,9 @@ func reconcileKsp(ctx context.Context, kspName, namespace string, deleted bool) return } if deleted { - logger.Info("Reconciling deleted KubeArmorPolicy", "KubeArmorPolicy.Name", kspName, "KubeArmorPolicy.Namespace", namespace) + logger.V(2).Info("Reconciling deleted KubeArmorPolicy", "KubeArmorPolicy.Name", kspName, "KubeArmorPolicy.Namespace", namespace) } else { - logger.Info("Reconciling modified KubeArmorPolicy", "KubeArmorPolicy.Name", kspName, "KubeArmorPolicy.Namespace", namespace) + logger.V(2).Info("Reconciling modified KubeArmorPolicy", "KubeArmorPolicy.Name", kspName, "KubeArmorPolicy.Namespace", namespace) } createOrUpdateKsp(ctx, npName, namespace) } @@ -109,11 +110,9 @@ func createOrUpdateKsp(ctx context.Context, npName, npNamespace string) { return } + deleteDanglingKsps(ctx, np, logger) ksps := processor.BuildKspsFrom(logger, &np) - // TODO: Fix loop due to unnecessary KSPs deletion - //deleteUnnecessaryKsps(ctx, ksps, npNamespace, logger) - // Iterate using a separate index variable to avoid aliasing for idx := range ksps { ksp := ksps[idx] @@ -130,12 +129,14 @@ func createOrUpdateKsp(ctx context.Context, npName, npNamespace string) { logger.Error(err, "failed to get existing KubeArmorPolicy", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace) return } - if err != nil && errors.IsNotFound(err) { - if err = k8sClient.Create(ctx, &ksp); err != nil { - logger.Error(err, "failed to create KubeArmorPolicy", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", 
ksp.Namespace) - return + if err != nil { + if errors.IsNotFound(err) { + if err = k8sClient.Create(ctx, &ksp); err != nil { + logger.Error(err, "failed to create KubeArmorPolicy", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace) + return + } + logger.Info("KubeArmorPolicy created", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace) } - logger.Info("KubeArmorPolicy created", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace) } else { ksp.ObjectMeta.ResourceVersion = existingKsp.ObjectMeta.ResourceVersion if err = k8sClient.Update(ctx, &ksp); err != nil { @@ -145,14 +146,12 @@ func createOrUpdateKsp(ctx context.Context, npName, npNamespace string) { logger.Info("KubeArmorPolicy configured", "KubeArmorPolicy.Name", existingKsp.Name, "KubeArmorPolicy.Namespace", existingKsp.Namespace) } - // Every adapter is responsible for updating the status field of the - // corresponding NimbusPolicy with the number and names of successfully created - // policies by calling the 'adapterutil.UpdateNpStatus' API. This provides - // feedback to users about the translation and deployment of their security - // intent. - if err = adapterutil.UpdateNpStatus(ctx, k8sClient, "KubeArmorPolicy/"+ksp.Name, np.Name, np.Namespace); err != nil { - logger.Error(err, "failed to update KubeArmorPolicies status in NimbusPolicy") - } + //TODO: Due to adapters' dependency on nimbus module, the docker image build is + // failing. The relevant code is commented out below (lines 153-155). We shall + // uncomment this code in a subsequent PR. 
+ //if err = adapterutil.UpdateNpStatus(ctx, k8sClient, "KubeArmorPolicy/"+ksp.Name, np.Name, np.Namespace, false); err != nil { + // logger.Error(err, "failed to update KubeArmorPolicies status in NimbusPolicy") + //} } } @@ -177,24 +176,49 @@ func deleteKsp(ctx context.Context, npName, npNamespace string) { } } -func deleteUnnecessaryKsps(ctx context.Context, currentKsps []kubearmorv1.KubeArmorPolicy, namespace string, logger logr.Logger) { +func deleteDanglingKsps(ctx context.Context, np intentv1.NimbusPolicy, logger logr.Logger) { var existingKsps kubearmorv1.KubeArmorPolicyList - if err := k8sClient.List(ctx, &existingKsps, client.InNamespace(namespace)); err != nil { + if err := k8sClient.List(ctx, &existingKsps, client.InNamespace(np.Namespace)); err != nil { logger.Error(err, "failed to list KubeArmorPolicies for cleanup") return } - currentKspNames := make(map[string]bool) - for _, ksp := range currentKsps { - currentKspNames[ksp.Name] = true + var kspsOwnedByNp []kubearmorv1.KubeArmorPolicy + for _, ksp := range existingKsps.Items { + ownerRefs := ksp.OwnerReferences + if len(ownerRefs) > 0 && ownerRefs[0].Name == np.Name && ownerRefs[0].UID == np.UID { + kspsOwnedByNp = append(kspsOwnedByNp, ksp) + } + } + if len(kspsOwnedByNp) == 0 { + return } - for i := range existingKsps.Items { - existingKsp := existingKsps.Items[i] - if _, needed := currentKspNames[existingKsp.Name]; !needed { - if err := k8sClient.Delete(ctx, &existingKsp); err != nil { - logger.Error(err, "failed to delete unnecessary KubeArmorPolicy", "KubeArmorPolicy.Name", existingKsp.Name) - } + kspsToDelete := make(map[string]kubearmorv1.KubeArmorPolicy) + + // Populate owned KSPs + for _, kspOwnedByNp := range kspsOwnedByNp { + kspsToDelete[kspOwnedByNp.Name] = kspOwnedByNp + } + + for _, nimbusRule := range np.Spec.NimbusRules { + kspName := np.Name + "-" + strings.ToLower(nimbusRule.ID) + delete(kspsToDelete, kspName) + } + + for kspName := range kspsToDelete { + ksp := kspsToDelete[kspName] + if err := 
k8sClient.Delete(ctx, &ksp); err != nil { + logger.Error(err, "failed to delete dangling KubeArmorPolicy", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace) + continue + } + + //TODO: Due to adapters' dependency on nimbus module, the docker image build is + // failing. The relevant code is commented out below (lines 217-219). We shall + // uncomment this code in a subsequent PR. + //if err := adapterutil.UpdateNpStatus(ctx, k8sClient, "KubeArmorPolicy/"+ksp.Name, np.Name, np.Namespace, true); err != nil { + // logger.Error(err, "failed to update KubeArmorPolicy status in NimbusPolicy") + //} + logger.Info("Dangling KubeArmorPolicy deleted", "KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace) } } diff --git a/pkg/adapter/nimbus-netpol/manager/netpols_manager.go b/pkg/adapter/nimbus-netpol/manager/netpols_manager.go index b3dddf87..aae8a006 100644 --- a/pkg/adapter/nimbus-netpol/manager/netpols_manager.go +++ b/pkg/adapter/nimbus-netpol/manager/netpols_manager.go @@ -6,7 +6,9 @@ package manager import ( "context" "fmt" + "strings" + "github.com/go-logr/logr" netv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -88,9 +90,9 @@ func reconcileNetPol(ctx context.Context, netpolName, namespace string, deleted return } if deleted { - logger.Info("Reconciling deleted NetworkPolicy", "NetworkPolicy.Name", netpolName, "NetworkPolicy.Namespace", namespace) + logger.V(2).Info("Reconciling deleted NetworkPolicy", "NetworkPolicy.Name", netpolName, "NetworkPolicy.Namespace", namespace) } else { - logger.Info("Reconciling modified NetworkPolicy", "NetworkPolicy.Name", netpolName, "NetworkPolicy.Namespace", namespace) + logger.V(2).Info("Reconciling modified NetworkPolicy", "NetworkPolicy.Name", netpolName, "NetworkPolicy.Namespace", namespace) } createOrUpdateNetworkPolicy(ctx, npName, namespace) } @@ -108,6 +110,7 @@ func createOrUpdateNetworkPolicy(ctx context.Context, 
npName, npNamespace string return } + deleteDanglingNetpols(ctx, np, logger) netPols := processor.BuildNetPolsFrom(logger, np) // Iterate using a separate index variable to avoid aliasing for idx := range netPols { @@ -125,12 +128,14 @@ func createOrUpdateNetworkPolicy(ctx context.Context, npName, npNamespace string logger.Error(err, "failed to get existing NetworkPolicy", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) return } - if errors.IsNotFound(err) { - if err = k8sClient.Create(ctx, &netpol); err != nil { - logger.Error(err, "failed to create NetworkPolicy", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) - return + if err != nil { + if errors.IsNotFound(err) { + if err = k8sClient.Create(ctx, &netpol); err != nil { + logger.Error(err, "failed to create NetworkPolicy", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) + return + } + logger.Info("NetworkPolicy created", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) } - logger.Info("NetworkPolicy created", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) } else { netpol.ObjectMeta.ResourceVersion = existingNetpol.ObjectMeta.ResourceVersion if err = k8sClient.Update(ctx, &netpol); err != nil { @@ -139,15 +144,12 @@ func createOrUpdateNetworkPolicy(ctx context.Context, npName, npNamespace string } logger.Info("NetworkPolicy configured", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) } - - // Every adapter is responsible for updating the status field of the - // corresponding NimbusPolicy with the number and names of successfully created - // policies by calling the 'adapterutil.UpdateNpStatus' API. This provides - // feedback to users about the translation and deployment of their security - // intent. 
- if err = adapterutil.UpdateNpStatus(ctx, k8sClient, "NetworkPolicy/"+netpol.Name, np.Name, np.Namespace); err != nil { - logger.Error(err, "failed to update NetworkPolicies status in NimbusPolicy") - } + //TODO: Due to adapters' dependency on nimbus module, the docker image build is + // failing. The relevant code is commented out below (lines 150-152). We shall + // uncomment this code in a subsequent PR. + //if err = adapterutil.UpdateNpStatus(ctx, k8sClient, "NetworkPolicy/"+netpol.Name, np.Name, np.Namespace, false); err != nil { + // logger.Error(err, "failed to update NetworkPolicies status in NimbusPolicy") + //} } } @@ -171,3 +173,50 @@ func deleteNetworkPolicy(ctx context.Context, npName, npNamespace string) { ) } } + +func deleteDanglingNetpols(ctx context.Context, np intentv1.NimbusPolicy, logger logr.Logger) { + var existingNetpols netv1.NetworkPolicyList + if err := k8sClient.List(ctx, &existingNetpols, client.InNamespace(np.Namespace)); err != nil { + logger.Error(err, "failed to list NetworkPolicies for cleanup") + return + } + + var netpolsOwnedByNp []netv1.NetworkPolicy + for _, netpol := range existingNetpols.Items { + ownerRefs := netpol.OwnerReferences + if len(ownerRefs) > 0 && ownerRefs[0].Name == np.Name && ownerRefs[0].UID == np.UID { + netpolsOwnedByNp = append(netpolsOwnedByNp, netpol) + } + } + + if len(netpolsOwnedByNp) == 0 { + return + } + + netpolsToDelete := make(map[string]netv1.NetworkPolicy) + + // Populate owned Netpols + for _, netpolOwnedByNp := range netpolsOwnedByNp { + netpolsToDelete[netpolOwnedByNp.Name] = netpolOwnedByNp + } + + for _, nimbusRule := range np.Spec.NimbusRules { + netpolName := np.Name + "-" + strings.ToLower(nimbusRule.ID) + delete(netpolsToDelete, netpolName) + } + + for netpolName := range netpolsToDelete { + netpol := netpolsToDelete[netpolName] + if err := k8sClient.Delete(ctx, &netpol); err != nil { + logger.Error(err, "failed to delete dangling NetworkPolicy", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", 
netpol.Namespace) + continue + } + //TODO: Due to adapters' dependency on nimbus module, the docker image build is + // failing. The relevant code is commented out below (lines 215-217). We shall + // uncomment this code in a subsequent PR. + //if err := adapterutil.UpdateNpStatus(ctx, k8sClient, "NetworkPolicy/"+netpol.Name, np.Name, np.Namespace, true); err != nil { + // logger.Error(err, "failed to update NetworkPolicy status in NimbusPolicy") + //} + logger.Info("Dangling NetworkPolicy deleted", "NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace) + } +} diff --git a/pkg/adapter/util/nimbuspolicy_util.go b/pkg/adapter/util/nimbuspolicy_util.go index 2579971a..5987e46e 100644 --- a/pkg/adapter/util/nimbuspolicy_util.go +++ b/pkg/adapter/util/nimbuspolicy_util.go @@ -22,8 +22,12 @@ func ExtractNpName(policyName string) string { } // UpdateNpStatus updates the provided NimbusPolicy status with the number and -// names of its descendant policies that were created. -func UpdateNpStatus(ctx context.Context, k8sClient client.Client, currPolicyFullName, npName, namespace string) error { +// names of its descendant policies that were created. Every adapter is +// responsible for updating the status field of the corresponding NimbusPolicy +// with the number and names of successfully created policies by calling this +// API. This provides feedback to users about the translation and deployment of +// their security intent +func UpdateNpStatus(ctx context.Context, k8sClient client.Client, currPolicyFullName, npName, namespace string, decrement bool) error { // Since multiple adapters may attempt to update the NimbusPolicy status // concurrently, potentially leading to conflicts. To ensure data consistency, // retry on write failures. 
On conflict, the update is retried with an @@ -35,7 +39,7 @@ func UpdateNpStatus(ctx context.Context, k8sClient client.Client, currPolicyFull return nil } - updateCountAndPoliciesName(latestNp, currPolicyFullName) + updateCountAndPoliciesName(latestNp, currPolicyFullName, decrement) if err := k8sClient.Status().Update(ctx, latestNp); err != nil { return err } @@ -47,11 +51,21 @@ func UpdateNpStatus(ctx context.Context, k8sClient client.Client, currPolicyFull return nil } -func updateCountAndPoliciesName(latestNp *intentv1.NimbusPolicy, currPolicyFullName string) { +func updateCountAndPoliciesName(latestNp *intentv1.NimbusPolicy, currPolicyFullName string, decrement bool) { if !contains(latestNp.Status.Policies, currPolicyFullName) { latestNp.Status.NumberOfAdapterPolicies++ latestNp.Status.Policies = append(latestNp.Status.Policies, currPolicyFullName) } + + if decrement { + latestNp.Status.NumberOfAdapterPolicies-- + for idx, existingPolicyName := range latestNp.Status.Policies { + if existingPolicyName == currPolicyFullName { + latestNp.Status.Policies = append(latestNp.Status.Policies[:idx], latestNp.Status.Policies[idx+1:]...) + return + } + } + } } func contains(existingPolicies []string, policy string) bool {