diff --git a/pkg/adapter/nimbus-k8tls/manager/cronjob.go b/pkg/adapter/nimbus-k8tls/manager/cronjob.go
index 94e17aff..f22fbe4a 100644
--- a/pkg/adapter/nimbus-k8tls/manager/cronjob.go
+++ b/pkg/adapter/nimbus-k8tls/manager/cronjob.go
@@ -55,7 +55,7 @@ func createOrUpdateCj(ctx context.Context, logger logr.Logger, cwnp v1alpha1.Clu
 		logger.Info("configured Kubernetes CronJob", "CronJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace)
 	}
 
-	if err = adapterutil.UpdateCnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnp.Name, false); err != nil {
+	if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnp.Name, false); err != nil {
 		logger.Error(err, "failed to update ClusterNimbusPolicy status")
 	}
 }
@@ -68,7 +68,7 @@ func deleteCronJobs(ctx context.Context, logger logr.Logger, cwnpName string, cr
 			continue
 		}
 
-		if err := adapterutil.UpdateCnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnpName, true); err != nil {
+		if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnpName, true); err != nil {
 			logger.Error(err, "failed to update ClusterNimbusPolicy status")
 		}
 		logger.Info("Dangling Kubernetes CronJob deleted", "CronJobJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace)
diff --git a/pkg/adapter/nimbus-k8tls/manager/manager.go b/pkg/adapter/nimbus-k8tls/manager/manager.go
index 00fe5986..c01dfb73 100644
--- a/pkg/adapter/nimbus-k8tls/manager/manager.go
+++ b/pkg/adapter/nimbus-k8tls/manager/manager.go
@@ -80,7 +80,7 @@ func Run(ctx context.Context) {
 
 func reconcileCronJob(ctx context.Context, name string) {
 	logger := log.FromContext(ctx)
-	cwnpName := adapterutil.ExtractClusterNpName(name)
+	cwnpName := adapterutil.ExtractAnyNimbusPolicyName(name)
 	var cwnp v1alpha1.ClusterNimbusPolicy
 	err := k8sClient.Get(ctx, types.NamespacedName{Name: cwnpName}, &cwnp)
 	if err != nil {
diff --git a/pkg/adapter/nimbus-kubearmor/manager/manager.go b/pkg/adapter/nimbus-kubearmor/manager/manager.go
index 89816a00..1aa26ca5 100644
--- a/pkg/adapter/nimbus-kubearmor/manager/manager.go
+++ b/pkg/adapter/nimbus-kubearmor/manager/manager.go
@@ -10,6 +10,7 @@ import (
 	"github.com/go-logr/logr"
 	kubearmorv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -40,7 +41,7 @@ func init() {
 
 func Run(ctx context.Context) {
 	npCh := make(chan common.Request)
-	deletedNpCh := make(chan common.Request)
+	deletedNpCh := make(chan *unstructured.Unstructured)
 	go globalwatcher.WatchNimbusPolicies(ctx, npCh, deletedNpCh, "SecurityIntentBinding", "ClusterSecurityIntentBinding")
 
 	updatedKspCh := make(chan common.Request)
@@ -58,7 +59,7 @@
 		case createdNp := <-npCh:
 			createOrUpdateKsp(ctx, createdNp.Name, createdNp.Namespace)
 		case deletedNp := <-deletedNpCh:
-			deleteKsp(ctx, deletedNp.Name, deletedNp.Namespace)
+			logKspToDelete(ctx, deletedNp)
 		case updatedKsp := <-updatedKspCh:
 			reconcileKsp(ctx, updatedKsp.Name, updatedKsp.Namespace, false)
 		case deletedKsp := <-deletedKspCh:
@@ -69,7 +70,7 @@
 
 func reconcileKsp(ctx context.Context, kspName, namespace string, deleted bool) {
 	logger := log.FromContext(ctx)
-	npName := adapterutil.ExtractNpName(kspName)
+	npName := adapterutil.ExtractAnyNimbusPolicyName(kspName)
 	var np v1alpha1.NimbusPolicy
 	err := k8sClient.Get(ctx, types.NamespacedName{Name: npName, Namespace: namespace}, &np)
 	if err != nil {
@@ -141,23 +142,23 @@ func createOrUpdateKsp(ctx context.Context, npName, npNamespace string) {
 	}
 }
 
-func deleteKsp(ctx context.Context, npName, npNamespace string) {
+func logKspToDelete(ctx context.Context, deletedNp *unstructured.Unstructured) {
 	logger := log.FromContext(ctx)
 
 	var ksps kubearmorv1.KubeArmorPolicyList
-	if err := k8sClient.List(ctx, &ksps, &client.ListOptions{Namespace: npNamespace}); err != nil {
+	if err := k8sClient.List(ctx, &ksps, &client.ListOptions{Namespace: deletedNp.GetNamespace()}); err != nil {
 		logger.Error(err, "failed to list KubeArmorPolicies")
 		return
 	}
 
 	// Kubernetes GC automatically deletes the child when the parent/owner is
-	// deleted. So, we don't need to do anything in this case since NimbusPolicy is
-	// the owner and when it gets deleted corresponding KSPs will be automatically
-	// deleted.
+	// deleted. So, we don't need to delete the policy because NimbusPolicy is the
+	// owner and when it gets deleted all the corresponding policies will be
+	// automatically deleted.
 	for _, ksp := range ksps.Items {
 		logger.Info("KubeArmorPolicy already deleted due to NimbusPolicy deletion",
 			"KubeArmorPolicy.Name", ksp.Name, "KubeArmorPolicy.Namespace", ksp.Namespace,
-			"NimbusPolicy.Name", npName, "NimbusPolicy.Namespace", npNamespace,
+			"NimbusPolicy.Name", deletedNp.GetName(), "NimbusPolicy.Namespace", deletedNp.GetNamespace(),
 		)
 	}
 }
diff --git a/pkg/adapter/nimbus-kyverno/manager/manager.go b/pkg/adapter/nimbus-kyverno/manager/manager.go
index 339330ed..1897c422 100644
--- a/pkg/adapter/nimbus-kyverno/manager/manager.go
+++ b/pkg/adapter/nimbus-kyverno/manager/manager.go
@@ -39,9 +39,8 @@ func init() {
 }
 
 func Run(ctx context.Context) {
-
 	npCh := make(chan common.Request)
-	deletedNpCh := make(chan common.Request)
+	deletedNpCh := make(chan *unstructured.Unstructured)
 	go globalwatcher.WatchNimbusPolicies(ctx, npCh, deletedNpCh, "SecurityIntentBinding")
 
 	clusterNpChan := make(chan string)
@@ -50,12 +49,10 @@
 
 	updatedKcpCh := make(chan string)
 	deletedKcpCh := make(chan string)
-
 	go watcher.WatchKcps(ctx, updatedKcpCh, deletedKcpCh)
 
 	updatedKpCh := make(chan common.Request)
 	deletedKpCh := make(chan common.Request)
-
 	go watcher.WatchKps(ctx, updatedKpCh, deletedKpCh)
 
 	for {
@@ -63,10 +60,13 @@
 		case <-ctx.Done():
 			close(npCh)
 			close(deletedNpCh)
+
 			close(clusterNpChan)
 			close(deletedClusterNpChan)
+
 			close(updatedKcpCh)
 			close(deletedKcpCh)
+
 			close(updatedKpCh)
 			close(deletedKpCh)
 			return
@@ -75,7 +75,7 @@
 		case createdCnp := <-clusterNpChan:
 			createOrUpdateKcp(ctx, createdCnp)
 		case deletedNp := <-deletedNpCh:
-			deleteKp(ctx, deletedNp.Name, deletedNp.Namespace)
+			logKpToDelete(ctx, deletedNp)
 		case deletedCnp := <-deletedClusterNpChan:
 			logKcpToDelete(ctx, deletedCnp)
 		case updatedKp := <-updatedKpCh:
@@ -87,13 +87,12 @@
 		case deletedKp := <-deletedKpCh:
 			reconcileKp(ctx, deletedKp.Name, deletedKp.Namespace, true)
 		}
-
 	}
 }
 
 func reconcileKp(ctx context.Context, kpName, namespace string, deleted bool) {
 	logger := log.FromContext(ctx)
-	npName := adapterutil.ExtractNpName(kpName)
+	npName := adapterutil.ExtractAnyNimbusPolicyName(kpName)
 	var np v1alpha1.NimbusPolicy
 	err := k8sClient.Get(ctx, types.NamespacedName{Name: npName, Namespace: namespace}, &np)
 	if err != nil {
@@ -112,7 +111,7 @@ func reconcileKp(ctx context.Context, kpName, namespace string, deleted bool) {
 
 func reconcileKcp(ctx context.Context, kcpName string, deleted bool) {
 	logger := log.FromContext(ctx)
-	cnpName := adapterutil.ExtractClusterNpName(kcpName)
+	cnpName := adapterutil.ExtractAnyNimbusPolicyName(kcpName)
 	var cnp v1alpha1.ClusterNimbusPolicy
 	err := k8sClient.Get(ctx, types.NamespacedName{Name: cnpName}, &cnp)
 	if err != nil {
@@ -232,30 +231,34 @@ func createOrUpdateKcp(ctx context.Context, cnpName string) {
 		logger.Info("KyvernoClusterPolicy configured", "KyvernoClusterPolicy.Name", existingKcp.Name)
 	}
 
-	if err = adapterutil.UpdateCnpStatus(ctx, k8sClient, "KyvernoClusterPolicy/"+kcp.Name, cnp.Name, false); err != nil {
+	if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, "KyvernoClusterPolicy/"+kcp.Name, cnp.Name, false); err != nil {
 		logger.Error(err, "failed to update KyvernoClusterPolicies status in NimbusPolicy")
 	}
 }
 
-func deleteKp(ctx context.Context, npName, npNamespace string) {
+func logKpToDelete(ctx context.Context, deletedNp *unstructured.Unstructured) {
 	logger := log.FromContext(ctx)
 
-	var kps kyvernov1.PolicyList
-	if err := k8sClient.List(ctx, &kps, &client.ListOptions{Namespace: npNamespace}); err != nil {
+	var kps kyvernov1.PolicyList
+	if err := k8sClient.List(ctx, &kps, &client.ListOptions{Namespace: deletedNp.GetNamespace()}); err != nil {
 		logger.Error(err, "failed to list KyvernoPolicies")
 		return
 	}
 
 	// Kubernetes GC automatically deletes the child when the parent/owner is
-	// deleted. So, we don't need to do anything in this case since NimbusPolicy is
-	// the owner and when it gets deleted corresponding kps will be automatically
-	// deleted.
+	// deleted. So, we don't need to delete the policy because NimbusPolicy is the
+	// owner and when it gets deleted all the corresponding policies will be
+	// automatically deleted.
 	for _, kp := range kps.Items {
-		logger.Info("KyvernoPolicy already deleted due to NimbusPolicy deletion",
-			"KyvernoPolicy.Name", kp.Name, "KyvernoPolicy.Namespace", kp.Namespace,
-			"NimbusPolicy.Name", npName, "NimbusPolicy.Namespace", npNamespace,
-		)
+		for _, ownerRef := range kp.OwnerReferences {
+			if ownerRef.Name == deletedNp.GetName() && ownerRef.UID == deletedNp.GetUID() {
+				logger.Info("KyvernoPolicy deleted due to NimbusPolicy deletion",
+					"KyvernoPolicy.Name", kp.Name, "KyvernoPolicy.Namespace", kp.Namespace,
+					"NimbusPolicy.Name", deletedNp.GetName(), "NimbusPolicy.Namespace", deletedNp.GetNamespace(),
+				)
+			}
+		}
 	}
 }
@@ -373,7 +376,7 @@ func deleteDanglingkcps(ctx context.Context, cnp v1alpha1.ClusterNimbusPolicy, l
 		logger.Info("Dangling KyvernoClusterPolicy deleted", "KyvernoClusterPolicy.Name", kcp.Name)
 
-		if err := adapterutil.UpdateCnpStatus(ctx, k8sClient, "KyvernoClusterPolicy/"+kcp.Name, cnp.Name, true); err != nil {
+		if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, "KyvernoClusterPolicy/"+kcp.Name, cnp.Name, true); err != nil {
 			logger.Error(err, "failed to update KyvernoClusterPolicy statis in ClusterNimbusPolicy")
 		}
diff --git a/pkg/adapter/nimbus-netpol/manager/netpols_manager.go b/pkg/adapter/nimbus-netpol/manager/netpols_manager.go
index 00b64f1c..1c854421 100644
--- a/pkg/adapter/nimbus-netpol/manager/netpols_manager.go
+++ b/pkg/adapter/nimbus-netpol/manager/netpols_manager.go
@@ -10,6 +10,7 @@ import (
 	"github.com/go-logr/logr"
 	netv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -17,7 +18,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
-	v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1"
+	"github.com/5GSEC/nimbus/api/v1alpha1"
 	"github.com/5GSEC/nimbus/pkg/adapter/common"
 	"github.com/5GSEC/nimbus/pkg/adapter/k8s"
 	adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util"
@@ -43,7 +44,7 @@ func Run(ctx context.Context) {
 	// Watch NimbusPolicies only, and not ClusterNimbusPolicies as NetworkPolicy is
 	// namespaced scoped
 	npCh := make(chan common.Request)
-	deletedNpCh := make(chan common.Request)
+	deletedNpCh := make(chan *unstructured.Unstructured)
 	go globalwatcher.WatchNimbusPolicies(ctx, npCh, deletedNpCh, "SecurityIntentBinding", "ClusterSecurityIntentBinding")
 
 	updatedNetpolCh := make(chan common.Request)
@@ -61,7 +62,7 @@
 		case createdNp := <-npCh:
 			createOrUpdateNetworkPolicy(ctx, createdNp.Name, createdNp.Namespace)
 		case deletedNp := <-deletedNpCh:
-			deleteNetworkPolicy(ctx, deletedNp.Name, deletedNp.Namespace)
+			logNetworkPolicyToDelete(ctx, deletedNp)
 		case updatedNetpol := <-updatedNetpolCh:
 			reconcileNetPol(ctx, updatedNetpol.Name, updatedNetpol.Namespace, false)
 		case deletedNetpol := <-deletedNetpolCh:
@@ -72,7 +73,7 @@
 
 func reconcileNetPol(ctx context.Context, netpolName, namespace string, deleted bool) {
 	logger := log.FromContext(ctx)
-	npName := adapterutil.ExtractNpName(netpolName)
+	npName := adapterutil.ExtractAnyNimbusPolicyName(netpolName)
 	var np v1alpha1.NimbusPolicy
 	err := k8sClient.Get(ctx, types.NamespacedName{Name: npName, Namespace: namespace}, &np)
 	if err != nil {
@@ -143,24 +144,29 @@ func createOrUpdateNetworkPolicy(ctx context.Context, npName, npNamespace string
 	}
 }
 
-func deleteNetworkPolicy(ctx context.Context, npName, npNamespace string) {
+func logNetworkPolicyToDelete(ctx context.Context, deletedNp *unstructured.Unstructured) {
 	logger := log.FromContext(ctx)
 
-	var netpols netv1.NetworkPolicyList
-	if err := k8sClient.List(ctx, &netpols, &client.ListOptions{Namespace: npNamespace}); err != nil {
+	var netpols netv1.NetworkPolicyList
+	if err := k8sClient.List(ctx, &netpols, &client.ListOptions{Namespace: deletedNp.GetNamespace()}); err != nil {
 		logger.Error(err, "failed to list NetworkPolicies")
 		return
 	}
 
 	// Kubernetes GC automatically deletes the child when the parent/owner is
-	// deleted. So, we don't need to do anything in this case since NimbusPolicy is
-	// the owner and when it gets deleted corresponding NetworkPolicies will be automatically
-	// deleted.
+	// deleted. So, we don't need to delete the policy because NimbusPolicy is the
+	// owner and when it gets deleted all the corresponding policies will be
+	// automatically deleted.
 	for _, netpol := range netpols.Items {
-		logger.Info("NetworkPolicy already deleted due to NimbusPolicy deletion",
-			"NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace,
-			"NetworkPolicy.Name", npName, "NetworkPolicy.Namespace", npNamespace,
-		)
+		for _, ownerRef := range netpol.OwnerReferences {
+			if ownerRef.Name == deletedNp.GetName() && ownerRef.UID == deletedNp.GetUID() {
+				logger.Info("NetworkPolicy already deleted due to NimbusPolicy deletion",
+					"NetworkPolicy.Name", netpol.Name, "NetworkPolicy.Namespace", netpol.Namespace,
+					"NetworkPolicy.Name", deletedNp.GetName(), "NetworkPolicy.Namespace", deletedNp.GetNamespace(),
+				)
+				break
+			}
+		}
 	}
 }
diff --git a/pkg/adapter/util/clusternimbuspolicy_util.go b/pkg/adapter/util/clusternimbuspolicy.go
similarity index 61%
rename from pkg/adapter/util/clusternimbuspolicy_util.go
rename to pkg/adapter/util/clusternimbuspolicy.go
index fbe5aac1..23366bf4 100644
--- a/pkg/adapter/util/clusternimbuspolicy_util.go
+++ b/pkg/adapter/util/clusternimbuspolicy.go
@@ -5,30 +5,23 @@ package util
 
 import (
 	"context"
-	"strings"
+	"slices"
 
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/retry"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1"
+	"github.com/5GSEC/nimbus/api/v1alpha1"
 )
 
-// ExtractNpName extracts the actual NimbusPolicy name from a formatted policy
-// name.
-func ExtractClusterNpName(policyName string) string {
-	words := strings.Split(policyName, "-")
-	return strings.Join(words[:len(words)-1], "-")
-}
-
-// UpdateNpStatus updates the provided NimbusPolicy status with the number and
-// names of its descendant policies that were created. Every adapter is
-// responsible for updating the status field of the corresponding NimbusPolicy
-// with the number and names of successfully created policies by calling this
-// API. This provides feedback to users about the translation and deployment of
-// their security intent
-func UpdateCnpStatus(ctx context.Context, k8sClient client.Client, currPolicyFullName, cnpName string, decrement bool) error {
-	// Since multiple adapters may attempt to update the NimbusPolicy status
+// UpdateCwnpStatus updates provided ClusterNimbusPolicy status subresource with
+// the number and names of its descendant policies that were created. Every
+// adapter is responsible for updating the status field of the corresponding
+// ClusterNimbusPolicy with the number and names of successfully created policies
+// by calling this API. This provides feedback to users about the translation and
+// deployment of their security intent.
+func UpdateCwnpStatus(ctx context.Context, k8sClient client.Client, currPolicyFullName, cnpName string, decrement bool) error {
+	// Since multiple adapters may attempt to update the ClusterNimbusPolicy status
 	// concurrently, potentially leading to conflicts. To ensure data consistency,
 	// retry on write failures. On conflict, the update is retried with an
 	// exponential backoff strategy. This provides resilience against potential
@@ -52,7 +45,7 @@ func UpdateCnpStatus(ctx context.Context, k8sClient client.Client, currPolicyFul
 }
 
 func updateCountAndClusterPoliciesName(latestCnp *v1alpha1.ClusterNimbusPolicy, currPolicyFullName string, decrement bool) {
-	if !contains(latestCnp.Status.Policies, currPolicyFullName) {
+	if !slices.Contains(latestCnp.Status.Policies, currPolicyFullName) {
 		latestCnp.Status.NumberOfAdapterPolicies++
 		latestCnp.Status.Policies = append(latestCnp.Status.Policies, currPolicyFullName)
 	}
diff --git a/pkg/adapter/util/nimbuspolicy_util.go b/pkg/adapter/util/nimbuspolicy.go
similarity index 74%
rename from pkg/adapter/util/nimbuspolicy_util.go
rename to pkg/adapter/util/nimbuspolicy.go
index b120bdba..c50d774c 100644
--- a/pkg/adapter/util/nimbuspolicy_util.go
+++ b/pkg/adapter/util/nimbuspolicy.go
@@ -5,28 +5,29 @@ package util
 
 import (
 	"context"
+	"slices"
 	"strings"
 
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/retry"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1"
+	"github.com/5GSEC/nimbus/api/v1alpha1"
 )
 
-// ExtractNpName extracts the actual NimbusPolicy name from a formatted policy
-// name.
-func ExtractNpName(policyName string) string {
+// ExtractAnyNimbusPolicyName extracts the actual
+// NimbusPolicy/ClusterNimbusPolicy name from a formatted policy name.
+func ExtractAnyNimbusPolicyName(policyName string) string {
 	words := strings.Split(policyName, "-")
 	return strings.Join(words[:len(words)-1], "-")
 }
 
-// UpdateNpStatus updates the provided NimbusPolicy status with the number and
-// names of its descendant policies that were created. Every adapter is
-// responsible for updating the status field of the corresponding NimbusPolicy
+// UpdateNpStatus updates the provided NimbusPolicy status subresource with the
+// number and names of its descendant policies that were created. Every adapter
+// is responsible for updating the status field of the corresponding NimbusPolicy
 // with the number and names of successfully created policies by calling this
 // API. This provides feedback to users about the translation and deployment of
-// their security intent
+// their security intent.
 func UpdateNpStatus(ctx context.Context, k8sClient client.Client, currPolicyFullName, npName, namespace string, decrement bool) error {
 	// Since multiple adapters may attempt to update the NimbusPolicy status
 	// concurrently, potentially leading to conflicts. To ensure data consistency,
@@ -52,7 +53,7 @@ func UpdateNpStatus(ctx context.Context, k8sClient client.Client, currPolicyFull
 }
 
 func updateCountAndPoliciesName(latestNp *v1alpha1.NimbusPolicy, currPolicyFullName string, decrement bool) {
-	if !contains(latestNp.Status.Policies, currPolicyFullName) {
+	if !slices.Contains(latestNp.Status.Policies, currPolicyFullName) {
 		latestNp.Status.NumberOfAdapterPolicies++
 		latestNp.Status.Policies = append(latestNp.Status.Policies, currPolicyFullName)
 	}
@@ -67,12 +68,3 @@ func updateCountAndPoliciesName(latestNp *v1alpha1.NimbusPolicy, currPolicyFullN
 		}
 	}
 }
-
-func contains(existingPolicies []string, policy string) bool {
-	for _, existingPolicy := range existingPolicies {
-		if existingPolicy == policy {
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/adapter/util/watcher_util.go b/pkg/adapter/util/watcher.go
similarity index 100%
rename from pkg/adapter/util/watcher_util.go
rename to pkg/adapter/util/watcher.go
diff --git a/pkg/adapter/watcher/clusternimbuspolicy_watcher.go b/pkg/adapter/watcher/clusternimbuspolicy.go
similarity index 72%
rename from pkg/adapter/watcher/clusternimbuspolicy_watcher.go
rename to pkg/adapter/watcher/clusternimbuspolicy.go
index c265f6c8..0ceb0fe1 100644
--- a/pkg/adapter/watcher/clusternimbuspolicy_watcher.go
+++ b/pkg/adapter/watcher/clusternimbuspolicy.go
@@ -4,33 +4,20 @@
 package watcher
 
 import (
-	"bytes"
 	"context"
-	"encoding/json"
 
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/tools/cache"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util"
 )
 
-func setupClusterNpInformer() cache.SharedIndexInformer {
-	clusterNpGvr := schema.GroupVersionResource{
-		Group:    "intent.security.nimbus.com",
-		Version:  "v1alpha1",
-		Resource: "clusternimbuspolicies",
-	}
-	clusterNpInformer := factory.ForResource(clusterNpGvr).Informer()
-	return clusterNpInformer
-}
-
 // WatchClusterNimbusPolicies watches for create, update and delete events for
 // ClusterNimbusPolicies owned by ClusterSecurityIntentBinding and put their info
 // on respective channels.
 func WatchClusterNimbusPolicies(ctx context.Context, clusterNpChan chan string, deletedClusterNpChan chan *unstructured.Unstructured) {
-	clusterNpInformer := setupClusterNpInformer()
+	clusterNimbusPolicyInformer := clusterNpInformer()
 	logger := log.FromContext(ctx)
 
 	handlers := cache.ResourceEventHandlerFuncs{
@@ -52,16 +39,10 @@ func WatchClusterNimbusPolicies(ctx context.Context, clusterNpChan chan string,
 				return
 			}
 
-			oldSpec, errOld := oldU.Object["spec"].(map[string]interface{})
-			newSpec, errNew := newU.Object["spec"].(map[string]interface{})
-
-			if errOld && errNew {
-				oldSpecBytes, _ := json.Marshal(oldSpec)
-				newSpecBytes, _ := json.Marshal(newSpec)
-				if bytes.Equal(oldSpecBytes, newSpecBytes) {
-					return
-				}
+			if oldU.GetGeneration() == newU.GetGeneration() {
+				return
 			}
+
 			logger.Info("ClusterNimbusPolicy modified", "ClusterNimbusPolicy.Name", newU.GetName())
 			clusterNpChan <- newU.GetName()
 		},
@@ -75,11 +56,11 @@ func WatchClusterNimbusPolicies(ctx context.Context, clusterNpChan chan string,
 			deletedClusterNpChan <- u
 		},
 	}
-	_, err := clusterNpInformer.AddEventHandler(handlers)
+	_, err := clusterNimbusPolicyInformer.AddEventHandler(handlers)
 	if err != nil {
 		logger.Error(err, "failed to add event handlers")
 		return
 	}
 	logger.Info("ClusterNimbusPolicy watcher started")
-	clusterNpInformer.Run(ctx.Done())
+	clusterNimbusPolicyInformer.Run(ctx.Done())
 }
diff --git a/pkg/adapter/watcher/nimbuspolicy_watcher.go b/pkg/adapter/watcher/nimbuspolicy.go
similarity index 59%
rename from pkg/adapter/watcher/nimbuspolicy_watcher.go
rename to pkg/adapter/watcher/nimbuspolicy.go
index 0104985d..9afd715b 100644
--- a/pkg/adapter/watcher/nimbuspolicy_watcher.go
+++ b/pkg/adapter/watcher/nimbuspolicy.go
@@ -4,44 +4,21 @@
 package watcher
 
 import (
-	"bytes"
 	"context"
-	"encoding/json"
-	"time"
 
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/tools/cache"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	"github.com/5GSEC/nimbus/pkg/adapter/common"
-	"github.com/5GSEC/nimbus/pkg/adapter/k8s"
 	adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util"
 )
 
-var factory dynamicinformer.DynamicSharedInformerFactory
-
-func init() {
-	k8sClient := k8s.NewDynamicClient()
-	factory = dynamicinformer.NewDynamicSharedInformerFactory(k8sClient, time.Minute)
-}
-
-func npInformer() cache.SharedIndexInformer {
-	nimbusPolicyGvr := schema.GroupVersionResource{
-		Group:    "intent.security.nimbus.com",
-		Version:  "v1alpha1",
-		Resource: "nimbuspolicies",
-	}
-	nimbusPolicyInformer := factory.ForResource(nimbusPolicyGvr).Informer()
-	return nimbusPolicyInformer
-}
-
 // WatchNimbusPolicies watches for create, update and delete events for
 // NimbusPolicies owned by SecurityIntentBinding and put their info on respective
 // channels.
 // ownerKind indicates which owners of the NimbusPolicy are fine
-func WatchNimbusPolicies(ctx context.Context, npCh, deleteNpCh chan common.Request, ownerKind ...string) {
+func WatchNimbusPolicies(ctx context.Context, npCh chan common.Request, deleteNpCh chan *unstructured.Unstructured, ownerKind ...string) {
 	nimbusPolicyInformer := npInformer()
 	logger := log.FromContext(ctx)
@@ -68,16 +45,10 @@ func WatchNimbusPolicies(ctx context.Context, npCh, deleteNpCh chan common.Reque
 				return
 			}
 
-			oldSpec, errOld := oldU.Object["spec"].(map[string]interface{})
-			newSpec, errNew := newU.Object["spec"].(map[string]interface{})
-
-			if errOld && errNew {
-				oldSpecBytes, _ := json.Marshal(oldSpec)
-				newSpecBytes, _ := json.Marshal(newSpec)
-				if bytes.Equal(oldSpecBytes, newSpecBytes) {
-					return
-				}
+			if oldU.GetGeneration() == newU.GetGeneration() {
+				return
 			}
+
 			npNamespacedName := common.Request{
 				Name:      newU.GetName(),
 				Namespace: newU.GetNamespace(),
@@ -86,17 +57,13 @@ func WatchNimbusPolicies(ctx context.Context, npCh, deleteNpCh chan common.Reque
 			npCh <- npNamespacedName
 		},
 		DeleteFunc: func(obj interface{}) {
-			u := obj.(*unstructured.Unstructured)
-			if adapterutil.IsOrphan(u.GetOwnerReferences(), ownerKind...) {
-				logger.V(4).Info("Ignoring orphan NimbusPolicy", "NimbusPolicy.Name", u.GetName(), "NimbusPolicy.Namespace", u.GetNamespace(), "Operation", "Delete")
+			deletedObj := obj.(*unstructured.Unstructured)
+			if adapterutil.IsOrphan(deletedObj.GetOwnerReferences(), ownerKind...) {
+				logger.V(4).Info("Ignoring orphan NimbusPolicy", "NimbusPolicy.Name", deletedObj.GetName(), "NimbusPolicy.Namespace", deletedObj.GetNamespace(), "Operation", "Delete")
 				return
 			}
-			npNamespacedName := common.Request{
-				Name:      u.GetName(),
-				Namespace: u.GetNamespace(),
-			}
-			logger.Info("NimbusPolicy deleted", "NimbusPolicy.Name", u.GetName(), "NimbusPolicy.Namespace", u.GetNamespace())
-			deleteNpCh <- npNamespacedName
+			logger.Info("NimbusPolicy deleted", "NimbusPolicy.Name", deletedObj.GetName(), "NimbusPolicy.Namespace", deletedObj.GetNamespace())
+			deleteNpCh <- deletedObj
 		},
 	}
 	_, err := nimbusPolicyInformer.AddEventHandler(handlers)
diff --git a/pkg/adapter/watcher/watcher.go b/pkg/adapter/watcher/watcher.go
new file mode 100644
index 00000000..c6170704
--- /dev/null
+++ b/pkg/adapter/watcher/watcher.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2023 Authors of Nimbus
+
+package watcher
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic/dynamicinformer"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/5GSEC/nimbus/pkg/adapter/k8s"
+)
+
+var factory dynamicinformer.DynamicSharedInformerFactory
+
+func init() {
+	k8sClient := k8s.NewDynamicClient()
+	factory = dynamicinformer.NewDynamicSharedInformerFactory(k8sClient, time.Minute)
+}
+
+func npInformer() cache.SharedIndexInformer {
+	nimbusPolicyGvr := schema.GroupVersionResource{
+		Group:    "intent.security.nimbus.com",
+		Version:  "v1alpha1",
+		Resource: "nimbuspolicies",
+	}
+	nimbusPolicyInformer := factory.ForResource(nimbusPolicyGvr).Informer()
+	return nimbusPolicyInformer
+}
+
+func clusterNpInformer() cache.SharedIndexInformer {
+	clusterNpGvr := schema.GroupVersionResource{
+		Group:    "intent.security.nimbus.com",
+		Version:  "v1alpha1",
+		Resource: "clusternimbuspolicies",
+	}
+	clusterNimbusPolicyInformer := factory.ForResource(clusterNpGvr).Informer()
+	return clusterNimbusPolicyInformer
+}
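Note on the deletion handling changed above: the NimbusPolicy watcher now sends the deleted object itself (*unstructured.Unstructured) on the delete channel, and each adapter matches its child policies against that object by owner reference (name and UID) before logging. The standalone Go sketch below illustrates only that matching step; the ownedBy helper, the example policy name, and the UID are invented for illustration and are not part of the patch.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
)

// ownedBy reports whether ownerRefs contains a reference to the deleted
// NimbusPolicy, matching on both name and UID as the adapters now do.
func ownedBy(ownerRefs []metav1.OwnerReference, deletedNp *unstructured.Unstructured) bool {
	for _, ref := range ownerRefs {
		if ref.Name == deletedNp.GetName() && ref.UID == deletedNp.GetUID() {
			return true
		}
	}
	return false
}

func main() {
	// Stand-in for the object received on the deleted-policy channel
	// (name and UID are made up for this example).
	np := &unstructured.Unstructured{}
	np.SetName("example-nimbus-policy")
	np.SetUID(types.UID("11111111-2222-3333-4444-555555555555"))

	// Owner references as they might appear on a child policy created for np.
	refs := []metav1.OwnerReference{{Name: "example-nimbus-policy", UID: types.UID("11111111-2222-3333-4444-555555555555")}}
	fmt.Println(ownedBy(refs, np)) // prints: true
}

The update handlers lean on object metadata in the same spirit: for resources with a status subresource, metadata.generation is bumped only when the spec changes, so comparing oldU.GetGeneration() with newU.GetGeneration() filters out status-only updates without marshalling and diffing the spec as before.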