From 67712a9f02b4464c4d50e6f915c9323d30d1f879 Mon Sep 17 00:00:00 2001
From: Ved Ratan <82467006+VedRatan@users.noreply.github.com>
Date: Thu, 7 Nov 2024 21:11:44 -0800
Subject: [PATCH] feat: virtual-patch intent (#245)

* feat: virtual-patch initial commit

Signed-off-by: VedRatan

* feat: added kyverno, karmor, netpol policy creation, deletion, and update logic

Signed-off-by: VedRatan

* feat: added support for network policy

Signed-off-by: VedRatan

* feat: added scheduled fetching of latest CVE data

Signed-off-by: VedRatan

* chore: resolved all the review comments

Signed-off-by: VedRatan

* (docs): added intent description (#265)

* fix: Fix CRDs version in PROJECT file

Signed-off-by: Anurag Rajawat

* doc: Add Intent and CRDs spec docs

Signed-off-by: Anurag Rajawat

* feat: added intent description

Signed-off-by: VedRatan

* docs: added pkg-mgr-execution intent desc

Signed-off-by: VedRatan

* docs: added coco-workload intent details

Signed-off-by: VedRatan

* docs: update exploit-pfa

Signed-off-by: VedRatan

* update command

Signed-off-by: VedRatan

* doc: Update docs

Signed-off-by: Anurag Rajawat

* refactored the docs

Signed-off-by: VedRatan

* updated quick-tutorials

Signed-off-by: VedRatan

---------

Signed-off-by: Anurag Rajawat
Signed-off-by: VedRatan
Co-authored-by: Anurag Rajawat

* chore: handled error gracefully, update slice search command

Signed-off-by: VedRatan

* fix: tests

Signed-off-by: VedRatan

* fix: error handling and review comments

Signed-off-by: VedRatan

---------

Signed-off-by: VedRatan
Signed-off-by: Anurag Rajawat
Signed-off-by: Ved Ratan <82467006+VedRatan@users.noreply.github.com>
Co-authored-by: Anurag Rajawat
---
 docs/intents/escape-to-host.md                |   2 +-
 .../escape-to-host-si-csib-with-params.yaml   |   2 +-
 .../escape-to-host-with-params.yaml           |   2 +-
 examples/namespaced/virtual-patch-si-sib.yaml |  33 ++
 pkg/adapter/idpool/idpool.go                  |   2 +
 pkg/adapter/nimbus-kyverno/go.mod             |   1 +
 pkg/adapter/nimbus-kyverno/go.sum             |   3 +
 pkg/adapter/nimbus-kyverno/manager/manager.go |   4 +
 .../nimbus-kyverno/processor/kcpbuilder.go    |  18 +-
 .../nimbus-kyverno/processor/kpbuilder.go     | 471 +++++++++++++++---
 pkg/adapter/nimbus-kyverno/utils/utils.go     |  44 ++
 .../nimbus-kyverno/watcher/kpwatcher.go       |   3 +-
 virtual_patch_si.yaml                         |  13 +
 virtual_patch_sib.yaml                        |  11 +
 vp.json                                       | 169 +++++++
 15 files changed, 708 insertions(+), 70 deletions(-)
 create mode 100644 examples/namespaced/virtual-patch-si-sib.yaml
 create mode 100644 virtual_patch_si.yaml
 create mode 100644 virtual_patch_sib.yaml
 create mode 100644 vp.json

diff --git a/docs/intents/escape-to-host.md b/docs/intents/escape-to-host.md
index 8a4f56df..c54821cf 100644
--- a/docs/intents/escape-to-host.md
+++ b/docs/intents/escape-to-host.md
@@ -30,7 +30,7 @@ The escapeToHost intent results in `KyvernoPolicy` and a couple of `KubearmorPol
 
 ```
 params:
-  psa_level: ["restricted"]
+  psaLevel: ["restricted"]
 ```
 - The `escapeToHost` intent and corresponding policy work together to establish a strong security posture for the application. By enforcing pod security standards, the policy reduces the risk of container escape, which is critical for maintaining the integrity of the host system.
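Note: the `psaLevel` key above must match the parameter lookup key used by the Kyverno adapter. A minimal sketch of that resolution, mirroring the switch in processor/kpbuilder.go further down in this patch (the `api` package is assumed to be k8s.io/pod-security-admission/api):

    psaLevel := api.LevelBaseline // default when the param is absent or unrecognized
    if vals := rule.Params["psaLevel"]; len(vals) > 0 && vals[0] == "restricted" {
        psaLevel = api.LevelRestricted
    }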
diff --git a/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml b/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml
index 6d9b7f10..3e82d76c 100644
--- a/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml
+++ b/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml
@@ -11,7 +11,7 @@ spec:
     description: "An attacker can breach container boundaries and can gain access to the host machine"
     action: Block
     params:
-      psa_level: ["restricted"]
+      psaLevel: ["restricted"]
 ---
 apiVersion: intent.security.nimbus.com/v1alpha1
 kind: ClusterSecurityIntentBinding
diff --git a/examples/namespaced/escape-to-host-with-params.yaml b/examples/namespaced/escape-to-host-with-params.yaml
index f6a09a0b..a43903c6 100644
--- a/examples/namespaced/escape-to-host-with-params.yaml
+++ b/examples/namespaced/escape-to-host-with-params.yaml
@@ -11,7 +11,7 @@ spec:
     description: "An attacker can breach container boundaries and can gain access to the host machine"
     action: Block
     params:
-      psa_level: ["restricted"]
+      psaLevel: ["restricted"]
 ---
 apiVersion: intent.security.nimbus.com/v1alpha1
 kind: SecurityIntentBinding
diff --git a/examples/namespaced/virtual-patch-si-sib.yaml b/examples/namespaced/virtual-patch-si-sib.yaml
new file mode 100644
index 00000000..31504de8
--- /dev/null
+++ b/examples/namespaced/virtual-patch-si-sib.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 Authors of Nimbus
+
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntent
+metadata:
+  name: virtual-patch
+spec:
+  intent:
+    id: virtualPatch
+    description: >
+      There might exist CVEs associated with certain images; adversaries might exploit these CVEs and pose a potential
+      threat to any production server. Check and apply a virtual patch for a given set of CVEs as per a schedule.
+    action: Block
+    params:
+      cveList:
+        - "CVE-2024-4439"
+        - "CVE-2024-27268"
+      schedule: ["0 23 * * SUN"]
+
+---
+
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntentBinding
+metadata:
+  name: virtual-patch-binding
+spec:
+  intents:
+    - name: virtual-patch
+  selector:
+    workloadSelector:
+      matchLabels:
+        app: prod
\ No newline at end of file
diff --git a/pkg/adapter/idpool/idpool.go b/pkg/adapter/idpool/idpool.go
index c5d4f939..414250db 100644
--- a/pkg/adapter/idpool/idpool.go
+++ b/pkg/adapter/idpool/idpool.go
@@ -19,6 +19,7 @@ const (
 	CocoWorkload = "cocoWorkload"
 	AssessTLS    = "assessTLS"
 	DenyENAccess = "denyExternalNetworkAccess"
+	VirtualPatch = "virtualPatch"
 )
 
 // KaIds are IDs supported by KubeArmor.
@@ -45,6 +46,7 @@ var NetPolIDs = []string{
 var KyvIds = []string{
 	EscapeToHost,
 	CocoWorkload,
+	VirtualPatch,
 }
 
 // k8tlsIds are IDs supported by k8tls.
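Note: registering the ID in KyvIds is what makes the Kyverno adapter act on it. A minimal sketch of the dispatch, mirroring BuildKpsFrom in processor/kpbuilder.go below (IsIdSupportedBy is assumed to consult KyvIds for the "kyverno" engine):

    for _, nimbusRule := range np.Spec.NimbusRules {
        // true for "virtualPatch" once it is part of KyvIds
        if idpool.IsIdSupportedBy(nimbusRule.ID, "kyverno") {
            kps, err := buildKpFor(nimbusRule.ID, np, logger)
            if err != nil {
                logger.Error(err, "error while building kyverno policies")
            }
            allkps = append(allkps, kps...)
        }
    }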
diff --git a/pkg/adapter/nimbus-kyverno/go.mod b/pkg/adapter/nimbus-kyverno/go.mod
index 8627bb36..790eca3a 100644
--- a/pkg/adapter/nimbus-kyverno/go.mod
+++ b/pkg/adapter/nimbus-kyverno/go.mod
@@ -202,6 +202,7 @@ require (
 	github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect
 	github.com/r3labs/diff v1.1.0 // indirect
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/sagikazarmark/locafero v0.3.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/sassoftware/relic v7.2.1+incompatible // indirect
diff --git a/pkg/adapter/nimbus-kyverno/go.sum b/pkg/adapter/nimbus-kyverno/go.sum
index 10acb72c..481c93fd 100644
--- a/pkg/adapter/nimbus-kyverno/go.sum
+++ b/pkg/adapter/nimbus-kyverno/go.sum
@@ -1225,6 +1225,9 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA=
 github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk=
+github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
diff --git a/pkg/adapter/nimbus-kyverno/manager/manager.go b/pkg/adapter/nimbus-kyverno/manager/manager.go
index 23e0167f..cbb5de08 100644
--- a/pkg/adapter/nimbus-kyverno/manager/manager.go
+++ b/pkg/adapter/nimbus-kyverno/manager/manager.go
@@ -59,6 +59,7 @@ func Run(ctx context.Context) {
 	deletedKpCh := make(chan common.Request)
 	go watcher.WatchKps(ctx, updatedKpCh, deletedKpCh)
 
+
 	for {
 		select {
 		case <-ctx.Done():
@@ -431,6 +432,9 @@ func createTriggerForKp(ctx context.Context, nameNamespace common.Request) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      nameNamespace.Name + "-trigger-configmap",
 			Namespace: nameNamespace.Namespace,
+			Labels: map[string]string{
+				"trigger": "configmap",
+			},
 		},
 		Data: map[string]string{
 			"data": "dummy",
diff --git a/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go b/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go
index b13f8c37..7710c752 100644
--- a/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go
+++ b/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go
@@ -121,7 +121,7 @@ func clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1
 			}
 			matchFilters = append(matchFilters, resourceFilter)
 		}
-	} else if namespaces[0] == "*" && len(labels) == 0 { 
+	} else if namespaces[0] == "*" && len(labels) == 0 {
 		if len(excludeNamespaces) > 0 {
 			resourceFilter = kyvernov1.ResourceFilter{
 				ResourceDescription: kyvernov1.ResourceDescription{
@@ -167,7 +167,7 @@ func clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1
 		},
 		Mutation: kyvernov1.Mutation{
 			Targets: []kyvernov1.TargetResourceSpec{
-				kyvernov1.TargetResourceSpec{
+				{
 					ResourceSpec: kyvernov1.ResourceSpec{
 						APIVersion: "apps/v1",
 						Kind:       "Deployment",
@@ -185,16 +185,16 @@ func clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1
 }
 
 func clusterEscapeToHost(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1.Rule) kyvernov1.ClusterPolicy {
-	var psa_level api.Level = api.LevelBaseline
+	var psaLevel api.Level = api.LevelBaseline
 
-	if rule.Params["psa_level"] != nil {
+	if rule.Params["psaLevel"] != nil {
 
-		switch rule.Params["psa_level"][0] {
+		switch rule.Params["psaLevel"][0] {
 		case "restricted":
-			psa_level = api.LevelRestricted
+			psaLevel = api.LevelRestricted
 
 		default:
-			psa_level = api.LevelBaseline
+			psaLevel = api.LevelBaseline
 		}
 	}
 
@@ -241,7 +241,7 @@ func clusterEscapeToHost(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1.Rule)
 	} else if namespaces[0] == "*" && len(labels) > 0 {
 		if len(excludeNamespaces) > 0 {
 			resourceFilter = kyvernov1.ResourceFilter{
-				ResourceDescription: kyvernov1.ResourceDescription {
+				ResourceDescription: kyvernov1.ResourceDescription{
 					Namespaces: excludeNamespaces,
 				},
 			}
@@ -296,7 +296,7 @@ func clusterEscapeToHost(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1.Rule)
 		},
 		Validation: kyvernov1.Validation{
 			PodSecurity: &kyvernov1.PodSecurity{
-				Level:   psa_level,
+				Level:   psaLevel,
 				Version: "latest",
 			},
 		},
diff --git a/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go b/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go
index 8a559ad3..a21d9a7a 100644
--- a/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go
+++ b/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go
@@ -6,13 +6,18 @@ package processor
 import (
 	"context"
 	"encoding/json"
+	"fmt"
+	"os"
+	"strconv"
 	"strings"
 
 	v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1"
 	"github.com/5GSEC/nimbus/pkg/adapter/idpool"
 	"github.com/5GSEC/nimbus/pkg/adapter/k8s"
+	"github.com/5GSEC/nimbus/pkg/adapter/nimbus-kyverno/utils"
 	"github.com/go-logr/logr"
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
+	"github.com/robfig/cron/v3"
 	"go.uber.org/multierr"
 	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,25 +37,23 @@ func init() {
 func BuildKpsFrom(logger logr.Logger, np *v1alpha1.NimbusPolicy) []kyvernov1.Policy {
 	// Build KPs based on given IDs
 	var allkps []kyvernov1.Policy
-	admission := true
 	background := true
 	for _, nimbusRule := range np.Spec.NimbusRules {
 		id := nimbusRule.ID
 		if idpool.IsIdSupportedBy(id, "kyverno") {
-			kps, err := buildKpFor(id, np)
+			kps, err := buildKpFor(id, np, logger)
 			if err != nil {
 				logger.Error(err, "error while building kyverno policies")
 			}
 			for _, kp := range kps {
-				if id != "cocoWorkload" {
+				if id != "cocoWorkload" && id != "virtualPatch" {
 					kp.Name = np.Name + "-" + strings.ToLower(id)
 				}
 				kp.Namespace = np.Namespace
 				kp.Annotations = make(map[string]string)
 				kp.Annotations["policies.kyverno.io/description"] = nimbusRule.Description
-				kp.Spec.Admission = &admission
 				kp.Spec.Background = &background
-
+
 				if nimbusRule.Rule.RuleAction == "Block" {
 					kp.Spec.ValidationFailureAction = kyvernov1.ValidationFailureAction("Enforce")
 				} else {
@@ -68,21 +71,130 @@ func BuildKpsFrom(logger logr.Logger, np *v1alpha1.NimbusPolicy) []kyvernov1.Pol
 }
 
 // buildKpFor builds a KyvernoPolicy based on intent ID supported by Kyverno Policy Engine.
-func buildKpFor(id string, np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error) {
+func buildKpFor(id string, np *v1alpha1.NimbusPolicy, logger logr.Logger) ([]kyvernov1.Policy, error) {
 	var kps []kyvernov1.Policy
 	switch id {
 	case idpool.EscapeToHost:
-		kps = append(kps, escapeToHost(np, np.Spec.NimbusRules[0].Rule))
+		kps = append(kps, escapeToHost(np))
 	case idpool.CocoWorkload:
 		kpols, err := cocoRuntimeAddition(np)
 		if err != nil {
 			return kps, err
 		}
 		kps = append(kps, kpols...)
+	case idpool.VirtualPatch:
+		kpols, err := virtualPatch(np, logger)
+		if err != nil {
+			return kps, err
+		}
+		kps = append(kps, kpols...)
+		watchCVES(np, logger)
 	}
 	return kps, nil
 }
 
+func watchCVES(np *v1alpha1.NimbusPolicy, logger logr.Logger) {
+	rule := np.Spec.NimbusRules[0].Rule
+	// default: daily at midnight; overridable via the intent's schedule param (standard 5-field cron spec)
+	schedule := "0 0 * * *"
+	if rule.Params["schedule"] != nil {
+		schedule = rule.Params["schedule"][0]
+	}
+	// Schedule periodic deletion of the NimbusPolicy so it is re-created and its policies are rebuilt from the latest CVE data
+	c := cron.New()
+	_, err := c.AddFunc(schedule, func() {
+		logger.Info("Checking for CVE updates and updating policies")
+		err := deleteNimbusPolicy(np, logger)
+		if err != nil {
+			logger.Error(err, "error while updating policies")
+		}
+	})
+	if err != nil {
+		logger.Error(err, "error while adding the schedule to update policies")
+		os.Exit(1)
+	}
+	c.Start()
+
+}
+
+func deleteNimbusPolicy(np *v1alpha1.NimbusPolicy, logger logr.Logger) error {
+	nimbusPolicyGVR := schema.GroupVersionResource{Group: "intent.security.nimbus.com", Version: "v1alpha1", Resource: "nimbuspolicies"}
+	err := client.Resource(nimbusPolicyGVR).Namespace(np.Namespace).Delete(context.TODO(), np.Name, metav1.DeleteOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to delete Nimbus Policy: %s", err.Error())
+	}
+	logger.Info("Nimbus policy deleted successfully")
+	return nil
+}
+
+func escapeToHost(np *v1alpha1.NimbusPolicy) kyvernov1.Policy {
+	rule := np.Spec.NimbusRules[0].Rule
+	var psaLevel api.Level = api.LevelBaseline
+	var matchResourceFilters []kyvernov1.ResourceFilter
+
+	if rule.Params["psaLevel"] != nil {
+
+		switch rule.Params["psaLevel"][0] {
+		case "restricted":
+			psaLevel = api.LevelRestricted
+
+		default:
+			psaLevel = api.LevelBaseline
+		}
+	}
+
+	labels := np.Spec.Selector.MatchLabels
+
+	if len(labels) > 0 {
+		for key, value := range labels {
+			resourceFilter := kyvernov1.ResourceFilter{
+				ResourceDescription: kyvernov1.ResourceDescription{
+					Kinds: []string{
+						"v1/Pod",
+					},
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							key: value,
+						},
+					},
+				},
+			}
+			matchResourceFilters = append(matchResourceFilters, resourceFilter)
+		}
+	} else {
+		resourceFilter := kyvernov1.ResourceFilter{
+			ResourceDescription: kyvernov1.ResourceDescription{
+				Kinds: []string{
+					"v1/Pod",
+				},
+			},
+		}
+		matchResourceFilters = append(matchResourceFilters, resourceFilter)
+	}
+
+	background := true
+	kp := kyvernov1.Policy{
+		Spec: kyvernov1.Spec{
+			Background: &background,
+			Rules: []kyvernov1.Rule{
+				{
+					Name: "pod-security-standard",
+					MatchResources: kyvernov1.MatchResources{
+						Any: matchResourceFilters,
+					},
+					Validation: kyvernov1.Validation{
+						PodSecurity: &kyvernov1.PodSecurity{
+							Level:   psaLevel,
+							Version: "latest",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	return kp
+}
+
 func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error) {
 	var kps []kyvernov1.Policy
 	var errs []error
@@ -93,7 +205,7 @@ func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error)
 	runtimeClass := "kata-clh"
 	params := np.Spec.NimbusRules[0].Rule.Params["runtimeClass"]
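+	// when provided, the first param value overrides the default "kata-clh" runtime class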
 	if params != nil {
-		runtimeClass = params[0] 
+		runtimeClass = params[0]
 	}
 	patchStrategicMerge := map[string]interface{}{
 		"spec": map[string]interface{}{
@@ -239,75 +351,322 @@ func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error)
 	return kps, multierr.Combine(errs...)
 }
 
-func escapeToHost(np *v1alpha1.NimbusPolicy, rule v1alpha1.Rule) kyvernov1.Policy {
-
-	var psa_level api.Level = api.LevelBaseline
-	var matchResourceFilters []kyvernov1.ResourceFilter
-
-	if rule.Params["psa_level"] != nil {
+func virtualPatch(np *v1alpha1.NimbusPolicy, logger logr.Logger) ([]kyvernov1.Policy, error) {
+	rule := np.Spec.NimbusRules[0].Rule
+	requiredCVES := rule.Params["cveList"]
+	var kps []kyvernov1.Policy
+	resp, err := utils.FetchVirtualPatchData[[]map[string]any]()
+	if err != nil {
+		return kps, err
+	}
+	for _, currObj := range resp {
+		image := currObj["image"].(string)
+		cves := currObj["cves"].([]any)
+		for _, obj := range cves {
+			cveData := obj.(map[string]any)
+			cve := cveData["cve"].(string)
+			if utils.Contains(requiredCVES, cve) {
+				// create Kyverno generate policies, which in turn generate the native virtual-patch policies for the matched CVEs
+				karmorPolCount := 1
+				kyvPolCount := 1
+				netPolCount := 1
+				virtualPatch := cveData["virtual_patch"].([]any)
+				for _, policy := range virtualPatch {
+					pol := policy.(map[string]any)
+					policyData, ok := pol["karmor"].(map[string]any)
+					if ok {
+						karmorPol, err := generatePol("karmor", cve, image, np, policyData, karmorPolCount, logger)
+						if err != nil {
+							logger.V(2).Error(err, "Error while generating karmor policy")
+						} else {
+							kps = append(kps, karmorPol)
+							karmorPolCount += 1
+						}
 
-		switch rule.Params["psa_level"][0] {
-		case "restricted":
-			psa_level = api.LevelRestricted
+					}
+					policyData, ok = pol["kyverno"].(map[string]any)
+					if ok {
+						kyvernoPol, err := generatePol("kyverno", cve, image, np, policyData, kyvPolCount, logger)
+						if err != nil {
+							logger.V(2).Error(err, "Error while generating kyverno policy")
+						} else {
+							kps = append(kps, kyvernoPol)
+							kyvPolCount += 1
+						}
+					}
 
-		default:
-			psa_level = api.LevelBaseline
+					policyData, ok = pol["netpol"].(map[string]any)
+					if ok {
+						netPol, err := generatePol("netpol", cve, image, np, policyData, netPolCount, logger)
+						if err != nil {
+							logger.V(2).Error(err, "Error while generating network policy")
+						} else {
+							kps = append(kps, netPol)
+							netPolCount += 1
+						}
+					}
+				}
+			}
 		}
 	}
+	return kps, nil
+}
+
+func addManagedByAnnotation(kp *kyvernov1.Policy) {
+	kp.Annotations["app.kubernetes.io/managed-by"] = "nimbus-kyverno"
+}
 
+func generatePol(polengine string, cve string, image string, np *v1alpha1.NimbusPolicy, policyData map[string]any, count int, logger logr.Logger) (kyvernov1.Policy, error) {
+	var pol kyvernov1.Policy
 	labels := np.Spec.Selector.MatchLabels
+	cve = strings.ToLower(cve)
+	uid := np.ObjectMeta.GetUID()
+	ownerShipList := []any{
+		map[string]any{
+			"apiVersion":         "intent.security.nimbus.com/v1alpha1",
+			"blockOwnerDeletion": true,
+			"controller":         true,
+			"kind":               "NimbusPolicy",
+			"name":               np.GetName(),
+			"uid":                uid,
+		},
+	}
 
-	if len(labels) > 0 {
+	preConditionMap := map[string]any{
+		"all": []any{
+			map[string]any{
+				"key":      image,
+				"operator": "AnyIn",
+				"value":    "{{ request.object.spec.containers[].image }}",
+			},
+		},
+	}
+	preconditionBytes, _ := json.Marshal(preConditionMap)
+
+	getPodName := kyvernov1.ContextEntry{
+		Name: "podName",
+		Variable: &kyvernov1.Variable{
+			JMESPath: "request.object.metadata.name",
+		},
+	}
+
+	metadataMap := policyData["metadata"].(map[string]any)
policyData["metadata"].(map[string]any) + + // set OwnerShipRef for the generatedPol + + metadataMap["ownerReferences"] = ownerShipList + + specMap := policyData["spec"].(map[string]any) + + jmesPathContainerNameQuery := "request.object.spec.containers[?(@.image=='" + image + "')].name | [0]" + + delete(policyData, "apiVersion") + delete(policyData, "kind") + + generatorPolicyName := np.Name + "-" + cve + "-" + polengine + "-" + strconv.Itoa(count) + + // kubearmor policy generation + + if polengine == "karmor" { + generatedPolicyName := metadataMap["name"].(string) + "-{{ podName }}" + selector := specMap["selector"].(map[string]any) + delete(selector, "matchLabels") + selectorLabels := make(map[string]any) for key, value := range labels { - resourceFilter := kyvernov1.ResourceFilter { - ResourceDescription: kyvernov1.ResourceDescription{ - Kinds: []string{ - "v1/Pod", - }, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - key: value, + selectorLabels[key] = value + } + selectorLabels["kubearmor.io/container.name"] = "{{ containerName }}" + selector["matchLabels"] = selectorLabels + + policyBytes, err := json.Marshal(policyData) + if err != nil { + return pol, err + } + pol = kyvernov1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: generatorPolicyName, + }, + Spec: kyvernov1.Spec{ + GenerateExisting: true, + Rules: []kyvernov1.Rule{ + { + Name: cve + "virtual-patch-karmor", + MatchResources: kyvernov1.MatchResources{ + Any: kyvernov1.ResourceFilters{ + { + ResourceDescription: kyvernov1.ResourceDescription{ + Kinds: []string{ + "v1/Pod", + }, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + }, + }, + }, + RawAnyAllConditions: &v1.JSON{Raw: preconditionBytes}, + Context: []kyvernov1.ContextEntry{ + { + Name: "containerName", + Variable: &kyvernov1.Variable{ + JMESPath: jmesPathContainerNameQuery, + }, + }, + getPodName, + }, + Generation: kyvernov1.Generation{ + ResourceSpec: kyvernov1.ResourceSpec{ + APIVersion: "security.kubearmor.com/v1", + Kind: "KubeArmorPolicy", + Name: generatedPolicyName, + Namespace: np.GetNamespace(), + }, + RawData: &v1.JSON{Raw: policyBytes}, }, }, }, - } - matchResourceFilters = append(matchResourceFilters, resourceFilter) + }, } - } else { - resourceFilter := kyvernov1.ResourceFilter{ - ResourceDescription: kyvernov1.ResourceDescription{ - Kinds: []string{ - "v1/Pod", + } + + // kyverno policy generation + + if polengine == "kyverno" { + + generatedPolicyName := metadataMap["name"].(string) + selectorMap := map[string]any{ + "matchLabels": labels, + } + + kindMap := map[string]any{ + "kinds": []any{ + "Pod", + }, + "selector": selectorMap, + } + + newMatchMap := map[string]any{ + "any": []any{ + map[string]any{ + "resources": kindMap, }, }, } - matchResourceFilters = append(matchResourceFilters, resourceFilter) - } + rulesMap := specMap["rules"].([]any) + rule := rulesMap[0].(map[string]any) - background := true - kp := kyvernov1.Policy{ - Spec: kyvernov1.Spec{ - Background: &background, - Rules: []kyvernov1.Rule{ - { - Name: "pod-security-standard", - MatchResources: kyvernov1.MatchResources{ - Any: matchResourceFilters, - }, - Validation: kyvernov1.Validation{ - PodSecurity: &kyvernov1.PodSecurity{ - Level: psa_level, - Version: "latest", + // adding resources as Pod and ommitting all the incoming resource types + delete(rule, "match") + rule["match"] = newMatchMap + + // appending the image matching precondition to the existing preconditions + preCndMap := rule["preconditions"].(map[string]any) + conditionsList, 
+		if ok {
+			preConditionMap["all"] = append(preConditionMap["all"].([]any), conditionsList...)
+		}
+
+		delete(rule, "preconditions")
+
+		rule["preconditions"] = preConditionMap
+
+		policyBytes, err := json.Marshal(policyData)
+		if err != nil {
+			return pol, err
+		}
+
+		pol = kyvernov1.Policy{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: generatorPolicyName,
+			},
+			Spec: kyvernov1.Spec{
+				GenerateExisting: true,
+				Rules: []kyvernov1.Rule{
+					{
+						Name: cve + "-virtual-patch-kyverno",
+						MatchResources: kyvernov1.MatchResources{
+							Any: kyvernov1.ResourceFilters{
+								{
+									ResourceDescription: kyvernov1.ResourceDescription{
+										Kinds: []string{
+											"v1/Pod",
+										},
+										Selector: &metav1.LabelSelector{
+											MatchLabels: labels,
+										},
+									},
+								},
+							},
+						},
+						Generation: kyvernov1.Generation{
+							ResourceSpec: kyvernov1.ResourceSpec{
+								APIVersion: "kyverno.io/v1",
+								Kind:       "Policy",
+								Name:       generatedPolicyName,
+								Namespace:  np.GetNamespace(),
+							},
+							RawData: &v1.JSON{Raw: policyBytes},
+						},
+					},
+				},
+			},
-		},
+		}
 	}
-	return kp
-}
 
-func addManagedByAnnotation(kp *kyvernov1.Policy) {
-	kp.Annotations["app.kubernetes.io/managed-by"] = "nimbus-kyverno"
+	// network policy generation
+
+	if polengine == "netpol" {
+		generatedPolicyName := metadataMap["name"].(string)
+		selector := specMap["podSelector"].(map[string]any)
+		delete(selector, "matchLabels")
+		selector["matchLabels"] = labels
+
+		policyBytes, err := json.Marshal(policyData)
+
+		if err != nil {
+			return pol, err
+		}
+		pol = kyvernov1.Policy{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: generatorPolicyName,
+			},
+			Spec: kyvernov1.Spec{
+				GenerateExisting: true,
+				Rules: []kyvernov1.Rule{
+					{
+						Name: cve + "-virtual-patch-netpol",
+						MatchResources: kyvernov1.MatchResources{
+							Any: kyvernov1.ResourceFilters{
+								{
+									ResourceDescription: kyvernov1.ResourceDescription{
+										Kinds: []string{
+											"v1/Pod",
+										},
+										Selector: &metav1.LabelSelector{
+											MatchLabels: labels,
+										},
+									},
+								},
+							},
+						},
+						RawAnyAllConditions: &v1.JSON{Raw: preconditionBytes},
+						Context: []kyvernov1.ContextEntry{
+							getPodName,
+						},
+						Generation: kyvernov1.Generation{
+							ResourceSpec: kyvernov1.ResourceSpec{
+								APIVersion: "networking.k8s.io/v1",
+								Kind:       "NetworkPolicy",
+								Name:       generatedPolicyName,
+								Namespace:  np.GetNamespace(),
+							},
+							RawData: &v1.JSON{Raw: policyBytes},
+						},
+					},
+				},
+			},
+		}
+	}
+	return pol, nil
 }
diff --git a/pkg/adapter/nimbus-kyverno/utils/utils.go b/pkg/adapter/nimbus-kyverno/utils/utils.go
index 73ac5bb6..c619658d 100644
--- a/pkg/adapter/nimbus-kyverno/utils/utils.go
+++ b/pkg/adapter/nimbus-kyverno/utils/utils.go
@@ -4,8 +4,11 @@ package utils
 
 import (
+	"encoding/json"
 	"fmt"
+	"os"
 	"reflect"
+	"slices"
 	"strings"
 
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
@@ -14,6 +17,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+var VirtualPatchData []map[string]any
+
 func GetGVK(kind string) string {
 	// Map to store the mappings of kinds to their corresponding API versions
 	kindToAPIVersion := map[string]string{
@@ -122,3 +127,42 @@ func Title(input string) string {
 
 	return toTitle.String(input)
 }
+
+func FetchVirtualPatchData[T any]() (T, error) {
+	var out T
+	// Read the virtual patch data file
+	bytes, err := os.ReadFile("../../../vp.json")
+	if err != nil {
+		return out, err
+	}
+
+	err = json.Unmarshal(bytes, &out)
+	if err != nil {
+		return out, err
+	}
+
+	return out, nil
+}
+
+func Contains(slice []string, value string) bool {
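+	// thin wrapper over the stdlib slices package (Go 1.21+)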
+	return slices.Contains(slice, value)
+}
+
+func ParseImageString(imageString string) (string, string) {
+	parts := strings.SplitN(imageString, ":", 2)
+	repository := parts[0]
+	tag := "latest" // Default tag
+
+	if len(parts) > 1 {
+		tag = parts[1]
+	}
+
+	return repository, tag
+}
diff --git a/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go b/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go
index 9ddc0522..9bfc7e0b 100644
--- a/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go
+++ b/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go
@@ -11,6 +11,7 @@ import (
 	"github.com/5GSEC/nimbus/pkg/adapter/common"
 	"github.com/5GSEC/nimbus/pkg/adapter/k8s"
 	"github.com/5GSEC/nimbus/pkg/adapter/nimbus-kyverno/utils"
+	adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util"
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -122,5 +123,3 @@ func WatchKps(ctx context.Context, updatedKpCh, deletedKpCh chan common.Request)
 	logger.Info("KyvernoPolicy watcher started")
 	informer.Run(ctx.Done())
 }
-
-
diff --git a/virtual_patch_si.yaml b/virtual_patch_si.yaml
new file mode 100644
index 00000000..fdfccb78
--- /dev/null
+++ b/virtual_patch_si.yaml
@@ -0,0 +1,13 @@
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntent
+metadata:
+  name: virtual-patch
+spec:
+  intent:
+    id: virtualPatch
+    description: "Check and apply virtual patch for a given set of CVEs as per a schedule"
+    action: Block
+    params:
+      cveList:
+        - "CVE-2024-4439"
+        - "CVE-2024-27268"
\ No newline at end of file
diff --git a/virtual_patch_sib.yaml b/virtual_patch_sib.yaml
new file mode 100644
index 00000000..4596f3a1
--- /dev/null
+++ b/virtual_patch_sib.yaml
@@ -0,0 +1,11 @@
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntentBinding
+metadata:
+  name: virtual-patch-binding
+spec:
+  intents:
+    - name: virtual-patch
+  selector:
+    workloadSelector:
+      matchLabels:
+        app: prod
\ No newline at end of file
diff --git a/vp.json b/vp.json
new file mode 100644
index 00000000..1b44e807
--- /dev/null
+++ b/vp.json
@@ -0,0 +1,169 @@
+[
+  {
+    "image": "nginx:latest",
+    "cves": [
+      {
+        "cve": "CVE-2024-4439",
+        "virtual_patch": [
+          {
+            "karmor": {
+              "apiVersion": "security.kubearmor.com/v1",
+              "kind": "KubeArmorPolicy",
+              "metadata": {
+                "name": "block-pkg-mgmt-tools-exec"
+              },
+              "spec": {
+                "selector": {
+                  "matchLabels": {
+                    "app": "nginx"
+                  }
+                },
+                "process": {
+                  "matchPaths": [
+                    {
+                      "path": "/usr/bin/apt"
+                    },
+                    {
+                      "path": "/usr/bin/apt-get"
+                    }
+                  ]
+                },
+                "action": "Block"
+              }
+            }
+          },
+          {
+            "kyverno": {
+              "apiVersion": "kyverno.io/v1",
+              "kind": "ClusterPolicy",
+              "metadata": {
+                "name": "disallow-latest-tag"
+              },
+              "spec": {
+                "validationFailureAction": "Enforce",
+                "background": true,
+                "rules": [
+                  {
+                    "name": "validate-image-tag",
+                    "match": {
+                      "any": [
+                        {
+                          "resources": {
+                            "kinds": [
+                              "Pod"
+                            ],
+                            "selector": {
+                              "matchLabels": {
+                                "app": "test"
+                              }
+                            }
+                          }
+                        }
+                      ]
+                    },
+                    "preconditions": {
+                      "all": [
+                        {
+                          "key": "busybox",
+                          "operator": "AnyIn",
+                          "value": "{{ images.containers.*.name }}"
+                        }
+                      ]
+                    },
+                    "validate": {
+                      "message": "Using a mutable image tag e.g. 'latest' is not allowed.",
+                      "pattern": {
+                        "spec": {
+                          "containers": [
+                            {
+                              "image": "!*:latest"
+                            }
+                          ]
+                        }
+                      }
+                    }
+                  }
+                ]
+              }
+            }
+          },
+          {
+            "netpol": {
+              "apiVersion": "networking.k8s.io/v1",
+              "kind": "NetworkPolicy",
+              "metadata": {
+                "name": "test-network-policy"
+              },
+              "spec": {
+                "podSelector": {
+                  "matchLabels": {
+                    "role": "db",
+                    "app": "dsfsdf"
+                  }
+                },
+                "policyTypes": [
+                  "Ingress",
+                  "Egress"
+                ],
+                "ingress": [
+                  {
+                    "from": [
+                      {
+                        "ipBlock": {
+                          "cidr": "172.17.0.0/16",
+                          "except": [
+                            "172.17.1.0/24"
+                          ]
+                        }
+                      },
+                      {
+                        "namespaceSelector": {
+                          "matchLabels": {
+                            "project": "myproject"
+                          }
+                        }
+                      },
+                      {
+                        "podSelector": {
+                          "matchLabels": {
+                            "role": "frontend"
+                          }
+                        }
+                      }
+                    ],
+                    "ports": [
+                      {
+                        "protocol": "TCP",
+                        "port": 6379
+                      }
+                    ]
+                  }
+                ],
+                "egress": [
+                  {
+                    "to": [
+                      {
+                        "ipBlock": {
+                          "cidr": "10.0.0.0/24"
+                        }
+                      }
+                    ],
+                    "ports": [
+                      {
+                        "protocol": "TCP",
+                        "port": 5978
+                      }
+                    ]
+                  }
+                ]
+              }
+            }
+          }
+        ]
+      }
+    ]
+  }
+]
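Note: the entries above are consumed by virtualPatch() in processor/kpbuilder.go. A minimal sketch of the expected shape, using the generic helper added in utils.go (type assertions exactly as performed by this patch):

    resp, err := utils.FetchVirtualPatchData[[]map[string]any]()
    if err != nil {
        return err
    }
    for _, entry := range resp {
        image := entry["image"].(string) // e.g. "nginx:latest"
        for _, c := range entry["cves"].([]any) {
            cveData := c.(map[string]any)
            cve := cveData["cve"].(string) // e.g. "CVE-2024-4439"
            // each element carries one of the "karmor", "kyverno", or "netpol" payloads
            patches := cveData["virtual_patch"].([]any)
            _, _, _ = image, cve, patches
        }
    }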