diff --git a/.github/workflows/component-tests.yaml b/.github/workflows/component-tests.yaml
index e0947817..2c741b92 100644
--- a/.github/workflows/component-tests.yaml
+++ b/.github/workflows/component-tests.yaml
@@ -43,14 +43,15 @@ jobs:
           Test_01_BasicAlertTest,
           Test_02_AllAlertsFromMaliciousApp,
           Test_03_BasicLoadActivities,
-          Test_04_MemoryLeak,
+          # Test_04_MemoryLeak,
           Test_05_MemoryLeak_10K_Alerts,
           Test_06_KillProcessInTheMiddle,
           Test_07_RuleBindingApplyTest,
           Test_08_ApplicationProfilePatching,
           Test_10_MalwareDetectionTest,
-          # Test_10_DemoTest
-          # Test_11_DuplicationTest
+          Test_11_EndpointTest,
+          Test_12_MergingProfilesTest,
+          Test_13_MergingNetworkNeighborhoodTest,
         ]
     steps:
       - name: Checkout code
@@ -96,7 +97,12 @@ jobs:
       - name: Run test
        run: |
          cd tests && go test -v ./... -run ${{ matrix.test }} --timeout=20m --tags=component
-      # - name: Upload plot images
+      - name: Print storage logs
+        if: always()
+        run: |
+          kubectl logs $(kubectl get pods -n kubescape -o name | grep storage) -n kubescape
+
+      # - name: Upload plot images
       #   if: always()
       #   uses: actions/upload-artifact@v2
       #   with:
diff --git a/Makefile b/Makefile
index cdfc6ddd..ce380010 100644
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,7 @@ binary:
 	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BINARY_NAME)
 
 docker-build:
-	docker buildx build --platform linux/amd64 -t $(IMAGE):$(TAG) -f $(DOCKERFILE_PATH) .
+	docker buildx build --platform linux/amd64 -t $(IMAGE):$(TAG) -f $(DOCKERFILE_PATH) --load .
 
 docker-push:
 	docker push $(IMAGE):$(TAG)
diff --git a/README.md b/README.md
index 91ed08f7..1028e706 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,16 @@
 # NodeAgent
+[![Version](https://img.shields.io/github/v/release/kubescape/node-agent)](https://github.com/kubescape/node-agent/releases)
 [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/kubescape/node-agent/badge)](https://securityscorecards.dev/viewer/?uri=github.com/kubescape/node-agent)
 [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fkubescape%2Fsniffer.svg?type=shield&issueType=license)](https://app.fossa.com/projects/git%2Bgithub.com%2Fkubescape%2Fsniffer?ref=badge_shield&issueType=license)
+[![Stars](https://img.shields.io/github/stars/kubescape/node-agent?style=social)](https://github.com/kubescape/node-agent/stargazers)
 
-## Prerequisites
-1. [Minikube](https://minikube.sigs.k8s.io/docs/start/)
-Start minikube with the following command:
-```
-minikube start
-```
-2. Linux kernel version 5.4 and above.
+NodeAgent is a component of Kubescape that enables node-level security scanning and monitoring.
+It uses eBPF technology to monitor the system and provides real-time security insights.
 
+## Running Node Agent in Kubernetes
+This is the recommended way to run the Node Agent.
+You can run the Node Agent in a Kubernetes cluster as part of Kubescape by using helm.
+Please refer to the [docs](https://kubescape.io/docs/) for more information.
 
 ## Running the Node Agent
 Make sure to set the `NODE_NAME` environment variable to the name of the node you want to scan.
@@ -23,13 +24,9 @@
 Then run the binary with root privileges:
 ```
 sudo ./node-agent
 ```
 
-## Running Node Agent in Kubernetes
-You can run the Node Agent in a Kubernetes cluster as part of Kubescape by using helm.
-Please refer to the [docs](https://kubescape.io/docs/) for more information.
-
-## Limitations:
-1. This feature is using eBPF technology that is implemented only on linux.
-2. the linux kernel version that supported it 5.4 and above.
+## System Requirements
+1. The node agent uses eBPF, so make sure your system supports it.
+2. It uses `CO-RE` (Compile Once – Run Everywhere), so make sure your kernel version is 5.4 or higher.
 
 ## Debugging
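The Makefile change above adds `--load` to the `docker-build` target: by default `docker buildx build` keeps its result only in the build cache, and `--load` imports the finished image into the local Docker image store so it can be run or pushed afterwards. A minimal sketch of the resulting local workflow, combined with the README's run instructions — the `IMAGE`/`TAG` values and the use of the hostname are illustrative assumptions, not repository defaults:

```bash
# Build the image and load it into the local Docker daemon
# (without --load, buildx leaves the result only in the build cache).
make docker-build IMAGE=node-agent TAG=dev

# The image should now be visible locally.
docker images node-agent

# Run the agent binary directly on a node, as the README describes.
# NODE_NAME tells the agent which node it is scanning; -E keeps the
# variable in the root environment. Using the local hostname here is
# an assumption for a single-node setup.
export NODE_NAME=$(hostname)
sudo -E ./node-agent
```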
diff --git a/clamav/init.sh b/clamav/init.sh
index 3559497a..960645ef 100755
--- a/clamav/init.sh
+++ b/clamav/init.sh
@@ -63,7 +63,8 @@ else
     if [ -S "/tmp/clamd.sock" ]; then
         unlink "/tmp/clamd.sock"
     fi
-    clamd --foreground &
+    # Run clamd in foreground mode (backgrounded with &), redirecting stdout and stderr to /dev/null
+    clamd --foreground > /dev/null 2>&1 &
     while [ ! -S "/run/clamav/clamd.sock" ] && [ ! -S "/tmp/clamd.sock" ]; do
         if [ "${_timeout:=0}" -gt "${CLAMD_STARTUP_TIMEOUT:=1800}" ]; then
             echo
diff --git a/configuration/config.json b/configuration/config.json
index 10bb5898..42b4f730 100644
--- a/configuration/config.json
+++ b/configuration/config.json
@@ -10,6 +10,7 @@
     "prometheusExporterEnabled": "true",
     "runtimeDetectionEnabled": "true",
     "nodeProfileServiceEnabled": "true",
+    "httpDetectionEnabled": "true",
     "nodeProfileInterval": "1m",
     "seccompServiceEnabled": "true",
     "exporters": {
diff --git a/demo/README.md b/demo/README.md
index b070c63e..be1c9c69 100644
--- a/demo/README.md
+++ b/demo/README.md
@@ -4,7 +4,8 @@ This is a walkthrough of Node Agent Runtime Detection & Response capability, in
 2. Deploy a sample web application and attack it.
 3. Deploy fileless malware.
 4. Deploy a container with a malicious image that contains malware.
-5. See how Node Agent detects the attacks.
+5. Deploy an xmrig container to mine cryptocurrency.
+6. See how Node Agent detects the attacks.
 
 With this demo you will be able to see how Node Agent works and how it can be used to detect and prevent attacks.
 To learn more about Node Agent, see [here](https://kubescape.io/docs/).
@@ -177,6 +178,12 @@ ClamAV is an open source antivirus engine for detecting trojans, viruses, malwar
 Please note that Node Agent doesn't scan the images by default; you need to enable it by setting `capabilities.malwareDetection=enable` in the helm chart.
 See [here](https://kubescape.io/docs/) for more information.
 
+## Attack Cryptocurrency Mining
+Let's deploy an xmrig container to mine cryptocurrency and see how Node Agent detects it.
+```bash
+kubectl apply -f demo/miner/miner-pod.yaml
+```
+You can see in the logs of the node-agent that it detected the xmrig container and raised an alert.
 
 ## Conclusion
 In this demo we saw how Node Agent can be used to detect and prevent attacks in Kubernetes.
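Following the demo walkthrough updated above, one way to exercise the new mining scenario end to end is sketched below. The label selectors (`app=k8s-miner`, `app=node-agent`) and the `kubescape` namespace are assumptions about the demo manifest and the Helm install, not values confirmed by this diff — verify them with `kubectl get pods --show-labels` first:

```bash
# Deploy the miner (the manifest below now targets the default namespace).
kubectl apply -f demo/miner/miner-pod.yaml

# Wait for the miner pod to come up; app=k8s-miner is an assumed label.
kubectl wait --for=condition=Ready pod -l app=k8s-miner --timeout=120s

# Scan node-agent logs for the crypto-mining alert; app=node-agent in the
# kubescape namespace is assumed from typical Helm chart defaults.
kubectl logs -n kubescape -l app=node-agent --tail=-1 | grep -iE 'xmrig|mining'
```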
diff --git a/demo/miner/miner-pod.yaml b/demo/miner/miner-pod.yaml index e13f0367..57d46c03 100644 --- a/demo/miner/miner-pod.yaml +++ b/demo/miner/miner-pod.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: k8s-miner-deployment - namespace: kubescape + namespace: default spec: replicas: 1 selector: @@ -18,8 +18,4 @@ spec: containers: - name: k8s-miner image: docker.io/amitschendel/crypto-miner-1 - imagePullPolicy: IfNotPresent - resources: - requests: - memory: "3Gi" - cpu: "3" \ No newline at end of file + imagePullPolicy: Always diff --git a/go.mod b/go.mod index b467d2c3..053666f2 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,14 @@ module github.com/kubescape/node-agent -go 1.22.5 +go 1.23.0 require ( - github.com/armosec/armoapi-go v0.0.425 - github.com/armosec/utils-k8s-go v0.0.26 + github.com/armosec/armoapi-go v0.0.470 + github.com/armosec/utils-k8s-go v0.0.30 github.com/cenkalti/backoff/v4 v4.3.0 - github.com/cilium/ebpf v0.15.0 + github.com/cilium/ebpf v0.16.0 github.com/crewjam/rfc5424 v0.1.0 - github.com/cyphar/filepath-securejoin v0.2.5 + github.com/cyphar/filepath-securejoin v0.3.3 github.com/deckarep/golang-set/v2 v2.6.0 github.com/dustin/go-humanize v1.0.1 github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e @@ -17,33 +17,34 @@ require ( github.com/go-openapi/strfmt v0.23.0 github.com/google/uuid v1.6.0 github.com/goradd/maps v0.1.5 - github.com/inspektor-gadget/inspektor-gadget v0.30.0 + github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/inspektor-gadget/inspektor-gadget v0.33.0 github.com/kinbiko/jsonassert v1.1.1 github.com/kubescape/backend v0.0.20 - github.com/kubescape/go-logger v0.0.22 + github.com/kubescape/go-logger v0.0.23 github.com/kubescape/k8s-interface v0.0.170 - github.com/kubescape/storage v0.0.89 + github.com/kubescape/storage v0.0.119 github.com/panjf2000/ants/v2 v2.9.1 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/procfs v0.15.0 + github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/procfs v0.15.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.11.0 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/otel v1.27.0 - go.opentelemetry.io/otel/trace v1.27.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/multierr v1.11.0 - golang.org/x/net v0.26.0 - golang.org/x/sys v0.21.0 + golang.org/x/net v0.29.0 + golang.org/x/sys v0.25.0 gonum.org/v1/plot v0.14.0 gopkg.in/mcuadros/go-syslog.v2 v2.3.0 istio.io/pkg v0.0.0-20231221211216-7635388a563e - k8s.io/api v0.30.2 - k8s.io/apimachinery v0.30.2 - k8s.io/client-go v0.30.2 - k8s.io/kubectl v0.29.0 - k8s.io/utils v0.0.0-20240310230437-4693a0247e57 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 + k8s.io/kubectl v0.31.0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/yaml v1.4.0 ) @@ -53,13 +54,13 @@ require ( github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Microsoft/hcsshim v0.12.3 // indirect + github.com/Microsoft/hcsshim v0.12.5 // indirect github.com/acobaugh/osrelease v0.1.0 // indirect github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect github.com/anchore/go-logger v0.0.0-20240217160628-ee28a485904f // indirect - github.com/anchore/packageurl-go 
v0.1.1-0.20240312213626-055233e539b4 // indirect - github.com/anchore/stereoscope v0.0.3-0.20240501181043-2e9894674185 // indirect - github.com/anchore/syft v1.4.1 // indirect + github.com/anchore/packageurl-go v0.1.1-0.20240507183024-848e011fc24f // indirect + github.com/anchore/stereoscope v0.0.3 // indirect + github.com/anchore/syft v1.13.0 // indirect github.com/armosec/gojay v1.2.17 // indirect github.com/armosec/utils-go v0.0.57 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -67,42 +68,46 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect - github.com/briandowns/spinner v1.23.0 // indirect + github.com/briandowns/spinner v1.23.1 // indirect github.com/campoy/embedmd v1.0.0 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cilium/cilium v1.16.1 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect - github.com/containerd/containerd v1.7.18 // indirect + github.com/containerd/containerd v1.7.22 // indirect + github.com/containerd/containerd/api v1.7.19 // indirect github.com/containerd/continuity v0.4.3 // indirect github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/ttrpc v1.2.4 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/containerd/ttrpc v1.2.5 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect - github.com/containers/common v0.59.1 // indirect + github.com/containers/common v0.60.4 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v27.0.2+incompatible // indirect - github.com/docker/docker v27.1.1+incompatible // indirect + github.com/docker/cli v27.3.1+incompatible // indirect + github.com/docker/docker v27.3.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/facebookincubator/nvdtools v0.1.5 // indirect github.com/fatih/color v1.17.0 // indirect github.com/felixge/fgprof v0.9.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.3 // indirect - github.com/github/go-spdx/v2 v2.2.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.5 // indirect + github.com/github/go-spdx/v2 v2.3.2 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-fonts/liberation v0.3.2 // indirect github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/errors v0.22.0 // indirect @@ -122,13 +127,13 @@ require ( 
github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry v0.19.1 // indirect + github.com/google/go-containerregistry v0.20.2 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/gopacket v1.1.19 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gopacket/gopacket v1.2.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect @@ -137,8 +142,9 @@ require ( github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.8 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mackerelio/go-osstat v0.2.5 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -148,12 +154,13 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/locker v1.0.1 // indirect - github.com/moby/moby v27.1.2+incompatible // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.7.1 // indirect + github.com/moby/moby v27.3.1+incompatible // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect - github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -166,65 +173,71 @@ require ( github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.2.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/common v0.59.1 // indirect github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20240229202106-bca3154da60a // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/scylladb/go-set 
v1.0.3-0.20200225121959-cc7b2070d91e // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/stripe/stripe-go/v74 v74.30.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/sylabs/squashfs v0.6.1 // indirect + github.com/sylabs/squashfs v1.0.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.4 // indirect - github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.4 // indirect - github.com/uptrace/uptrace-go v1.26.2 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2 // indirect + github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2 // indirect + github.com/uptrace/uptrace-go v1.30.1 // indirect + github.com/vishvananda/netlink v1.3.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 // indirect github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect - go.opentelemetry.io/contrib/instrumentation/runtime v0.51.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/runtime v0.55.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect + go.opentelemetry.io/otel/log v0.6.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/sdk v1.30.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20240517230649-3792562d0b7f // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba 
// indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/image v0.18.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect google.golang.org/genproto v0.0.0-20240515191416-fc5f0ca64291 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect - google.golang.org/grpc v1.64.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -232,20 +245,22 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.30.2 // indirect - k8s.io/apiserver v0.30.2 // indirect - k8s.io/cli-runtime v0.30.2 // indirect - k8s.io/component-base v0.30.2 // indirect - k8s.io/cri-api v0.30.2 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f // indirect - k8s.io/kubelet v0.30.2 // indirect + k8s.io/apiextensions-apiserver v0.31.1 // indirect + k8s.io/apiserver v0.31.1 // indirect + k8s.io/cli-runtime v0.31.1 // indirect + k8s.io/component-base v0.31.1 // indirect + k8s.io/cri-api v0.31.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240812233141-91dab695df6f // indirect + k8s.io/kubelet v0.31.1 // indirect oras.land/oras-go/v2 v2.4.0 // indirect - sigs.k8s.io/controller-runtime v0.18.4 // indirect + sigs.k8s.io/controller-runtime v0.19.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.0 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace github.com/vishvananda/netns => github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6 + +replace github.com/goradd/maps => github.com/matthyx/maps v0.0.0-20241029072232-2f5d83d608a7 diff --git a/go.sum b/go.sum index e9fd510b..e53a03ca 100644 --- a/go.sum +++ b/go.sum @@ -69,13 +69,13 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= -github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0= +github.com/Microsoft/hcsshim v0.12.5/go.mod 
h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/acobaugh/osrelease v0.1.0 h1:Yb59HQDGGNhCj4suHaFQQfBps5wyoKLSSX/J/+UifRE= github.com/acobaugh/osrelease v0.1.0/go.mod h1:4bFEs0MtgHNHBrmHCt67gNisnabCRAlzdVasCEGHTWY= -github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= -github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= +github.com/adrg/xdg v0.5.0 h1:dDaZvhMXatArP1NPHhnfaQUqWBLBsmx1h1HXQdMoFCY= +github.com/adrg/xdg v0.5.0/go.mod h1:dDdY4M4DF9Rjy4kHPeNL+ilVF+p2lK8IdM9/rTSGcI4= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= @@ -84,18 +84,18 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/anchore/clio v0.0.0-20240209204744-cb94e40a4f65 h1:u9XrEabKlGPsrmRvAER+kUKkwXiJfLyqGhmOTFsXjX4= -github.com/anchore/clio v0.0.0-20240209204744-cb94e40a4f65/go.mod h1:8Jr7CjmwFVcBPtkJdTpaAGHimoGJGfbExypjzOu87Og= -github.com/anchore/fangs v0.0.0-20231201140849-5075d28d6d8b h1:L/djgY7ZbZ/38+wUtdkk398W3PIBJLkt1N8nU/7e47A= -github.com/anchore/fangs v0.0.0-20231201140849-5075d28d6d8b/go.mod h1:TLcE0RE5+8oIx2/NPWem/dq1DeaMoC+fPEH7hoSzPLo= +github.com/anchore/clio v0.0.0-20240522144804-d81e109008aa h1:pwlAn4O9SBUnlgfa69YcqIynbUyobLVFYu8HxSoCffA= +github.com/anchore/clio v0.0.0-20240522144804-d81e109008aa/go.mod h1:nD3H5uIvjxlfmakOBgtyFQbk5Zjp3l538kxfpHPslzI= +github.com/anchore/fangs v0.0.0-20240903175602-e716ef12c23d h1:ZD4wdCBgJJzJybjTUIEiiupLF7B9H3WLuBTjspBO2Mc= +github.com/anchore/fangs v0.0.0-20240903175602-e716ef12c23d/go.mod h1:Xh4ObY3fmoMzOEVXwDtS1uK44JC7+nRD0n29/1KYFYg= github.com/anchore/go-logger v0.0.0-20240217160628-ee28a485904f h1:qRQCz19ioRN2FtAct4j6Lb3Nl0VolFiuHtYMezGYBn0= github.com/anchore/go-logger v0.0.0-20240217160628-ee28a485904f/go.mod h1:ErB21zunlmQOE/aFPkt4Tv2Q00ttFxPZ2l87gSXxSec= -github.com/anchore/packageurl-go v0.1.1-0.20240312213626-055233e539b4 h1:SjemQ90fgflz39HG+VMkNfrpUVJpcFW6ZFA3TDXqzBM= -github.com/anchore/packageurl-go v0.1.1-0.20240312213626-055233e539b4/go.mod h1:Blo6OgJNiYF41ufcgHKkbCKF2MDOMlrqhXv/ij6ocR4= -github.com/anchore/stereoscope v0.0.3-0.20240501181043-2e9894674185 h1:SuViDJ27nZ+joGdKbAkxAlm7tYMt9NTxTZZ05po4hls= -github.com/anchore/stereoscope v0.0.3-0.20240501181043-2e9894674185/go.mod h1:ckIamHiRMp8iBwWoTtE5Xkt9VQ5QC+6+O4VzwqyZr5Q= -github.com/anchore/syft v1.4.1 h1:4ofNePf3vuEyNZZW7SDmTX9uR/vHYXtHkcLbo27Mtjs= -github.com/anchore/syft v1.4.1/go.mod h1:2N75VGorI/18u2xSRAP/DEaZjjjVHtIXM+hFqSkfOTM= +github.com/anchore/packageurl-go v0.1.1-0.20240507183024-848e011fc24f h1:B/E9ixKNCasntpoch61NDaQyGPDXLEJlL+B9B/PbdbA= +github.com/anchore/packageurl-go v0.1.1-0.20240507183024-848e011fc24f/go.mod h1:Blo6OgJNiYF41ufcgHKkbCKF2MDOMlrqhXv/ij6ocR4= +github.com/anchore/stereoscope v0.0.3 h1:JRPHySy8S6P+Ff3IDiQ29ap1i8/laUQxDk9K1eFh/2U= 
+github.com/anchore/stereoscope v0.0.3/go.mod h1:5DJheGPjVRsSqegTB24Zi6SCHnYQnA519yeIG+RG+I4= +github.com/anchore/syft v1.13.0 h1:cS7LBjalHPO5enCEtsyJrCSMAxTEE5BIB2nSmnS9uRQ= +github.com/anchore/syft v1.13.0/go.mod h1:zL9Z5vtq8O+h6RRYo0lyb61NLx00OqcvoVNgk8qoMXA= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -106,14 +106,14 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/armosec/armoapi-go v0.0.425 h1:+VZJ9TBgu14gY2R93nXssTH97FY3o7OSoZZkfBDCSls= -github.com/armosec/armoapi-go v0.0.425/go.mod h1:mpok+lZaolcN5XRz/JxpwhfF8nln1OEKnGuvwAN+7Lo= +github.com/armosec/armoapi-go v0.0.470 h1:fT2J7SruNvOR1Q8RQXjiZ0JvNtJxjVUx68rl0X4leFU= +github.com/armosec/armoapi-go v0.0.470/go.mod h1:TruqDSAPgfRBXCeM+Cgp6nN4UhJSbe7la+XDKV2pTsY= github.com/armosec/gojay v1.2.17 h1:VSkLBQzD1c2V+FMtlGFKqWXNsdNvIKygTKJI9ysY8eM= github.com/armosec/gojay v1.2.17/go.mod h1:vuvX3DlY0nbVrJ0qCklSS733AWMoQboq3cFyuQW9ybc= github.com/armosec/utils-go v0.0.57 h1:0RaqexK+t7HeKWfldBv2C1JiLLGuUx9FP0DGWDNRJpg= github.com/armosec/utils-go v0.0.57/go.mod h1:4wfINE8JTQ6EHvSL2jki0Q3/D1j6oDi6sxxrtAEug74= -github.com/armosec/utils-k8s-go v0.0.26 h1:gVSV1mrALyphaesc+JXbx9SfbxLqfgg1KvvC1/0Hfkk= -github.com/armosec/utils-k8s-go v0.0.26/go.mod h1:WL2brx3tszxeSl1yHac0oAVJUg3o22HYh1dPjaSfjXU= +github.com/armosec/utils-k8s-go v0.0.30 h1:Gj8MJck0jZPSLSq8ZMiRPT3F/laOYQdaLxXKKcjijt4= +github.com/armosec/utils-k8s-go v0.0.30/go.mod h1:t0vvPJhYE+X+bOsaMsD2SzWU7WkJmV2Ltn9hg66AIe8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/becheran/wildmatch-go v1.0.0 h1:mE3dGGkTmpKtT4Z+88t8RStG40yN9T+kFEGj2PZFSzA= @@ -128,8 +128,8 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= -github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= +github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= @@ -153,8 +153,10 @@ github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= -github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= -github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/cilium/cilium v1.16.1 h1:7FiLrRJbO3d/RE423FUN0GxhrFDQBYRwr1veHN7uBys= +github.com/cilium/cilium v1.16.1/go.mod h1:SIurqFNFBU9/sQTPC9tvrOktAMDnQOGPBCSYtsnjxp4= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -170,8 +172,10 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= -github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= +github.com/containerd/containerd v1.7.22 h1:nZuNnNRA6T6jB975rx2RRNqqH2k6ELYKDZfqTHqwyy0= +github.com/containerd/containerd v1.7.22/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g= +github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA= +github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= @@ -180,12 +184,14 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/ttrpc v1.2.4 h1:eQCQK4h9dxDmpOb9QOOMh2NHTfzroH1IkmHiKZi05Oo= -github.com/containerd/ttrpc v1.2.4/go.mod h1:ojvb8SJBSch0XkqNO0L0YX/5NxR3UnVk2LzFKBK0upc= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= +github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= -github.com/containers/common v0.59.1 h1:7VkmJN3YvD0jLFwaUjLHSRJ98JLffydiyOJjYr0dUTo= 
-github.com/containers/common v0.59.1/go.mod h1:53VicJCZ2AD0O+Br7VVoyrS7viXF4YmwlTIocWUT8XE= +github.com/containers/common v0.60.4 h1:H5+LAMHPZEqX6vVNOQ+IguVsaFl8kbO/SZ/VPXjxhy0= +github.com/containers/common v0.60.4/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -199,8 +205,8 @@ github.com/creack/pty v1.1.20 h1:VIPb/a2s17qNeQgDnkfZC35RScx+blkKF8GV68n80J4= github.com/creack/pty v1.1.20/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/crewjam/rfc5424 v0.1.0 h1:MSeXJm22oKovLzWj44AHwaItjIMUMugYGkEzfa831H8= github.com/crewjam/rfc5424 v0.1.0/go.mod h1:RCi9M3xHVOeerf6ULZzqv2xOGRO/zYaVUeRyPnBW3gQ= -github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= -github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.3.3 h1:lofZkCEVFIBe0KcdQOzFs8Soy9oaHOWl4gGtPI+gCFc= +github.com/cyphar/filepath-securejoin v0.3.3/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -210,10 +216,10 @@ github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpO github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.0.2+incompatible h1:IgWU3lWqAYNibtcxgl/PY4TB0eCmK1ZpNUZVJfenDQs= -github.com/docker/cli v27.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= +github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -227,8 +233,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e h1:rcHHSQqzCgvlwP0I/fQ8rQMn/MpHE5gWSLdtpxtP6KQ= github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e/go.mod h1:Byz7q8MSzSPkouskHJhX0er2mZY/m0Vj5bMeMCkkyY4= -github.com/emicklei/go-restful/v3 v3.12.0 
h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -267,11 +273,13 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= -github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4= +github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/github/go-spdx/v2 v2.2.0 h1:yBBLMasHA70Ujd35OpL/OjJOWWVNXcJGbars0GinGRI= -github.com/github/go-spdx/v2 v2.2.0/go.mod h1:hMCrsFgT0QnCwn7G8gxy/MxMpy67WgZrwFeISTn0o6w= +github.com/github/go-spdx/v2 v2.3.2 h1:IfdyNHTqzs4zAJjXdVQfRnxt1XMfycXoHBE2Vsm1bjs= +github.com/github/go-spdx/v2 v2.3.2/go.mod h1:2ZxKsOhvBp+OYBDlsGnUMcchLeo2mrpEBn2L1C+U3IQ= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= @@ -292,8 +300,8 @@ github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea/go.mod h1:Y7Vld91/H github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -324,11 +332,10 @@ github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7 github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack 
v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= @@ -397,15 +404,13 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= -github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= +github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= +github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -425,8 +430,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -442,10 +447,9 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= +github.com/gopacket/gopacket v1.2.0 h1:eXbzFad7f73P1n2EJHQlsKuvIMJjVXK5tXoSca78I3A= +github.com/gopacket/gopacket v1.2.0/go.mod h1:BrAKEy5EOGQ76LSqh7DMAr7z0NNPdczWm2GxCG7+I8M= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/goradd/maps v0.1.5 h1:Ut7BPJgNy5BYbleI3LswVJJquiM8X5uN0ZuZBHSdRUI= -github.com/goradd/maps v0.1.5/go.mod h1:E5X1CHMgfVm1qFTHgXpgVLVylO5wtlhZdB93dRGjnc0= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -453,8 +457,8 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -481,6 +485,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= @@ -502,8 +508,8 @@ github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+h 
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inspektor-gadget/inspektor-gadget v0.30.0 h1:N9VCDLR9h8iS2TnU4p7j5ErMI/aYCOrn7vycHTnhfc8= -github.com/inspektor-gadget/inspektor-gadget v0.30.0/go.mod h1:FpyWQG0hF/SCKYDYapLMlXVY8bqkRbzI/lGqJKg4xVw= +github.com/inspektor-gadget/inspektor-gadget v0.33.0 h1:cUCVuGMY8m/SMNBfYvKLgW5n3cPBonUt9QGE6HVfXDo= +github.com/inspektor-gadget/inspektor-gadget v0.33.0/go.mod h1:Axsy1a2c1AaZCw+WJqX21Ibo9uTfxvY/PNCW5/ZwiO4= github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6 h1:fQqkJ+WkYfzy6BoUh32fr9uYrXfOGtsfw0skMQkfOic= github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -511,6 +517,10 @@ github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= +github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -523,8 +533,8 @@ github.com/kinbiko/jsonassert v1.1.1 h1:DB12divY+YB+cVpHULLuKePSi6+ui4M/shHSzJIS github.com/kinbiko/jsonassert v1.1.1/go.mod h1:NO4lzrogohtIdNUNzx8sdzB55M4R4Q1bsrWVdqQ7C+A= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -539,23 +549,29 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubescape/backend v0.0.20 h1:E3nZGqWW8ELSh/n3ZRitlkmuZq33Lyx/42Lm4gpghhM= github.com/kubescape/backend v0.0.20/go.mod h1:FpazfN+c3Ucuvv4jZYCnk99moSBRNMVIxl5aWCZAEBo= -github.com/kubescape/go-logger v0.0.22 h1:gle7wH6emOiGv9ljdpVi82pWLQ3jGucrUucvil6JXHE= 
-github.com/kubescape/go-logger v0.0.22/go.mod h1:x3HBpZo3cMT/WIdy18BxvVVd5D0e/PWFVk/HiwBNu3g= +github.com/kubescape/go-logger v0.0.23 h1:5xh+Nm8eGImhFbtippRKLaFgsvlKE1ufvQhNM2P/570= +github.com/kubescape/go-logger v0.0.23/go.mod h1:Ayg7g769c7sXVB+P3fkJmbsJpoEmMmaUf9jeo+XuC3U= github.com/kubescape/k8s-interface v0.0.170 h1:EtzomWoeeIWDz7QrAEsqUDpLHQwoh2m3tZITfrE/tiE= github.com/kubescape/k8s-interface v0.0.170/go.mod h1:VoEoHI4Va08NiGAkYzbITF50aFMT5y4fPHRb4x2LtME= -github.com/kubescape/storage v0.0.89 h1:kYjaYqKndm3C/15MB1J4hgdmA4vbV4zCMWox2ga8O3M= -github.com/kubescape/storage v0.0.89/go.mod h1:eLCQ7JKpR6JRjtENnN3JduvRLMOyJFtBihfdVC+1hLA= +github.com/kubescape/storage v0.0.119 h1:7qCSxMRfuCG35H3o832q69hBA06KKHyyLVW76nFy5YA= +github.com/kubescape/storage v0.0.119/go.mod h1:DAR1CmSDhRRBK26nNU4MrVpRAst5nN7IuPuvcnw9XeI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o= +github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matthyx/maps v0.0.0-20241029072232-2f5d83d608a7 h1:LAAFb3ra/vxiZcDY1zrbS29oqnB+N9MknuQZC1ju2+A= +github.com/matthyx/maps v0.0.0-20241029072232-2f5d83d608a7/go.mod h1:E5X1CHMgfVm1qFTHgXpgVLVylO5wtlhZdB93dRGjnc0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -573,6 +589,10 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mgutz/ansi 
v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= @@ -594,20 +614,20 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/moby v27.0.2+incompatible h1:iYtGEjFi9lkX2m/Bop2H/peXzx3VtzmPlE9r0JHyH0s= -github.com/moby/moby v27.0.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.1.2+incompatible h1:vqOs4c7YktTdEBnPQNm0Q+M+IOuxxTCkrYJLBAVsEHQ= -github.com/moby/moby v27.1.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= -github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/moby v27.3.1+incompatible h1:KQbXBjo7PavKpzIl7UkHT31y9lw/e71Uvrqhr4X+zMA= +github.com/moby/moby v27.3.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= -github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -634,10 +654,10 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olvrng/ujson v1.1.0 h1:8xVUzVlqwdMVWh5d1UHBtLQ1D50nxoPuPEq9Wozs8oA= github.com/olvrng/ujson v1.1.0/go.mod h1:Mz4G3RODTUfbkKyvi0lgmPx/7vd3Saksk+1jgk8s9xo= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.18.0 h1:W9Y7IWXxPUpAit9ieMOLI7PJZGaW22DTKgiVAuhDTLc= -github.com/onsi/ginkgo/v2 v2.18.0/go.mod 
h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -648,8 +668,8 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1: github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/panjf2000/ants/v2 v2.9.1 h1:Q5vh5xohbsZXGcD6hhszzGqB7jSSc2/CRr3QKIga8Kw= @@ -663,6 +683,8 @@ github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6 github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -685,8 +707,8 @@ github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.4 
h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -696,14 +718,14 @@ github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQy github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= @@ -718,6 +740,8 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e h1:7q6NSFZDeGfvvtIRwBrU/aegEYJYmvev0cHAwo17zZQ= github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -764,14 +788,15 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.4.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= @@ -799,8 +824,8 @@ github.com/stripe/stripe-go/v74 v74.30.0/go.mod h1:f9L6LvaXa35ja7eyvP6GQswoaIPaB github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/sylabs/squashfs v0.6.1 h1:4hgvHnD9JGlYWwT0bPYNt9zaz23mAV3Js+VEgQoRGYQ= -github.com/sylabs/squashfs v0.6.1/go.mod h1:ZwpbPCj0ocIvMy2br6KZmix6Gzh6fsGQcCnydMF+Kx8= +github.com/sylabs/squashfs v1.0.0 h1:xAyMS21ogglkuR5HaY55PCfqY3H32ma9GkasTYo28Zg= +github.com/sylabs/squashfs v1.0.0/go.mod h1:rhWzvgefq1X+R+LZdts10hfMsTg3g74OfGunW8tvg/4= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= @@ -809,20 +834,22 @@ github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0B github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.4 h1:A6+6ZGgLRoUTD+Jkw/Ph0g8HKiHUsiGlbngcSqBaHsw= -github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.4/go.mod h1:gNYQe4RRVyszriFOhuMpwpAu4kdoFlZgcsw6dcIDFWg= -github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.4 h1:/4mU8NB88+6u9JVKlkdD6HjrhRM1V1KRTsJaU8FSr8I= -github.com/uptrace/opentelemetry-go-extra/otelzap v0.2.4/go.mod h1:JoL6Kg6zYo9WtK5Y715GWItSUNpWprRYj5wgO01h00g= -github.com/uptrace/uptrace-go v1.26.2 h1:SWUejeNNqi+6peNl8164px+i1bS3gtlxyJdv7XGVzFY= -github.com/uptrace/uptrace-go v1.26.2/go.mod h1:TNlPcHKsOGnHYxmIrNK9+iVPneRxfhBBRFbbWLwkfKA= +github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2 h1:3/aHKUq7qaFMWxyQV0W2ryNgg8x8rVeKVA20KJUkfS0= 
+github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2/go.mod h1:Zit4b8AQXaXvA68+nzmbyDzqiyFRISyw1JiD5JqUBjw= +github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2 h1:cj/Z6FKTTYBnstI0Lni9PA+k2foounKIPUmj1LBwNiQ= +github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2/go.mod h1:LDaXk90gKEC2nC7JH3Lpnhfu+2V7o/TsqomJJmqA39o= +github.com/uptrace/uptrace-go v1.30.1 h1:9Bb3bIfPZ9LmtwAbKpN7+nX8iVwx+LmVe/CuiljFqxc= +github.com/uptrace/uptrace-go v1.30.1/go.mod h1:Sy6C30poEuG7ecG4Rcy4cBrC/jw8qJGHonXO3bXS5aQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= +github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 h1:jIVmlAFIqV3d+DOxazTR9v+zgj8+VYuQBzPgBZvWBHA= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651/go.mod h1:b26F2tHLqaoRQf8DywqzVaV1MQ9yvjb0OMcNl7Nxu20= github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 h1:0KGbf+0SMg+UFy4e1A/CPVvXn21f1qtWdeJwxZFoQG8= github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= @@ -847,31 +874,37 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/contrib/instrumentation/runtime v0.51.0 h1:1tBjncp/Rr5iuV0WfdKGGynrzIJ8bMm5z7Zl6jMjfIE= -go.opentelemetry.io/contrib/instrumentation/runtime v0.51.0/go.mod h1:6MqTuVXkhmzrIc7SFHYVTo7N6OFvVpDH5eq5xXKpAZQ= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 h1:HGZWGmCVRCVyAs2GQaiHQPbDHo+ObFWeUEOd+zDnp64= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0/go.mod h1:SaH+v38LSCHddyk7RGlU9uZyQoRrKao6IBnJw6Kbn+c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 
h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 h1:0W5o9SzoR15ocYHEQfvfipzcNog1lBxOLfnex91Hk6s= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0/go.mod h1:zVZ8nz+VSggWmnh6tTsJqXQ7rU4xLwRtna1M4x5jq58= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/runtime v0.55.0 h1:GotCpbh7YkCHdFs+hYMdvAEyGsBZifFognqrOnBwyJM= +go.opentelemetry.io/contrib/instrumentation/runtime v0.55.0/go.mod h1:6b0AS55EEPj7qP44khqF5dqTUq+RkakDMShFaW1EcA4= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= +go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8= +go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk/log 
v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI= +go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE= +go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= +go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.starlark.net v0.0.0-20240517230649-3792562d0b7f h1:APah0oANPHA7m/z/1Ngcccc+BEO/dmLcEfrzHAQQY6w= go.starlark.net v0.0.0-20240517230649-3792562d0b7f/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -884,6 +917,8 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -897,8 +932,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -909,8 +944,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= 
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= @@ -986,8 +1021,8 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1007,8 +1042,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1022,8 +1057,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 
h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1063,7 +1098,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1095,16 +1129,16 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1114,14 +1148,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1178,8 +1212,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1301,10 +1335,10 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20240515191416-fc5f0ca64291 h1:CTZGpOdDJr2Jq+LcJ/mpjG8mClGy/uJdBBVYbS9g5lY= google.golang.org/genproto v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:ch5ZrEj5+9MCxUeR3Gp3mCJ4u0eVpusYAmSr/mvpMSk= -google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 h1:4HZJ3Xv1cmrJ+0aFo304Zn79ur1HMxptAE7aCPNLSqc= -google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= 
+google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1335,8 +1369,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1398,32 +1432,32 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= istio.io/pkg v0.0.0-20231221211216-7635388a563e h1:ZlLVbKDlCzfP0MPbWc6VRcY23d9NdjLxwpPQpDrh3Gc= istio.io/pkg v0.0.0-20231221211216-7635388a563e/go.mod h1:fvmqEdHhZjYYwf6dSiIwvwc7db54kMWVTfsb91KmhzY= -k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI= -k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI= -k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= -k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= -k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= -k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/apiserver v0.30.2 h1:ACouHiYl1yFI2VFI3YGM+lvxgy6ir4yK2oLOsLI1/tw= -k8s.io/apiserver v0.30.2/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8= -k8s.io/cli-runtime v0.30.2 h1:ooM40eEJusbgHNEqnHziN9ZpLN5U4WcQGsdLKVxpkKE= -k8s.io/cli-runtime v0.30.2/go.mod h1:Y4g/2XezFyTATQUbvV5WaChoUGhojv/jZAtdp5Zkm0A= -k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= -k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= -k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= -k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= -k8s.io/cri-api v0.30.2 h1:4KR5W6ziqfGzKYVmFG9AEOJzxNbCPyZMoeCeIlK9jew= -k8s.io/cri-api v0.30.2/go.mod h1://4/umPJSW1ISNSNng4OwjpkvswJOQwU8rnkvO8P+xg= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= 
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM= -k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro= -k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= -k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= -k8s.io/kubelet v0.30.2 h1:Ck4E/pHndI20IzDXxS57dElhDGASPO5pzXF7BcKfmCY= -k8s.io/kubelet v0.30.2/go.mod h1:DSwwTbLQmdNkebAU7ypIALR4P9aXZNFwgRmedojUE94= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c= +k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM= +k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= +k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= +k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= +k8s.io/cri-api v0.31.1 h1:x0aI8yTI7Ho4c8tpuig8NwI/MRe+VhjiYyyebC2xphQ= +k8s.io/cri-api v0.31.1/go.mod h1:Po3TMAYH/+KrZabi7QiwQI4a692oZcUOUThd/rqwxrI= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240812233141-91dab695df6f h1:bnWtxXWdAl5bVOCEPoNdvMkyj6cTW3zxHuwKIakuV9w= +k8s.io/kube-openapi v0.0.0-20240812233141-91dab695df6f/go.mod h1:G0W3eI9gG219NHRq3h5uQaRBl4pj4ZpwzRP5ti8y770= +k8s.io/kubectl v0.31.0 h1:kANwAAPVY02r4U4jARP/C+Q1sssCcN/1p9Nk+7BQKVg= +k8s.io/kubectl v0.31.0/go.mod h1:pB47hhFypGsaHAPjlwrNbvhXgmuAr01ZBvAIIUaI8d4= +k8s.io/kubelet v0.31.1 h1:aAxwVxGzbbMKKk/FnSjvkN52K3LdHhjhzmYcyGBuE0c= +k8s.io/kubelet v0.31.1/go.mod h1:8ZbexYHqUO946gXEfFmnMZiK2UKRGhk7LlGvJ71p2Ig= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.4.0 h1:i+Wt5oCaMHu99guBD0yuBjdLvX7Lz8ukPbwXdR7uBMs= oras.land/oras-go/v2 v2.4.0/go.mod h1:osvtg0/ClRq1KkydMAEu/IxFieyjItcsQ4ut4PPF+f8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1431,14 +1465,14 @@ rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.18.4 
h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.1 h1:MYJBOP/yQ3/5tp4/sf6HiiMfNNyO97LmtnirH9SLNr4= -sigs.k8s.io/kustomize/api v0.17.1/go.mod h1:ffn5491s2EiNrJSmgqcWGzQUVhc/pB0OKNI0HsT/0tA= -sigs.k8s.io/kustomize/kyaml v0.17.0 h1:G2bWs03V9Ur2PinHLzTUJ8Ded+30SzXZKiO92SRDs3c= -sigs.k8s.io/kustomize/kyaml v0.17.0/go.mod h1:6lxkYF1Cv9Ic8g/N7I86cvxNc5iinUo/P2vKsHNmpyE= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/main.go b/main.go index 5f12e017..34e0318b 100644 --- a/main.go +++ b/main.go @@ -10,9 +10,10 @@ import ( "strings" "syscall" - "github.com/kubescape/node-agent/internal/validator" + apitypes "github.com/armosec/armoapi-go/armotypes" "github.com/kubescape/node-agent/pkg/applicationprofilemanager" applicationprofilemanagerv1 "github.com/kubescape/node-agent/pkg/applicationprofilemanager/v1" + cloudmetadata "github.com/kubescape/node-agent/pkg/cloudmetadata" "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/containerwatcher/v1" "github.com/kubescape/node-agent/pkg/dnsmanager" @@ -29,9 +30,12 @@ import ( nodeprofilemanagerv1 "github.com/kubescape/node-agent/pkg/nodeprofilemanager/v1" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache" + "github.com/kubescape/node-agent/pkg/objectcache/dnscache" "github.com/kubescape/node-agent/pkg/objectcache/k8scache" "github.com/kubescape/node-agent/pkg/objectcache/networkneighborhoodcache" objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/node-agent/pkg/processmanager" + processmanagerv1 "github.com/kubescape/node-agent/pkg/processmanager/v1" "github.com/kubescape/node-agent/pkg/relevancymanager" relevancymanagerv1 "github.com/kubescape/node-agent/pkg/relevancymanager/v1" rulebinding "github.com/kubescape/node-agent/pkg/rulebindingmanager" @@ -43,6 +47,7 @@ import ( seccompmanagerv1 "github.com/kubescape/node-agent/pkg/seccompmanager/v1" "github.com/kubescape/node-agent/pkg/storage/v1" "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/node-agent/pkg/validator" "github.com/kubescape/node-agent/pkg/watcher/dynamicwatcher" "github.com/kubescape/node-agent/pkg/watcher/seccompprofilewatcher" @@ -136,7 +141,7 @@ func main() { logger.L().Ctx(ctx).Info("Detected container runtime", helpers.String("containerRuntime", containerRuntime.Name.String())) // Create watchers - dWatcher := dynamicwatcher.NewWatchHandler(k8sClient, 
cfg.SkipNamespace) + dWatcher := dynamicwatcher.NewWatchHandler(k8sClient, storageClient.StorageClient, cfg.SkipNamespace) // create k8sObject cache k8sObjectCache, err := k8scache.NewK8sObjectCache(nodeName, k8sClient) if err != nil { @@ -154,7 +159,7 @@ func main() { if err != nil { logger.L().Ctx(ctx).Fatal("error creating SeccompManager", helpers.Error(err)) } - seccompWatcher := seccompprofilewatcher.NewSeccompProfileWatcher(k8sClient, seccompManager) + seccompWatcher := seccompprofilewatcher.NewSeccompProfileWatcher(storageClient.StorageClient, seccompManager) dWatcher.AddAdaptor(seccompWatcher) } else { seccompManager = seccompmanager.NewSeccompManagerMock() @@ -188,11 +193,39 @@ func main() { relevancyManager = relevancymanager.CreateRelevancyManagerMock() } + // Create the network and DNS managers + var networkManagerClient networkmanager.NetworkManagerClient + var dnsManagerClient dnsmanager.DNSManagerClient + var dnsResolver dnsmanager.DNSResolver + if cfg.EnableNetworkTracing || cfg.EnableRuntimeDetection { + dnsManager := dnsmanager.CreateDNSManager() + dnsManagerClient = dnsManager + // NOTE: dnsResolver is set for threat detection. + dnsResolver = dnsManager + networkManagerClient = networkmanagerv2.CreateNetworkManager(ctx, cfg, clusterData.ClusterName, k8sClient, storageClient, dnsManager, preRunningContainersIDs, k8sObjectCache) + } else { + dnsManagerClient = dnsmanager.CreateDNSManagerMock() + dnsResolver = dnsmanager.CreateDNSManagerMock() + networkManagerClient = networkmanager.CreateNetworkManagerMock() + } + var ruleManager rulemanager.RuleManagerClient + var processManager processmanager.ProcessManagerClient var objCache objectcache.ObjectCache var ruleBindingNotify chan rulebinding.RuleBindingNotify + var cloudMetadata *apitypes.CloudMetadata + + if cfg.EnableRuntimeDetection || cfg.EnableMalwareDetection { + cloudMetadata, err = cloudmetadata.GetCloudMetadata(ctx, k8sClient, nodeName) + if err != nil { + logger.L().Ctx(ctx).Error("error getting cloud metadata", helpers.Error(err)) + } + } if cfg.EnableRuntimeDetection { + // create the process manager + processManager = processmanagerv1.CreateProcessManager(ctx) + // create ruleBinding cache ruleBindingCache := rulebindingcachev1.NewCache(nodeName, k8sClient) dWatcher.AddAdaptor(ruleBindingCache) @@ -200,20 +233,22 @@ func main() { ruleBindingNotify = make(chan rulebinding.RuleBindingNotify, 100) ruleBindingCache.AddNotifier(&ruleBindingNotify) - apc := applicationprofilecache.NewApplicationProfileCache(nodeName, k8sClient) + apc := applicationprofilecache.NewApplicationProfileCache(nodeName, storageClient.StorageClient, cfg.MaxDelaySeconds) dWatcher.AddAdaptor(apc) - nnc := networkneighborhoodcache.NewNetworkNeighborhoodCache(nodeName, k8sClient) + nnc := networkneighborhoodcache.NewNetworkNeighborhoodCache(nodeName, storageClient.StorageClient, cfg.MaxDelaySeconds) dWatcher.AddAdaptor(nnc) + dc := dnscache.NewDnsCache(dnsResolver) + // create object cache - objCache = objectcachev1.NewObjectCache(k8sObjectCache, apc, nnc) + objCache = objectcachev1.NewObjectCache(k8sObjectCache, apc, nnc, dc) // create exporter - exporter := exporters.InitExporters(cfg.Exporters, clusterData.ClusterName, nodeName) + exporter := exporters.InitExporters(cfg.Exporters, clusterData.ClusterName, nodeName, cloudMetadata) // create runtimeDetection managers - ruleManager, err = rulemanagerv1.CreateRuleManager(ctx, cfg, k8sClient, ruleBindingCache, objCache, exporter, prometheusExporter, nodeName, clusterData.ClusterName) + 
ruleManager, err = rulemanagerv1.CreateRuleManager(ctx, cfg, k8sClient, ruleBindingCache, objCache, exporter, prometheusExporter, nodeName, clusterData.ClusterName, processManager) if err != nil { logger.L().Ctx(ctx).Fatal("error creating RuleManager", helpers.Error(err)) } @@ -222,6 +257,7 @@ func main() { ruleManager = rulemanager.CreateRuleManagerMock() objCache = objectcache.NewObjectCacheMock() ruleBindingNotify = make(chan rulebinding.RuleBindingNotify, 1) + processManager = processmanager.CreateProcessManagerMock() } // Create the node profile manager @@ -237,7 +273,7 @@ func main() { var malwareManager malwaremanager.MalwareManagerClient if cfg.EnableMalwareDetection { // create exporter - exporter := exporters.InitExporters(cfg.Exporters, clusterData.ClusterName, nodeName) + exporter := exporters.InitExporters(cfg.Exporters, clusterData.ClusterName, nodeName, cloudMetadata) malwareManager, err = malwaremanagerv1.CreateMalwareManager(cfg, k8sClient, nodeName, clusterData.ClusterName, exporter, prometheusExporter) if err != nil { logger.L().Ctx(ctx).Fatal("error creating MalwareManager", helpers.Error(err)) @@ -246,20 +282,8 @@ func main() { malwareManager = malwaremanager.CreateMalwareManagerMock() } - // Create the network and DNS managers - var networkManagerClient networkmanager.NetworkManagerClient - var dnsManagerClient dnsmanager.DNSManagerClient - if cfg.EnableNetworkTracing { - dnsManager := dnsmanager.CreateDNSManager() - dnsManagerClient = dnsManager - networkManagerClient = networkmanagerv2.CreateNetworkManager(ctx, cfg, clusterData.ClusterName, k8sClient, storageClient, dnsManager, preRunningContainersIDs, k8sObjectCache) - } else { - dnsManagerClient = dnsmanager.CreateDNSManagerMock() - networkManagerClient = networkmanager.CreateNetworkManagerMock() - } - // Create the container handler - mainHandler, err := containerwatcher.CreateIGContainerWatcher(cfg, applicationProfileManager, k8sClient, relevancyManager, networkManagerClient, dnsManagerClient, prometheusExporter, ruleManager, malwareManager, preRunningContainersIDs, &ruleBindingNotify, containerRuntime) + mainHandler, err := containerwatcher.CreateIGContainerWatcher(cfg, applicationProfileManager, k8sClient, relevancyManager, networkManagerClient, dnsManagerClient, prometheusExporter, ruleManager, malwareManager, preRunningContainersIDs, &ruleBindingNotify, containerRuntime, nil, processManager) if err != nil { logger.L().Ctx(ctx).Fatal("error creating the container watcher", helpers.Error(err)) } diff --git a/pkg/applicationprofilemanager/applicationprofile_manager_interface.go b/pkg/applicationprofilemanager/applicationprofile_manager_interface.go index ab665d66..f929ea33 100644 --- a/pkg/applicationprofilemanager/applicationprofile_manager_interface.go +++ b/pkg/applicationprofilemanager/applicationprofile_manager_interface.go @@ -1,6 +1,9 @@ package applicationprofilemanager -import containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" +import ( + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" +) type ApplicationProfileManagerClient interface { ContainerCallback(notif containercollection.PubSubEvent) @@ -8,6 +11,7 @@ type ApplicationProfileManagerClient interface { ReportCapability(k8sContainerID, capability string) ReportFileExec(k8sContainerID, path string, args []string) ReportFileOpen(k8sContainerID, path string, flags []string) + 
ReportHTTPEvent(k8sContainerID string, event *tracerhttptype.Event) ReportDroppedEvent(k8sContainerID string) ContainerReachedMaxTime(containerID string) } diff --git a/pkg/applicationprofilemanager/applicationprofile_manager_mock.go b/pkg/applicationprofilemanager/applicationprofile_manager_mock.go index 236f7360..56b57834 100644 --- a/pkg/applicationprofilemanager/applicationprofile_manager_mock.go +++ b/pkg/applicationprofilemanager/applicationprofile_manager_mock.go @@ -1,6 +1,9 @@ package applicationprofilemanager -import containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" +import ( + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" +) type ApplicationProfileManagerMock struct { } @@ -34,6 +37,11 @@ func (a ApplicationProfileManagerMock) ReportFileOpen(_, _ string, _ []string) { func (a ApplicationProfileManagerMock) ReportDroppedEvent(_ string) { // noop } + +func (a ApplicationProfileManagerMock) ReportHTTPEvent(_ string, _ *tracerhttptype.Event) { + // noop +} + func (a ApplicationProfileManagerMock) ContainerReachedMaxTime(_ string) { // noop } diff --git a/pkg/applicationprofilemanager/v1/applicationprofile_manager.go b/pkg/applicationprofilemanager/v1/applicationprofile_manager.go index 03d6b67c..33eeadb5 100644 --- a/pkg/applicationprofilemanager/v1/applicationprofile_manager.go +++ b/pkg/applicationprofilemanager/v1/applicationprofile_manager.go @@ -4,7 +4,9 @@ import ( "context" "errors" "fmt" + "regexp" "runtime" + "strings" "time" "github.com/armosec/utils-k8s-go/wlid" @@ -19,36 +21,43 @@ import ( "github.com/kubescape/k8s-interface/workloadinterface" "github.com/kubescape/node-agent/pkg/applicationprofilemanager" "github.com/kubescape/node-agent/pkg/config" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/seccompmanager" "github.com/kubescape/node-agent/pkg/storage" "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" storageUtils "github.com/kubescape/storage/pkg/utils" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "istio.io/pkg/cache" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var procRegex = regexp.MustCompile(`^/proc/\d+`) + type ApplicationProfileManager struct { cfg config.Config clusterName string ctx context.Context - containerMutexes storageUtils.MapMutex[string] // key is k8sContainerID - trackedContainers mapset.Set[string] // key is k8sContainerID - removedContainers mapset.Set[string] // key is k8sContainerID - savedCapabilities maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID - savedExecs maps.SafeMap[string, *maps.SafeMap[string, []string]] // key is k8sContainerID - droppedEvents maps.SafeMap[string, bool] // key is k8sContainerID - savedOpens maps.SafeMap[string, *maps.SafeMap[string, mapset.Set[string]]] // key is k8sContainerID - savedSyscalls maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID - toSaveCapabilities maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID - toSaveExecs maps.SafeMap[string, *maps.SafeMap[string, []string]] // key is k8sContainerID 
- toSaveOpens maps.SafeMap[string, *maps.SafeMap[string, mapset.Set[string]]] // key is k8sContainerID - watchedContainerChannels maps.SafeMap[string, chan error] // key is ContainerID + containerMutexes storageUtils.MapMutex[string] // key is k8sContainerID + trackedContainers mapset.Set[string] // key is k8sContainerID + removedContainers mapset.Set[string] // key is k8sContainerID + droppedEventsContainers mapset.Set[string] // key is k8sContainerID + savedCapabilities maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID + savedEndpoints maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID + savedExecs maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID + savedOpens maps.SafeMap[string, cache.ExpiringCache] // key is k8sContainerID + savedSyscalls maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID + toSaveCapabilities maps.SafeMap[string, mapset.Set[string]] // key is k8sContainerID + toSaveEndpoints maps.SafeMap[string, *maps.SafeMap[string, *v1beta1.HTTPEndpoint]] // key is k8sContainerID + toSaveExecs maps.SafeMap[string, *maps.SafeMap[string, []string]] // key is k8sContainerID + toSaveOpens maps.SafeMap[string, *maps.SafeMap[string, mapset.Set[string]]] // key is k8sContainerID + watchedContainerChannels maps.SafeMap[string, chan error] // key is ContainerID k8sClient k8sclient.K8sClientInterface k8sObjectCache objectcache.K8sObjectCache storageClient storage.StorageClient @@ -61,17 +70,18 @@ var _ applicationprofilemanager.ApplicationProfileManagerClient = (*ApplicationP func CreateApplicationProfileManager(ctx context.Context, cfg config.Config, clusterName string, k8sClient k8sclient.K8sClientInterface, storageClient storage.StorageClient, preRunningContainerIDs mapset.Set[string], k8sObjectCache objectcache.K8sObjectCache, seccompManager seccompmanager.SeccompManagerClient) (*ApplicationProfileManager, error) { return &ApplicationProfileManager{ - cfg: cfg, - clusterName: clusterName, - ctx: ctx, - k8sClient: k8sClient, - k8sObjectCache: k8sObjectCache, - storageClient: storageClient, - containerMutexes: storageUtils.NewMapMutex[string](), - trackedContainers: mapset.NewSet[string](), - removedContainers: mapset.NewSet[string](), - preRunningContainerIDs: preRunningContainerIDs, - seccompManager: seccompManager, + cfg: cfg, + clusterName: clusterName, + ctx: ctx, + k8sClient: k8sClient, + k8sObjectCache: k8sObjectCache, + storageClient: storageClient, + containerMutexes: storageUtils.NewMapMutex[string](), + trackedContainers: mapset.NewSet[string](), + removedContainers: mapset.NewSet[string](), + droppedEventsContainers: mapset.NewSet[string](), + preRunningContainerIDs: preRunningContainerIDs, + seccompManager: seccompManager, }, nil } @@ -130,16 +140,19 @@ func (am *ApplicationProfileManager) deleteResources(watchedContainer *utils.Wat // delete resources watchedContainer.UpdateDataTicker.Stop() am.trackedContainers.Remove(watchedContainer.K8sContainerID) + am.droppedEventsContainers.Remove(watchedContainer.K8sContainerID) am.savedCapabilities.Delete(watchedContainer.K8sContainerID) + am.savedEndpoints.Delete(watchedContainer.K8sContainerID) am.savedExecs.Delete(watchedContainer.K8sContainerID) - am.droppedEvents.Delete(watchedContainer.K8sContainerID) am.savedOpens.Delete(watchedContainer.K8sContainerID) am.savedSyscalls.Delete(watchedContainer.K8sContainerID) am.toSaveCapabilities.Delete(watchedContainer.K8sContainerID) + am.toSaveEndpoints.Delete(watchedContainer.K8sContainerID) 
am.toSaveExecs.Delete(watchedContainer.K8sContainerID) am.toSaveOpens.Delete(watchedContainer.K8sContainerID) am.watchedContainerChannels.Delete(watchedContainer.ContainerID) } + func (am *ApplicationProfileManager) ContainerReachedMaxTime(containerID string) { if channel := am.watchedContainerChannels.Get(containerID); channel != nil { channel <- utils.ContainerReachedMaxTime @@ -168,7 +181,7 @@ func (am *ApplicationProfileManager) monitorContainer(ctx context.Context, conta // adjust ticker after first tick if !watchedContainer.InitialDelayExpired { watchedContainer.InitialDelayExpired = true - watchedContainer.UpdateDataTicker.Reset(am.cfg.UpdateDataPeriod) + watchedContainer.UpdateDataTicker.Reset(utils.AddJitter(am.cfg.UpdateDataPeriod, am.cfg.MaxJitterPercentage)) } watchedContainer.SetStatus(utils.WatchedContainerStatusReady) am.saveProfile(ctx, watchedContainer, container.K8s.Namespace) @@ -233,7 +246,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon // sleep for container index second to desynchronize the profiles saving time.Sleep(time.Duration(watchedContainer.ContainerIndex) * time.Second) - if droppedEvents := am.droppedEvents.Get(watchedContainer.K8sContainerID); droppedEvents { + if am.droppedEventsContainers.ContainsOne(watchedContainer.K8sContainerID) { watchedContainer.SetStatus(utils.WatchedContainerStatusMissingRuntime) } @@ -256,6 +269,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon // get capabilities from IG var capabilities []string + endpoints := make(map[string]*v1beta1.HTTPEndpoint) execs := make(map[string][]string) opens := make(map[string]mapset.Set[string]) if toSaveCapabilities := am.toSaveCapabilities.Get(watchedContainer.K8sContainerID); toSaveCapabilities.Cardinality() > 0 { @@ -270,11 +284,19 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon } } + // get pointer to endpoints map from IG + toSaveEndpoints := am.toSaveEndpoints.Get(watchedContainer.K8sContainerID) + // point IG to a new endpoints map + am.toSaveEndpoints.Set(watchedContainer.K8sContainerID, new(maps.SafeMap[string, *v1beta1.HTTPEndpoint])) + // prepare endpoints map + toSaveEndpoints.Range(func(path string, endpoint *v1beta1.HTTPEndpoint) bool { + endpoints[path] = endpoint + return true + }) // get pointer to execs map from IG toSaveExecs := am.toSaveExecs.Get(watchedContainer.K8sContainerID) // point IG to a new exec map am.toSaveExecs.Set(watchedContainer.K8sContainerID, new(maps.SafeMap[string, []string])) - // prepare execs map toSaveExecs.Range(func(execIdentifier string, pathAndArgs []string) bool { execs[execIdentifier] = pathAndArgs @@ -301,9 +323,9 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon // 3a. the object is missing its container slice - ADD one with the container profile at the right index // 3b. the object is missing the container profile - ADD the container profile at the right index // 3c. default - patch the container ourselves and REPLACE it at the right index - if len(capabilities) > 0 || len(execs) > 0 || len(opens) > 0 || len(toSaveSyscalls) > 0 || watchedContainer.StatusUpdated() { + if len(capabilities) > 0 || len(endpoints) > 0 || len(execs) > 0 || len(opens) > 0 || len(toSaveSyscalls) > 0 || watchedContainer.StatusUpdated() { // 0. 
calculate patch - operations := utils.CreateCapabilitiesPatchOperations(capabilities, observedSyscalls, execs, opens, watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex) + operations := utils.CreateCapabilitiesPatchOperations(capabilities, observedSyscalls, execs, opens, endpoints, watchedContainer.ContainerType.String(), watchedContainer.ContainerIndex) operations = utils.AppendStatusAnnotationPatchOperations(operations, watchedContainer) operations = append(operations, utils.PatchOperation{ Op: "add", @@ -339,6 +361,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon } containers = append(containers, v1beta1.ApplicationProfileContainer{ Name: name, + Endpoints: make([]v1beta1.HTTPEndpoint, 0), Execs: make([]v1beta1.ExecCalls, 0), Opens: make([]v1beta1.OpenCalls, 0), Capabilities: make([]string, 0), @@ -354,7 +377,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon newObject.Spec.EphemeralContainers = addContainers(newObject.Spec.EphemeralContainers, watchedContainer.ContainerNames[utils.EphemeralContainer]) // enrich container newContainer := utils.GetApplicationProfileContainer(newObject, watchedContainer.ContainerType, watchedContainer.ContainerIndex) - utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens) + utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens, endpoints) // try to create object if err := am.storageClient.CreateApplicationProfile(newObject, namespace); err != nil { gotErr = err @@ -397,6 +420,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon logger.L().Debug("ApplicationProfileManager - got seccomp profile", helpers.Interface("profile", seccompProfile)) existingContainer = &v1beta1.ApplicationProfileContainer{ Name: containerNames[watchedContainer.ContainerIndex], + Endpoints: make([]v1beta1.HTTPEndpoint, 0), Execs: make([]v1beta1.ExecCalls, 0), Opens: make([]v1beta1.OpenCalls, 0), Capabilities: make([]string, 0), @@ -405,7 +429,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon } } // update it - utils.EnrichApplicationProfileContainer(existingContainer, capabilities, observedSyscalls, execs, opens) + utils.EnrichApplicationProfileContainer(existingContainer, capabilities, observedSyscalls, execs, opens, endpoints) // get existing containers var existingContainers []v1beta1.ApplicationProfileContainer if watchedContainer.ContainerType == utils.Container { @@ -440,6 +464,7 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon Path: fmt.Sprintf("/spec/%s/%d", watchedContainer.ContainerType, i), Value: v1beta1.ApplicationProfileContainer{ Name: name, + Endpoints: make([]v1beta1.HTTPEndpoint, 0), Execs: make([]v1beta1.ExecCalls, 0), Opens: make([]v1beta1.OpenCalls, 0), Capabilities: make([]string, 0), @@ -484,6 +509,13 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon if gotErr != nil { // restore capabilities set am.toSaveCapabilities.Get(watchedContainer.K8sContainerID).Append(capabilities...) 
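+		// a failed save keeps unsaved events around for the next tick; the Has
+		// checks below avoid overwriting entries reported while the save was in flight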
+ // restore endpoints map entries + toSaveEndpoints.Range(func(path string, endpoint *v1beta1.HTTPEndpoint) bool { + if !am.toSaveEndpoints.Get(watchedContainer.K8sContainerID).Has(path) { + am.toSaveEndpoints.Get(watchedContainer.K8sContainerID).Set(path, endpoint) + } + return true + }) // restore execs map entries toSaveExecs.Range(func(uniqueExecIdentifier string, v []string) bool { if !am.toSaveExecs.Get(watchedContainer.K8sContainerID).Has(uniqueExecIdentifier) { @@ -500,18 +532,35 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon // record saved syscalls am.savedSyscalls.Get(watchedContainer.K8sContainerID).Append(toSaveSyscalls...) // record saved capabilities - am.savedCapabilities.Get(watchedContainer.K8sContainerID).Append(capabilities...) + savedCapabilities := am.savedCapabilities.Get(watchedContainer.K8sContainerID) + for _, capability := range capabilities { + savedCapabilities.Set(capability, nil) + } + // record saved endpoints + savedEndpoints := am.savedEndpoints.Get(watchedContainer.K8sContainerID) + toSaveEndpoints.Range(func(path string, endpoint *v1beta1.HTTPEndpoint) bool { + savedEndpoints.Set(path, endpoint) + return true + }) // record saved execs + savedExecs := am.savedExecs.Get(watchedContainer.K8sContainerID) toSaveExecs.Range(func(uniqueExecIdentifier string, v []string) bool { - if !am.savedExecs.Get(watchedContainer.K8sContainerID).Has(uniqueExecIdentifier) { - am.savedExecs.Get(watchedContainer.K8sContainerID).Set(uniqueExecIdentifier, v) - } + savedExecs.Set(uniqueExecIdentifier, v) return true }) // record saved opens - toSaveOpens.Range(utils.SetInMap(am.savedOpens.Get(watchedContainer.K8sContainerID))) + savedOpens := am.savedOpens.Get(watchedContainer.K8sContainerID) + toSaveOpens.Range(func(path string, newOpens mapset.Set[string]) bool { + if oldOpens, ok := savedOpens.Get(path); ok { + oldOpens.(mapset.Set[string]).Append(newOpens.ToSlice()...) 
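+				// Append mutates the set already stored in the TTL cache, so the
+				// cached entry ends up holding the union of the old and new flags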
+ } else { + savedOpens.Set(path, newOpens) + } + return true + }) logger.L().Debug("ApplicationProfileManager - saved application profile", helpers.Int("capabilities", len(capabilities)), + helpers.Int("endpoints", toSaveEndpoints.Len()), helpers.Int("execs", toSaveExecs.Len()), helpers.Int("opens", toSaveOpens.Len()), helpers.String("slug", slug), @@ -531,7 +580,7 @@ func (am *ApplicationProfileManager) startApplicationProfiling(ctx context.Conte watchedContainer := &utils.WatchedContainerData{ ContainerID: container.Runtime.ContainerID, - UpdateDataTicker: time.NewTicker(utils.AddRandomDuration(5, 10, am.cfg.InitialDelay)), // get out of sync with the relevancy manager + UpdateDataTicker: time.NewTicker(utils.AddJitter(am.cfg.InitialDelay, am.cfg.MaxJitterPercentage)), SyncChannel: syncChannel, K8sContainerID: k8sContainerID, NsMntId: container.Mntns, @@ -584,12 +633,13 @@ func (am *ApplicationProfileManager) ContainerCallback(notif containercollection if am.watchedContainerChannels.Has(notif.Container.Runtime.ContainerID) { return } - am.savedCapabilities.Set(k8sContainerID, mapset.NewSet[string]()) - am.droppedEvents.Set(k8sContainerID, false) - am.savedExecs.Set(k8sContainerID, new(maps.SafeMap[string, []string])) - am.savedOpens.Set(k8sContainerID, new(maps.SafeMap[string, mapset.Set[string]])) + am.savedCapabilities.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod)) + am.savedEndpoints.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod)) + am.savedExecs.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod)) + am.savedOpens.Set(k8sContainerID, cache.NewTTL(5*am.cfg.UpdateDataPeriod, am.cfg.UpdateDataPeriod)) am.savedSyscalls.Set(k8sContainerID, mapset.NewSet[string]()) am.toSaveCapabilities.Set(k8sContainerID, mapset.NewSet[string]()) + am.toSaveEndpoints.Set(k8sContainerID, new(maps.SafeMap[string, *v1beta1.HTTPEndpoint])) am.toSaveExecs.Set(k8sContainerID, new(maps.SafeMap[string, []string])) am.toSaveOpens.Set(k8sContainerID, new(maps.SafeMap[string, mapset.Set[string]])) am.removedContainers.Remove(k8sContainerID) // make sure container is not in the removed list @@ -612,9 +662,11 @@ func (am *ApplicationProfileManager) ReportCapability(k8sContainerID, capability if err := am.waitForContainer(k8sContainerID); err != nil { return } - if am.savedCapabilities.Get(k8sContainerID).ContainsOne(capability) { + // check if we already have this capability + if _, ok := am.savedCapabilities.Get(k8sContainerID).Get(capability); ok { return } + // add to capability map am.toSaveCapabilities.Get(k8sContainerID).Add(capability) } @@ -628,24 +680,25 @@ func (am *ApplicationProfileManager) ReportFileExec(k8sContainerID, path string, } // check if we already have this exec // we use a SHA256 hash of the exec to identify it uniquely (path + args, in the order they were provided) - savedExecs := am.savedExecs.Get(k8sContainerID) execIdentifier := utils.CalculateSHA256FileExecHash(path, args) - if savedExecs.Has(execIdentifier) { + if _, ok := am.savedExecs.Get(k8sContainerID).Get(execIdentifier); ok { return } - // add to exec map, first element is the path, the rest are the args - execMap := am.toSaveExecs.Get(k8sContainerID) - execMap.Set(execIdentifier, append([]string{path}, args...)) + am.toSaveExecs.Get(k8sContainerID).Set(execIdentifier, append([]string{path}, args...)) } func (am *ApplicationProfileManager) ReportFileOpen(k8sContainerID, path string, flags []string) { if err := 
am.waitForContainer(k8sContainerID); err != nil { return } + // deduplicate /proc/1234/* into /proc/.../* (quite a common case) + // we perform it here instead of waiting for compression + if strings.HasPrefix(path, "/proc/") { + path = procRegex.ReplaceAllString(path, "/proc/"+dynamicpathdetector.DynamicIdentifier) + } // check if we already have this open - savedOpens := am.savedOpens.Get(k8sContainerID) - if savedOpens.Has(path) && savedOpens.Get(path).Contains(flags...) { + if opens, ok := am.savedOpens.Get(k8sContainerID).Get(path); ok && opens.(mapset.Set[string]).Contains(flags...) { return } // add to open map @@ -658,8 +711,29 @@ func (am *ApplicationProfileManager) ReportFileOpen(k8sContainerID, path string, } func (am *ApplicationProfileManager) ReportDroppedEvent(k8sContainerID string) { + am.droppedEventsContainers.Add(k8sContainerID) +} + +func (am *ApplicationProfileManager) ReportHTTPEvent(k8sContainerID string, event *tracerhttptype.Event) { if err := am.waitForContainer(k8sContainerID); err != nil { return } - am.droppedEvents.Set(k8sContainerID, true) + // get endpoint from event + endpointIdentifier, err := am.GetEndpointIdentifier(event) + if err != nil { + logger.L().Ctx(am.ctx).Warning("ApplicationProfileManager - failed to get endpoint identifier", helpers.Error(err)) + return + } + endpoint, err := GetNewEndpoint(event, endpointIdentifier) + if err != nil { + logger.L().Ctx(am.ctx).Warning("ApplicationProfileManager - failed to get new endpoint", helpers.Error(err)) + return + } + // check if we already have this endpoint + endpointHash := CalculateHTTPEndpointHash(endpoint) + if _, ok := am.savedEndpoints.Get(k8sContainerID).Get(endpointHash); ok { + return + } + // add to endpoint map + am.toSaveEndpoints.Get(k8sContainerID).Set(endpointHash, endpoint) } diff --git a/pkg/applicationprofilemanager/v1/applicationprofile_manager_test.go b/pkg/applicationprofilemanager/v1/applicationprofile_manager_test.go index c7f65ad6..aac26329 100644 --- a/pkg/applicationprofilemanager/v1/applicationprofile_manager_test.go +++ b/pkg/applicationprofilemanager/v1/applicationprofile_manager_test.go @@ -2,19 +2,26 @@ package applicationprofilemanager import ( "context" + "encoding/json" + "net/http" + "net/url" "sort" + "strings" "testing" "time" mapset "github.com/deckarep/golang-set/v2" + "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" "github.com/inspektor-gadget/inspektor-gadget/pkg/types" "github.com/kubescape/node-agent/pkg/config" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/seccompmanager" "github.com/kubescape/node-agent/pkg/storage" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" "github.com/stretchr/testify/assert" ) @@ -22,7 +29,7 @@ func TestApplicationProfileManager(t *testing.T) { cfg := config.Config{ InitialDelay: 1 * time.Second, MaxSniffingTime: 5 * time.Minute, - UpdateDataPeriod: 1 * time.Second, + UpdateDataPeriod: 5 * time.Second, } ctx := context.TODO() k8sClient := &k8sclient.K8sClientMock{} @@ -72,6 +79,99 @@ func TestApplicationProfileManager(t *testing.T) { go am.ReportFileOpen("ns/pod/cont", "/etc/hosts", []string{"O_RDONLY"}) // report another file open go am.ReportFileExec("ns/pod/cont", "/bin/bash", []string{"-c", "ls"}) // 
duplicate - will not be reported
+
+	// report http events: the first host ("localhost:123 GMT") is invalid and
+	// should be rejected, the repeated POST should be deduplicated, and the
+	// explicit port should produce a separate endpoint, leaving three in total
+
+	parsedURL, _ := url.Parse("/abc")
+
+	request := &http.Request{
+		Method: "GET",
+		URL:    parsedURL,
+		Host:   "localhost:123 GMT",
+		Header: map[string][]string{},
+	}
+
+	testEvent := &tracerhttptype.Event{
+		Request:   request,
+		Internal:  false,
+		Direction: "inbound",
+	}
+
+	go am.ReportHTTPEvent("ns/pod/cont", testEvent)
+
+	request = &http.Request{
+		Method: "GET",
+		URL:    parsedURL,
+		Host:   "localhost",
+		Header: map[string][]string{},
+	}
+
+	testEvent = &tracerhttptype.Event{
+		Request:   request,
+		Internal:  false,
+		Direction: "inbound",
+	}
+
+	go am.ReportHTTPEvent("ns/pod/cont", testEvent)
+
+	request = &http.Request{
+		Method: "POST",
+		URL:    parsedURL,
+		Host:   "localhost",
+		Header: map[string][]string{
+			"Connection": {"keep-alive"},
+		},
+	}
+
+	testEvent = &tracerhttptype.Event{
+		Request:   request,
+		Internal:  false,
+		Direction: "inbound",
+	}
+
+	go am.ReportHTTPEvent("ns/pod/cont", testEvent)
+
+	request = &http.Request{
+		Method: "POST",
+		URL:    parsedURL,
+		Host:   "localhost",
+		Header: map[string][]string{
+			"Connection": {"keep-alive"},
+		},
+	}
+
+	testEvent = &tracerhttptype.Event{
+		Request:   request,
+		Internal:  false,
+		Direction: "inbound",
+	}
+
+	go am.ReportHTTPEvent("ns/pod/cont", testEvent)
+
+	request = &http.Request{
+		Method: "POST",
+		URL:    parsedURL,
+		Host:   "localhost:123",
+		Header: map[string][]string{
+			"Connection": {"keep-alive"},
+		},
+	}
+
+	testEvent = &tracerhttptype.Event{
+		Request:   request,
+		Internal:  false,
+		Direction: "inbound",
+	}
+
+	go am.ReportHTTPEvent("ns/pod/cont", testEvent)
+
+	time.Sleep(8 * time.Second)
+
 	// sleep more
 	time.Sleep(2 * time.Second)
 	// report container stopped
@@ -100,6 +200,14 @@ func TestApplicationProfileManager(t *testing.T) {
 		assert.Contains(t, reportedExecs, expectedExec)
 	}
 	assert.Equal(t, []v1beta1.OpenCalls{{Path: "/etc/passwd", Flags: []string{"O_RDONLY"}}}, storageClient.ApplicationProfiles[0].Spec.Containers[1].Opens)
+
+	expectedEndpoints := GetExpectedEndpoints(t)
+	actualEndpoints := storageClient.ApplicationProfiles[1].Spec.Containers[1].Endpoints
+
+	sortHTTPEndpoints(expectedEndpoints)
+	sortHTTPEndpoints(actualEndpoints)
+
+	assert.Equal(t, expectedEndpoints, actualEndpoints)
 	// check the second profile - this is a patch for execs and opens
 	sort.Strings(storageClient.ApplicationProfiles[1].Spec.Containers[0].Capabilities)
 	assert.Equal(t, []string{"NET_BIND_SERVICE"}, storageClient.ApplicationProfiles[1].Spec.Containers[1].Capabilities)
@@ -109,3 +217,81 @@ func TestApplicationProfileManager(t *testing.T) {
 		{Path: "/etc/hosts", Flags: []string{"O_RDONLY"}},
 	}, storageClient.ApplicationProfiles[1].Spec.Containers[1].Opens)
 }
+
+func GetExpectedEndpoints(t *testing.T) []v1beta1.HTTPEndpoint {
+	headers := map[string][]string{"Host": {"localhost"}, "Connection": {"keep-alive"}}
+	rawJSON, err := json.Marshal(headers)
+	assert.NoError(t, err)
+
+	endpointPost := v1beta1.HTTPEndpoint{
+		Endpoint:  ":80/abc",
+		Methods:   []string{"POST"},
+		Internal:  false,
+		Direction: "inbound",
+		Headers:   rawJSON}
+
+	headers = map[string][]string{"Host": {"localhost"}}
+	rawJSON, err = json.Marshal(headers)
+	assert.NoError(t, err)
+
+	endpointGet := v1beta1.HTTPEndpoint{
+		Endpoint:  ":80/abc",
+		Methods:   []string{"GET"},
+		Internal:  false,
+		Direction: "inbound",
+		Headers:   rawJSON}
+
+	headers = map[string][]string{"Host": {"localhost:123"}, "Connection": {"keep-alive"}}
+	rawJSON, err = json.Marshal(headers)
+	assert.NoError(t, err)
+
+	
endpointPort := v1beta1.HTTPEndpoint{ + Endpoint: ":123/abc", + Methods: []string{"POST"}, + Internal: false, + Direction: "inbound", + Headers: rawJSON} + + return []v1beta1.HTTPEndpoint{endpointPost, endpointGet, endpointPort} +} + +func sortHTTPEndpoints(endpoints []v1beta1.HTTPEndpoint) { + sort.Slice(endpoints, func(i, j int) bool { + // Sort by Endpoint first + if endpoints[i].Endpoint != endpoints[j].Endpoint { + return endpoints[i].Endpoint < endpoints[j].Endpoint + } + // If Endpoints are the same, sort by the first Method + if len(endpoints[i].Methods) > 0 && len(endpoints[j].Methods) > 0 { + return endpoints[i].Methods[0] < endpoints[j].Methods[0] + } + // If Methods are empty or the same, sort by Internal + if endpoints[i].Internal != endpoints[j].Internal { + return endpoints[i].Internal + } + // If Internal is the same, sort by Direction + if endpoints[i].Direction != endpoints[j].Direction { + return string(endpoints[i].Direction) < string(endpoints[j].Direction) + } + // If all else is equal, sort by Headers + return string(endpoints[i].Headers) < string(endpoints[j].Headers) + }) +} + +func BenchmarkReportFileOpen(b *testing.B) { + savedOpens := maps.SafeMap[string, mapset.Set[string]]{} + savedOpens.Set("/proc/"+dynamicpathdetector.DynamicIdentifier+"/foo/bar", mapset.NewSet("O_LARGEFILE", "O_RDONLY")) + paths := []string{"/proc/12345/foo/bar", "/bin/ls", "/etc/passwd"} + flags := []string{"O_CLOEXEC", "O_RDONLY"} + for i := 0; i < b.N; i++ { + for _, path := range paths { + if strings.HasPrefix(path, "/proc/") { + path = procRegex.ReplaceAllString(path, "/proc/"+dynamicpathdetector.DynamicIdentifier) + } + if savedOpens.Has(path) && savedOpens.Get(path).Contains(flags...) { + continue + } + } + } + b.ReportAllocs() +} diff --git a/pkg/applicationprofilemanager/v1/endpoint_utils.go b/pkg/applicationprofilemanager/v1/endpoint_utils.go new file mode 100644 index 00000000..6acf18e8 --- /dev/null +++ b/pkg/applicationprofilemanager/v1/endpoint_utils.go @@ -0,0 +1,89 @@ +package applicationprofilemanager + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "net/url" + "sort" + "strings" + + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + tracerhttphelper "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/tracer" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +func GetNewEndpoint(event *tracerhttptype.Event, identifier string) (*v1beta1.HTTPEndpoint, error) { + headers := tracerhttphelper.ExtractConsistentHeaders(event.Request.Header) + headers["Host"] = []string{event.Request.Host} + rawJSON, err := json.Marshal(headers) + if err != nil { + logger.L().Error("Error marshaling JSON:", helpers.Error(err)) + return nil, err + } + + return &v1beta1.HTTPEndpoint{ + Endpoint: identifier, + Methods: []string{event.Request.Method}, + Internal: event.Internal, + Direction: event.Direction, + Headers: rawJSON}, nil +} + +func (am *ApplicationProfileManager) GetEndpointIdentifier(request *tracerhttptype.Event) (string, error) { + identifier := request.Request.URL.String() + if host := request.Request.Host; host != "" { + + if !isValidHost(host) { + return "", fmt.Errorf("invalid host: %s", host) + } + + _, port, err := net.SplitHostPort(host) + if err != nil { + port = "80" + } + identifier = ":" + port + identifier + } + + return identifier, nil +} + +func CalculateHTTPEndpointHash(endpoint *v1beta1.HTTPEndpoint) string 
{
+	hash := sha256.New()
+
+	hash.Write([]byte(endpoint.Endpoint))
+
+	sortedMethods := make([]string, len(endpoint.Methods))
+	copy(sortedMethods, endpoint.Methods)
+	sort.Strings(sortedMethods)
+
+	hash.Write([]byte(strings.Join(sortedMethods, ",")))
+	hash.Write(endpoint.Headers)
+
+	hash.Write([]byte(endpoint.Direction))
+
+	return hex.EncodeToString(hash.Sum(nil))
+}
+
+func isValidHost(host string) bool {
+	// Check if the host is empty
+	if host == "" {
+		return false
+	}
+
+	// Check if host contains spaces or invalid characters
+	if strings.ContainsAny(host, " \t\r\n") {
+		return false
+	}
+
+	// Parse the host using http's standard URL parser
+	if _, err := url.ParseRequestURI("http://" + host); err != nil {
+		return false
+	}
+
+	return true
+}
diff --git a/pkg/cloudmetadata/metadata.go b/pkg/cloudmetadata/metadata.go
new file mode 100644
index 00000000..b4569734
--- /dev/null
+++ b/pkg/cloudmetadata/metadata.go
@@ -0,0 +1,406 @@
+package cloudmetadata
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	apitypes "github.com/armosec/armoapi-go/armotypes"
+	"github.com/kubescape/k8s-interface/k8sinterface"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	ProviderAWS          = "aws"
+	ProviderGCP          = "gcp"
+	ProviderAzure        = "azure"
+	ProviderDigitalOcean = "digitalocean"
+	ProviderOpenStack    = "openstack"
+	ProviderVMware       = "vmware"
+	ProviderAlibaba      = "alibaba"
+	ProviderIBM          = "ibm"
+	ProviderOracle       = "oracle"
+	ProviderLinode       = "linode"
+	ProviderScaleway     = "scaleway"
+	ProviderVultr        = "vultr"
+	ProviderHetzner      = "hetzner"
+	ProviderEquinixMetal = "equinixmetal" // formerly Packet
+	ProviderExoscale     = "exoscale"
+	ProviderUnknown      = "unknown"
+)
+
+// GetCloudMetadata retrieves cloud metadata for a given node
+func GetCloudMetadata(ctx context.Context, client *k8sinterface.KubernetesApi, nodeName string) (*apitypes.CloudMetadata, error) {
+	node, err := client.GetKubernetesClient().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get node %s: %v", nodeName, err)
+	}
+
+	metadata := &apitypes.CloudMetadata{
+		Hostname: node.Name,
+	}
+
+	// Determine provider and extract metadata
+	providerID := node.Spec.ProviderID
+	switch {
+	case strings.HasPrefix(providerID, "aws://"):
+		metadata.Provider = ProviderAWS
+		metadata = extractAWSMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "gce://"):
+		metadata.Provider = ProviderGCP
+		metadata = extractGCPMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "azure://"):
+		metadata.Provider = ProviderAzure
+		metadata = extractAzureMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "digitalocean://"):
+		metadata.Provider = ProviderDigitalOcean
+		metadata = extractDigitalOceanMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "openstack://"):
+		metadata.Provider = ProviderOpenStack
+		metadata = extractOpenstackMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "vsphere://"):
+		metadata.Provider = ProviderVMware
+		metadata = extractVMwareMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "alicloud://"):
+		metadata.Provider = ProviderAlibaba
+		metadata = extractAlibabaMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "ibm://"):
+		metadata.Provider = ProviderIBM
+		metadata = extractIBMMetadata(node, metadata)
+	case strings.HasPrefix(providerID, "oci://"):
+		metadata.Provider = ProviderOracle
+		metadata = extractOracleMetadata(node, metadata)
+	case
strings.HasPrefix(providerID, "linode://"): + metadata.Provider = ProviderLinode + metadata = extractLinodeMetadata(node, metadata) + case strings.HasPrefix(providerID, "scaleway://"): + metadata.Provider = ProviderScaleway + metadata = extractScalewayMetadata(node, metadata) + case strings.HasPrefix(providerID, "vultr://"): + metadata.Provider = ProviderVultr + metadata = extractVultrMetadata(node, metadata) + case strings.HasPrefix(providerID, "hcloud://"): + metadata.Provider = ProviderHetzner + metadata = extractHetznerMetadata(node, metadata) + case strings.HasPrefix(providerID, "equinixmetal://"): + metadata.Provider = ProviderEquinixMetal + metadata = extractEquinixMetalMetadata(node, metadata) + case strings.HasPrefix(providerID, "exoscale://"): + metadata.Provider = ProviderExoscale + metadata = extractExoscaleMetadata(node, metadata) + default: + metadata.Provider = ProviderUnknown + return nil, fmt.Errorf("unknown cloud provider for node %s: %s", nodeName, providerID) + } + + // Extract common metadata from node status + for _, addr := range node.Status.Addresses { + switch addr.Type { + case "InternalIP": + metadata.PrivateIP = addr.Address + case "ExternalIP": + metadata.PublicIP = addr.Address + case "Hostname": + metadata.Hostname = addr.Address + } + } + + return metadata, nil +} + +func extractAWSMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: aws:///us-west-2a/i-1234567890abcdef0 + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract account ID from annotations if available + if accountID, ok := node.Annotations["eks.amazonaws.com/account-id"]; ok { + metadata.AccountID = accountID + } + + return metadata +} + +func extractGCPMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["beta.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract project and instance ID from provider ID + // Format: gce:///project-name/zone/instance-name + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 3 { + metadata.AccountID = parts[3] // project name + metadata.InstanceID = parts[len(parts)-1] + } + + return metadata +} + +func extractAzureMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract subscription ID and resource info from provider ID + // Format: azure:///subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachineScaleSets/ + if parts := strings.Split(node.Spec.ProviderID, "/"); len(parts) > 3 { + for i, part := range parts { + if part == "subscriptions" && i+1 < len(parts) { + metadata.AccountID = parts[i+1] + } + if part == "virtualMachineScaleSets" && i+1 < len(parts) { + metadata.InstanceID = parts[i+1] + } + } + } + + return metadata +} + +func 
extractDigitalOceanMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["beta.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract droplet ID from provider ID + // Format: digitalocean:///droplet-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + return metadata +} + +func extractOpenstackMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: openstack:///instance-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract project ID if available + if projectID, ok := node.Labels["project.openstack.org/project-id"]; ok { + metadata.AccountID = projectID + } + + return metadata +} + +func extractVMwareMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract VM UUID from provider ID + // Format: vsphere:///vm-uuid + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract datacenter info if available + if dc, ok := node.Labels["vsphere.kubernetes.io/datacenter"]; ok { + metadata.Region = dc + } + + return metadata +} + +func extractAlibabaMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: alicloud:///instance-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract account ID if available + if accountID, ok := node.Labels["alibabacloud.com/account-id"]; ok { + metadata.AccountID = accountID + } + + return metadata +} + +func extractIBMMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: ibm:///instance-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract account ID if available + if accountID, ok := node.Labels["ibm-cloud.kubernetes.io/account-id"]; ok { + metadata.AccountID = accountID + } + + return metadata +} + +func extractOracleMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = 
node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract OCID from provider ID + // Format: oci:///ocid + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract compartment ID if available + if compartmentID, ok := node.Labels["oci.oraclecloud.com/compartment-id"]; ok { + metadata.AccountID = compartmentID + } + + return metadata +} + +func extractLinodeMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract Linode ID from provider ID + // Format: linode:///linode-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + return metadata +} + +func extractScalewayMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: scaleway:///instance-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract organization ID if available + if orgID, ok := node.Labels["scaleway.com/organization-id"]; ok { + metadata.AccountID = orgID + } + + return metadata +} + +func extractVultrMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: vultr:///instance-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + return metadata +} + +func extractHetznerMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract server ID from provider ID + // Format: hcloud:///server-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract project ID if available + if projectID, ok := node.Labels["hcloud.hetzner.cloud/project-id"]; ok { + metadata.AccountID = projectID + } + + return metadata +} + +func extractEquinixMetalMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract device ID from provider ID + // Format: equinixmetal:///device-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // 
Extract project ID if available + if projectID, ok := node.Labels["metal.equinix.com/project-id"]; ok { + metadata.AccountID = projectID + } + + return metadata +} + +func extractExoscaleMetadata(node *corev1.Node, metadata *apitypes.CloudMetadata) *apitypes.CloudMetadata { + // Extract from labels + metadata.InstanceType = node.Labels["node.kubernetes.io/instance-type"] + metadata.Region = node.Labels["topology.kubernetes.io/region"] + metadata.Zone = node.Labels["topology.kubernetes.io/zone"] + + // Extract instance ID from provider ID + // Format: exoscale:///instance-id + parts := strings.Split(node.Spec.ProviderID, "/") + if len(parts) > 0 { + metadata.InstanceID = parts[len(parts)-1] + } + + // Extract organization ID if available + if orgID, ok := node.Labels["exoscale.com/organization-id"]; ok { + metadata.AccountID = orgID + } + + return metadata +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 14ea7752..6902dfc0 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -17,11 +17,14 @@ type Config struct { InitialDelay time.Duration `mapstructure:"initialDelay"` MaxSniffingTime time.Duration `mapstructure:"maxSniffingTimePerContainer"` UpdateDataPeriod time.Duration `mapstructure:"updateDataPeriod"` + MaxDelaySeconds int `mapstructure:"maxDelaySeconds"` + MaxJitterPercentage int `mapstructure:"maxJitterPercentage"` EnableFullPathTracing bool `mapstructure:"fullPathTracingEnabled"` EnableApplicationProfile bool `mapstructure:"applicationProfileServiceEnabled"` EnableMalwareDetection bool `mapstructure:"malwareDetectionEnabled"` EnablePrometheusExporter bool `mapstructure:"prometheusExporterEnabled"` EnableRuntimeDetection bool `mapstructure:"runtimeDetectionEnabled"` + EnableHttpDetection bool `mapstructure:"httpDetectionEnabled"` EnableNetworkTracing bool `mapstructure:"networkServiceEnabled"` EnableRelevancy bool `mapstructure:"relevantCVEServiceEnabled"` EnableNodeProfile bool `mapstructure:"nodeProfileServiceEnabled"` @@ -40,6 +43,8 @@ func LoadConfig(path string) (Config, error) { viper.SetDefault("fullPathTracingEnabled", true) viper.SetDefault("initialDelay", 2*time.Minute) viper.SetDefault("nodeProfileInterval", 10*time.Minute) + viper.SetDefault("maxDelaySeconds", 30) + viper.SetDefault("maxJitterPercentage", 5) viper.AutomaticEnv() diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index a800cf09..d812a4a8 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -26,10 +26,13 @@ func TestLoadConfig(t *testing.T) { EnableRelevancy: true, EnableNetworkTracing: true, EnableNodeProfile: true, + EnableHttpDetection: true, InitialDelay: 2 * time.Minute, MaxSniffingTime: 6 * time.Hour, UpdateDataPeriod: 1 * time.Minute, NodeProfileInterval: 1 * time.Minute, + MaxDelaySeconds: 30, + MaxJitterPercentage: 5, EnablePrometheusExporter: true, EnableRuntimeDetection: true, EnableSeccomp: true, diff --git a/pkg/containerwatcher/container_watcher_interface.go b/pkg/containerwatcher/container_watcher_interface.go index c63b5754..bc9ce8e5 100644 --- a/pkg/containerwatcher/container_watcher_interface.go +++ b/pkg/containerwatcher/container_watcher_interface.go @@ -2,10 +2,37 @@ package containerwatcher import ( "context" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/inspektor-gadget/inspektor-gadget/pkg/socketenricher" + tracercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/tracer-collection" + "github.com/kubescape/node-agent/pkg/utils" ) type 
ContainerWatcher interface { Ready() bool Start(ctx context.Context) error Stop() + GetTracerCollection() *tracercollection.TracerCollection + GetContainerCollection() *containercollection.ContainerCollection + GetSocketEnricher() *socketenricher.SocketEnricher + GetContainerSelector() *containercollection.ContainerSelector + RegisterCustomTracer(tracer CustomTracer) error + UnregisterCustomTracer(tracer CustomTracer) error + RegisterContainerReceiver(receiver ContainerReceiver) + UnregisterContainerReceiver(receiver ContainerReceiver) +} + +type CustomTracer interface { + Start() error + Stop() error + Name() string +} + +type EventReceiver interface { + ReportEvent(eventType utils.EventType, event utils.K8sEvent) +} + +type ContainerReceiver interface { + ContainerCallback(notif containercollection.PubSubEvent) } diff --git a/pkg/containerwatcher/container_watcher_mock.go b/pkg/containerwatcher/container_watcher_mock.go index c043231d..66989e11 100644 --- a/pkg/containerwatcher/container_watcher_mock.go +++ b/pkg/containerwatcher/container_watcher_mock.go @@ -2,6 +2,10 @@ package containerwatcher import ( "context" + + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/inspektor-gadget/inspektor-gadget/pkg/socketenricher" + tracercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/tracer-collection" ) type ContainerWatcherMock struct{} @@ -16,4 +20,48 @@ func (c ContainerWatcherMock) Start(_ context.Context) error { func (c ContainerWatcherMock) Stop() {} +func (c ContainerWatcherMock) RegisterCustomTracer(_ CustomTracer) error { + return nil +} + +func (c ContainerWatcherMock) UnregisterCustomTracer(_ CustomTracer) error { + return nil +} + +func (c ContainerWatcherMock) RegisterContainerReceiver(_ ContainerReceiver) {} + +func (c ContainerWatcherMock) UnregisterContainerReceiver(_ ContainerReceiver) {} + +func (c ContainerWatcherMock) GetTracerCollection() *tracercollection.TracerCollection { + return nil +} + +func (c ContainerWatcherMock) GetContainerCollection() *containercollection.ContainerCollection { + return nil +} + +func (c ContainerWatcherMock) GetSocketEnricher() *socketenricher.SocketEnricher { + return nil +} + +func (c ContainerWatcherMock) GetContainerSelector() *containercollection.ContainerSelector { + return nil +} + var _ ContainerWatcher = (*ContainerWatcherMock)(nil) + +type CustomTracerMock struct{} + +func (c CustomTracerMock) Start() error { + return nil +} + +func (c CustomTracerMock) Stop() error { + return nil +} + +func (c CustomTracerMock) Name() string { + return "" +} + +var _ CustomTracer = (*CustomTracerMock)(nil) diff --git a/pkg/containerwatcher/v1/container_watcher.go b/pkg/containerwatcher/v1/container_watcher.go index 8146ea18..4e1af499 100644 --- a/pkg/containerwatcher/v1/container_watcher.go +++ b/pkg/containerwatcher/v1/container_watcher.go @@ -6,6 +6,7 @@ import ( "os" mapset "github.com/deckarep/golang-set/v2" + "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" containerutilsTypes "github.com/inspektor-gadget/inspektor-gadget/pkg/container-utils/types" tracerseccomp "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/advise/seccomp/tracer" @@ -31,12 +32,18 @@ import ( "github.com/kubescape/node-agent/pkg/dnsmanager" tracerhardlink "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/tracer" tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" + tracerhttp 
"github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/tracer" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" + tracerptrace "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer" + tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" tracerandomx "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/tracer" tracerandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" tracerssh "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/tracer" tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" tracersymlink "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/tracer" tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" + "github.com/kubescape/node-agent/pkg/processmanager" + "github.com/kubescape/node-agent/pkg/malwaremanager" "github.com/kubescape/node-agent/pkg/metricsmanager" "github.com/kubescape/node-agent/pkg/networkmanager" @@ -53,19 +60,23 @@ const ( networkTraceName = "trace_network" dnsTraceName = "trace_dns" openTraceName = "trace_open" + ptraceTraceName = "trace_ptrace" randomxTraceName = "trace_randomx" symlinkTraceName = "trace_symlink" hardlinkTraceName = "trace_hardlink" sshTraceName = "trace_ssh" + httpTraceName = "trace_http" capabilitiesWorkerPoolSize = 1 execWorkerPoolSize = 2 openWorkerPoolSize = 8 + ptraceWorkerPoolSize = 1 networkWorkerPoolSize = 1 dnsWorkerPoolSize = 5 randomxWorkerPoolSize = 1 symlinkWorkerPoolSize = 1 hardlinkWorkerPoolSize = 1 sshWorkerPoolSize = 1 + httpWorkerPoolSize = 4 ) type IGContainerWatcher struct { @@ -94,6 +105,7 @@ type IGContainerWatcher struct { capabilitiesTracer *tracercapabilities.Tracer execTracer *tracerexec.Tracer openTracer *traceropen.Tracer + ptraceTracer *tracerptrace.Tracer syscallTracer *tracerseccomp.Tracer networkTracer *tracernetwork.Tracer dnsTracer *tracerdns.Tracer @@ -101,46 +113,54 @@ type IGContainerWatcher struct { symlinkTracer *tracersymlink.Tracer hardlinkTracer *tracerhardlink.Tracer sshTracer *tracerssh.Tracer + httpTracer *tracerhttp.Tracer kubeIPInstance operators.OperatorInstance kubeNameInstance operators.OperatorInstance + // Third party tracers + thirdPartyTracers mapset.Set[containerwatcher.CustomTracer] + // Third party container receivers + thirdPartyContainerReceivers mapset.Set[containerwatcher.ContainerReceiver] // Worker pools capabilitiesWorkerPool *ants.PoolWithFunc execWorkerPool *ants.PoolWithFunc openWorkerPool *ants.PoolWithFunc + ptraceWorkerPool *ants.PoolWithFunc networkWorkerPool *ants.PoolWithFunc dnsWorkerPool *ants.PoolWithFunc randomxWorkerPool *ants.PoolWithFunc symlinkWorkerPool *ants.PoolWithFunc hardlinkWorkerPool *ants.PoolWithFunc sshdWorkerPool *ants.PoolWithFunc + httpWorkerPool *ants.PoolWithFunc capabilitiesWorkerChan chan *tracercapabilitiestype.Event execWorkerChan chan *tracerexectype.Event openWorkerChan chan *traceropentype.Event + ptraceWorkerChan chan *tracerptracetype.Event networkWorkerChan chan *tracernetworktype.Event dnsWorkerChan chan *tracerdnstype.Event randomxWorkerChan chan *tracerandomxtype.Event symlinkWorkerChan chan *tracersymlinktype.Event hardlinkWorkerChan chan *tracerhardlinktype.Event sshWorkerChan chan *tracersshtype.Event + httpWorkerChan chan *tracerhttptype.Event preRunningContainersIDs mapset.Set[string] - - timeBasedContainers mapset.Set[string] // list of containers to track based on ticker - ruleManagedPods mapset.Set[string] // list of pods to track based on rules - metrics 
metricsmanager.MetricsManager - + timeBasedContainers mapset.Set[string] // list of containers to track based on ticker + ruleManagedPods mapset.Set[string] // list of pods to track based on rules + metrics metricsmanager.MetricsManager // cache ruleBindingPodNotify *chan rulebinding.RuleBindingNotify - // container runtime runtime *containerutilsTypes.RuntimeConfig + // process manager + processManager processmanager.ProcessManagerClient } var _ containerwatcher.ContainerWatcher = (*IGContainerWatcher)(nil) -func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager applicationprofilemanager.ApplicationProfileManagerClient, k8sClient *k8sinterface.KubernetesApi, relevancyManager relevancymanager.RelevancyManagerClient, networkManagerClient networkmanager.NetworkManagerClient, dnsManagerClient dnsmanager.DNSManagerClient, metrics metricsmanager.MetricsManager, ruleManager rulemanager.RuleManagerClient, malwareManager malwaremanager.MalwareManagerClient, preRunningContainers mapset.Set[string], ruleBindingPodNotify *chan rulebinding.RuleBindingNotify, runtime *containerutilsTypes.RuntimeConfig) (*IGContainerWatcher, error) { +func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager applicationprofilemanager.ApplicationProfileManagerClient, k8sClient *k8sinterface.KubernetesApi, relevancyManager relevancymanager.RelevancyManagerClient, networkManagerClient networkmanager.NetworkManagerClient, dnsManagerClient dnsmanager.DNSManagerClient, metrics metricsmanager.MetricsManager, ruleManager rulemanager.RuleManagerClient, malwareManager malwaremanager.MalwareManagerClient, preRunningContainers mapset.Set[string], ruleBindingPodNotify *chan rulebinding.RuleBindingNotify, runtime *containerutilsTypes.RuntimeConfig, thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]], processManager processmanager.ProcessManagerClient) (*IGContainerWatcher, error) { // Use container collection to get notified for new containers containerCollection := &containercollection.ContainerCollection{} // Create a tracer collection instance @@ -158,7 +178,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli metrics.ReportEvent(utils.CapabilitiesEventType) k8sContainerID := utils.CreateK8sContainerID(event.K8s.Namespace, event.K8s.PodName, event.K8s.ContainerName) applicationProfileManager.ReportCapability(k8sContainerID, event.CapName) - ruleManager.ReportCapability(event) + ruleManager.ReportEvent(utils.CapabilitiesEventType, &event) + + // Report capabilities to event receivers + reportEventToThirdPartyTracers(utils.CapabilitiesEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating capabilities worker pool: %w", err) @@ -183,10 +206,14 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli path = event.Args[0] } metrics.ReportEvent(utils.ExecveEventType) + processManager.ReportEvent(utils.ExecveEventType, &event) applicationProfileManager.ReportFileExec(k8sContainerID, path, event.Args) relevancyManager.ReportFileExec(event.Runtime.ContainerID, k8sContainerID, path) - ruleManager.ReportFileExec(event) - malwareManager.ReportFileExec(k8sContainerID, event) + ruleManager.ReportEvent(utils.ExecveEventType, &event) + malwareManager.ReportEvent(utils.ExecveEventType, &event) + + // Report exec events to event receivers + reportEventToThirdPartyTracers(utils.ExecveEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, 
fmt.Errorf("creating exec worker pool: %w", err) @@ -213,8 +240,11 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli metrics.ReportEvent(utils.OpenEventType) applicationProfileManager.ReportFileOpen(k8sContainerID, path, event.Flags) relevancyManager.ReportFileOpen(event.Runtime.ContainerID, k8sContainerID, path) - ruleManager.ReportFileOpen(event) - malwareManager.ReportFileOpen(k8sContainerID, event) + ruleManager.ReportEvent(utils.OpenEventType, &event) + malwareManager.ReportEvent(utils.OpenEventType, &event) + + // Report open events to event receivers + reportEventToThirdPartyTracers(utils.OpenEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating open worker pool: %w", err) @@ -234,7 +264,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli } metrics.ReportEvent(utils.NetworkEventType) networkManagerClient.ReportNetworkEvent(k8sContainerID, event) - ruleManager.ReportNetworkEvent(event) + ruleManager.ReportEvent(utils.NetworkEventType, &event) + + // Report network events to event receivers + reportEventToThirdPartyTracers(utils.NetworkEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating network worker pool: %w", err) @@ -243,6 +276,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli dnsWorkerPool, err := ants.NewPoolWithFunc(dnsWorkerPoolSize, func(i interface{}) { event := i.(tracerdnstype.Event) + if event.K8s.ContainerName == "" { + return + } + // ignore DNS events that are not responses if event.Qr != tracerdnstype.DNSPktTypeResponse { return @@ -255,7 +292,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli metrics.ReportEvent(utils.DnsEventType) dnsManagerClient.ReportDNSEvent(event) - ruleManager.ReportDNSEvent(event) + ruleManager.ReportEvent(utils.DnsEventType, &event) + + // Report DNS events to event receivers + reportEventToThirdPartyTracers(utils.DnsEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating dns worker pool: %w", err) @@ -267,7 +307,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli return } metrics.ReportEvent(utils.RandomXEventType) - ruleManager.ReportRandomxEvent(event) + ruleManager.ReportEvent(utils.RandomXEventType, &event) + + // Report randomx events to event receivers + reportEventToThirdPartyTracers(utils.RandomXEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating randomx worker pool: %w", err) @@ -279,7 +322,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli return } metrics.ReportEvent(utils.SymlinkEventType) - ruleManager.ReportSymlinkEvent(event) + ruleManager.ReportEvent(utils.SymlinkEventType, &event) + + // Report symlink events to event receivers + reportEventToThirdPartyTracers(utils.SymlinkEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating symlink worker pool: %w", err) @@ -291,7 +337,10 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli return } metrics.ReportEvent(utils.HardlinkEventType) - ruleManager.ReportHardlinkEvent(event) + ruleManager.ReportEvent(utils.HardlinkEventType, &event) + + // Report hardlink events to event receivers + reportEventToThirdPartyTracers(utils.HardlinkEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating 
hardlink worker pool: %w", err) @@ -303,12 +352,53 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli return } metrics.ReportEvent(utils.SSHEventType) - ruleManager.ReportSSHEvent(event) + ruleManager.ReportEvent(utils.SSHEventType, &event) + + // Report ssh events to event receivers + reportEventToThirdPartyTracers(utils.SSHEventType, &event, thirdPartyEventReceivers) }) if err != nil { return nil, fmt.Errorf("creating ssh worker pool: %w", err) } + // Create a http worker pool + httpWorkerPool, err := ants.NewPoolWithFunc(httpWorkerPoolSize, func(i interface{}) { + event := i.(tracerhttptype.Event) + // ignore events with empty container name + if event.K8s.ContainerName == "" { + return + } + + k8sContainerID := utils.CreateK8sContainerID(event.K8s.Namespace, event.K8s.PodName, event.K8s.ContainerName) + + if isDroppedEvent(event.Type, event.Message) { + applicationProfileManager.ReportDroppedEvent(k8sContainerID) + return + } + + metrics.ReportEvent(utils.HTTPEventType) + applicationProfileManager.ReportHTTPEvent(k8sContainerID, &event) + + reportEventToThirdPartyTracers(utils.HTTPEventType, &event, thirdPartyEventReceivers) + }) + + if err != nil { + return nil, fmt.Errorf("creating http worker pool: %w", err) + } + + // Create a ptrace worker pool + ptraceWorkerPool, err := ants.NewPoolWithFunc(ptraceWorkerPoolSize, func(i interface{}) { + event := i.(tracerptracetype.Event) + if event.K8s.ContainerName == "" { + return + } + ruleManager.ReportEvent(utils.PtraceEventType, &event) + }) + + if err != nil { + return nil, fmt.Errorf("creating ptrace worker pool: %w", err) + } + return &IGContainerWatcher{ // Configuration cfg: cfg, @@ -338,6 +428,8 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli symlinkWorkerPool: symlinkWorkerPool, hardlinkWorkerPool: hardlinkWorkerPool, sshdWorkerPool: sshWorkerPool, + httpWorkerPool: httpWorkerPool, + ptraceWorkerPool: ptraceWorkerPool, metrics: metrics, preRunningContainersIDs: preRunningContainers, @@ -345,30 +437,78 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli capabilitiesWorkerChan: make(chan *tracercapabilitiestype.Event, 1000), execWorkerChan: make(chan *tracerexectype.Event, 10000), openWorkerChan: make(chan *traceropentype.Event, 500000), + ptraceWorkerChan: make(chan *tracerptracetype.Event, 1000), networkWorkerChan: make(chan *tracernetworktype.Event, 500000), dnsWorkerChan: make(chan *tracerdnstype.Event, 100000), randomxWorkerChan: make(chan *tracerandomxtype.Event, 5000), symlinkWorkerChan: make(chan *tracersymlinktype.Event, 1000), hardlinkWorkerChan: make(chan *tracerhardlinktype.Event, 1000), sshWorkerChan: make(chan *tracersshtype.Event, 1000), + httpWorkerChan: make(chan *tracerhttptype.Event, 500000), // cache - ruleBindingPodNotify: ruleBindingPodNotify, + ruleBindingPodNotify: ruleBindingPodNotify, + timeBasedContainers: mapset.NewSet[string](), + ruleManagedPods: mapset.NewSet[string](), + runtime: runtime, + thirdPartyTracers: mapset.NewSet[containerwatcher.CustomTracer](), + thirdPartyContainerReceivers: mapset.NewSet[containerwatcher.ContainerReceiver](), + processManager: processManager, + }, nil +} - timeBasedContainers: mapset.NewSet[string](), - ruleManagedPods: mapset.NewSet[string](), +func (ch *IGContainerWatcher) GetContainerCollection() *containercollection.ContainerCollection { + return ch.containerCollection +} - runtime: runtime, - }, nil +func (ch *IGContainerWatcher) GetTracerCollection() 
*tracercollection.TracerCollection { + return ch.tracerCollection +} + +func (ch *IGContainerWatcher) GetSocketEnricher() *socketenricher.SocketEnricher { + return ch.socketEnricher +} + +func (ch *IGContainerWatcher) GetContainerSelector() *containercollection.ContainerSelector { + return &ch.containerSelector +} + +func (ch *IGContainerWatcher) RegisterCustomTracer(tracer containerwatcher.CustomTracer) error { + for t := range ch.thirdPartyTracers.Iter() { + if t.Name() == tracer.Name() { + return fmt.Errorf("tracer with name %s already registered", tracer.Name()) + } + } + + ch.thirdPartyTracers.Add(tracer) + return nil +} + +func (ch *IGContainerWatcher) UnregisterCustomTracer(tracer containerwatcher.CustomTracer) error { + ch.thirdPartyTracers.Remove(tracer) + return nil +} + +func (ch *IGContainerWatcher) RegisterContainerReceiver(receiver containerwatcher.ContainerReceiver) { + ch.thirdPartyContainerReceivers.Add(receiver) +} + +func (ch *IGContainerWatcher) UnregisterContainerReceiver(receiver containerwatcher.ContainerReceiver) { + ch.thirdPartyContainerReceivers.Remove(receiver) } func (ch *IGContainerWatcher) Start(ctx context.Context) error { if !ch.running { - if err := ch.startContainerCollection(ctx); err != nil { return fmt.Errorf("setting up container collection: %w", err) } + // We want to populate the initial processes before starting the tracers but after retrieving the shims. + if err := ch.processManager.PopulateInitialProcesses(); err != nil { + ch.stopContainerCollection() + return fmt.Errorf("populating initial processes: %w", err) + } + if err := ch.startTracers(); err != nil { ch.stopContainerCollection() return fmt.Errorf("starting app behavior tracing: %w", err) @@ -394,3 +534,11 @@ func (ch *IGContainerWatcher) Stop() { func (ch *IGContainerWatcher) Ready() bool { return ch.running } + +func reportEventToThirdPartyTracers(eventType utils.EventType, event utils.K8sEvent, thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]]) { + if thirdPartyEventReceivers != nil && thirdPartyEventReceivers.Has(eventType) { + for receiver := range thirdPartyEventReceivers.Get(eventType).Iter() { + receiver.ReportEvent(eventType, event) + } + } +} diff --git a/pkg/containerwatcher/v1/container_watcher_private.go b/pkg/containerwatcher/v1/container_watcher_private.go index 0a50adde..55e6451d 100644 --- a/pkg/containerwatcher/v1/container_watcher_private.go +++ b/pkg/containerwatcher/v1/container_watcher_private.go @@ -34,17 +34,16 @@ func (ch *IGContainerWatcher) containerCallback(notif containercollection.PubSub k8sContainerID := utils.CreateK8sContainerID(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.ContainerName) - if !ch.preRunningContainersIDs.Contains(notif.Container.Runtime.ContainerID) { - // container is not in preRunningContainersIDs, it is a new container - ch.timeBasedContainers.Add(notif.Container.Runtime.ContainerID) - } - switch notif.Type { case containercollection.EventTypeAddContainer: logger.L().Info("start monitor on container", helpers.String("container ID", notif.Container.Runtime.ContainerID), helpers.String("k8s workload", k8sContainerID)) - + if ch.running { + ch.timeBasedContainers.Add(notif.Container.Runtime.ContainerID) + } else { + ch.preRunningContainersIDs.Add(notif.Container.Runtime.ContainerID) + } // Check if Pod has a label of max sniffing time - sniffingTime := ch.cfg.MaxSniffingTime + sniffingTime := utils.AddJitter(ch.cfg.MaxSniffingTime, 
ch.cfg.MaxJitterPercentage) if podLabelMaxSniffingTime, ok := notif.Container.K8s.PodLabels[MaxSniffingTimeLabel]; ok { if duration, err := time.ParseDuration(podLabelMaxSniffingTime); err == nil { sniffingTime = duration @@ -73,6 +72,12 @@ func (ch *IGContainerWatcher) containerCallback(notif containercollection.PubSub func (ch *IGContainerWatcher) startContainerCollection(ctx context.Context) error { ch.ctx = ctx + // This is needed when not running as gadget. + // https://github.com/inspektor-gadget/inspektor-gadget/blob/9a797dc046f8bc1f45e85f15db7e99dd4e5cb6e5/cmd/ig/containers/containers.go#L45-L46 + if err := host.Init(host.Config{AutoMountFilesystems: true}); err != nil { + return fmt.Errorf("initializing host package: %w", err) + } + // Start the container collection containerEventFuncs := []containercollection.FuncNotify{ ch.containerCallback, @@ -81,6 +86,11 @@ func (ch *IGContainerWatcher) startContainerCollection(ctx context.Context) erro ch.networkManager.ContainerCallback, ch.malwareManager.ContainerCallback, ch.ruleManager.ContainerCallback, + ch.processManager.ContainerCallback, + } + + for receiver := range ch.thirdPartyContainerReceivers.Iter() { + containerEventFuncs = append(containerEventFuncs, receiver.ContainerCallback) } // Define the different options for the container collection instance @@ -120,7 +130,7 @@ func (ch *IGContainerWatcher) startContainerCollection(ctx context.Context) erro return nil } -func (ch *IGContainerWatcher) startRunningContainers() error { +func (ch *IGContainerWatcher) startRunningContainers() { k8sClient, err := containercollection.NewK8sClient(ch.nodeName) if err != nil { logger.L().Fatal("creating IG Kubernetes client", helpers.Error(err)) @@ -129,7 +139,6 @@ func (ch *IGContainerWatcher) startRunningContainers() error { for n := range *ch.ruleBindingPodNotify { ch.addRunningContainers(k8sClient, &n) } - return nil } func (ch *IGContainerWatcher) addRunningContainers(k8sClient IGK8sClient, notf *rulebindingmanager.RuleBindingNotify) { @@ -192,11 +201,7 @@ func (ch *IGContainerWatcher) startTracers() error { logger.L().Error("error starting seccomp tracing", helpers.Error(err)) return err } - // Start capabilities tracer - if err := ch.startCapabilitiesTracing(); err != nil { - logger.L().Error("error starting capabilities tracing", helpers.Error(err)) - return err - } + logger.L().Info("Started syscall tracing") } if ch.cfg.EnableRelevancy || ch.cfg.EnableApplicationProfile { // Start exec tracer @@ -204,16 +209,16 @@ func (ch *IGContainerWatcher) startTracers() error { logger.L().Error("error starting exec tracing", helpers.Error(err)) return err } + logger.L().Info("Started exec tracing") // Start open tracer if err := ch.startOpenTracing(); err != nil { logger.L().Error("error starting open tracing", helpers.Error(err)) return err } + logger.L().Info("Started open tracing") } if ch.cfg.EnableNetworkTracing { - host.Init(host.Config{AutoMountFilesystems: true}) - if err := ch.startKubernetesResolution(); err != nil { logger.L().Error("error starting kubernetes resolution", helpers.Error(err)) return err @@ -230,20 +235,29 @@ func (ch *IGContainerWatcher) startTracers() error { // not failing on dns tracing error logger.L().Error("error starting dns tracing", helpers.Error(err)) } + logger.L().Info("Started dns tracing") if err := ch.startNetworkTracing(); err != nil { logger.L().Error("error starting network tracing", helpers.Error(err)) return err } + logger.L().Info("Started network tracing") } if ch.cfg.EnableRuntimeDetection { + // 
Start capabilities tracer
+		if err := ch.startCapabilitiesTracing(); err != nil {
+			logger.L().Error("error starting capabilities tracing", helpers.Error(err))
+			return err
+		}
+		logger.L().Info("Started capabilities tracing")
 		// The randomx tracing is only supported on amd64 architecture.
 		if runtime.GOARCH == "amd64" {
 			if err := ch.startRandomxTracing(); err != nil {
 				logger.L().Error("error starting randomx tracing", helpers.Error(err))
 				return err
 			}
+			logger.L().Info("Started randomx tracing")
 		} else {
 			logger.L().Warning("randomx tracing is not supported on this architecture", helpers.String("architecture", runtime.GOARCH))
 		}
@@ -252,17 +266,43 @@ func (ch *IGContainerWatcher) startTracers() error {
 			logger.L().Error("error starting symlink tracing", helpers.Error(err))
 			return err
 		}
+		logger.L().Info("Started symlink tracing")
 		if err := ch.startHardlinkTracing(); err != nil {
 			logger.L().Error("error starting hardlink tracing", helpers.Error(err))
 			return err
 		}
+		logger.L().Info("Started hardlink tracing")
 		// NOTE: SSH tracing relies on the network tracer, so it must be started after the network tracer.
 		if err := ch.startSshTracing(); err != nil {
 			logger.L().Error("error starting ssh tracing", helpers.Error(err))
 			return err
 		}
+		logger.L().Info("Started ssh tracing")
+
+		if err := ch.startPtraceTracing(); err != nil {
+			logger.L().Error("error starting ptrace tracing", helpers.Error(err))
+			return err
+		}
+		logger.L().Info("Started ptrace tracing")
+
+		// Start third party tracers
+		for tracer := range ch.thirdPartyTracers.Iter() {
+			if err := tracer.Start(); err != nil {
+				logger.L().Error("error starting custom tracer", helpers.String("tracer", tracer.Name()), helpers.Error(err))
+				return err
+			}
+			logger.L().Info("Started custom tracer", helpers.String("tracer", tracer.Name()))
+		}
+	}
+
+	if ch.cfg.EnableHttpDetection {
+		if err := ch.startHttpTracing(); err != nil {
+			logger.L().Error("error starting http tracing", helpers.Error(err))
+			return err
+		}
+		logger.L().Info("Started http tracing")
 	}
 
 	return nil
@@ -270,7 +310,7 @@ func (ch *IGContainerWatcher) startTracers() error {
 
 func (ch *IGContainerWatcher) stopTracers() error {
 	var errs error
-	if ch.cfg.EnableApplicationProfile {
+	if ch.cfg.EnableApplicationProfile || ch.cfg.EnableRuntimeDetection {
 		// Stop capabilities tracer
 		if err := ch.stopCapabilitiesTracing(); err != nil {
 			logger.L().Error("error stopping capabilities tracing", helpers.Error(err))
@@ -282,7 +322,7 @@ func (ch *IGContainerWatcher) stopTracers() error {
 			errs = errors.Join(errs, err)
 		}
 	}
-	if ch.cfg.EnableRelevancy || ch.cfg.EnableApplicationProfile {
+	if ch.cfg.EnableRelevancy || ch.cfg.EnableApplicationProfile || ch.cfg.EnableRuntimeDetection {
 		// Stop exec tracer
 		if err := ch.stopExecTracing(); err != nil {
 			logger.L().Error("error stopping exec tracing", helpers.Error(err))
@@ -295,7 +335,7 @@ func (ch *IGContainerWatcher) stopTracers() error {
 		}
 	}
 
-	if ch.cfg.EnableNetworkTracing {
+	if ch.cfg.EnableNetworkTracing || ch.cfg.EnableRuntimeDetection {
 		// Stop network tracer
 		if err := ch.stopNetworkTracing(); err != nil {
 			logger.L().Error("error stopping network tracing", helpers.Error(err))
@@ -331,11 +371,32 @@ func (ch *IGContainerWatcher) stopTracers() error {
 
 		// Stop ssh tracer
 		if err := ch.stopSshTracing(); err != nil {
 			logger.L().Error("error stopping ssh tracing", helpers.Error(err))
+			errs = errors.Join(errs, err)
+		}
+
+		// Stop ptrace tracer
+		if err := ch.stopPtraceTracing(); err != nil {
+			logger.L().Error("error stopping ptrace tracing", helpers.Error(err))
 			errs = errors.Join(errs, err)
 		}
+
+		// Stop third party tracers
+		for tracer := range ch.thirdPartyTracers.Iter() {
+			if err := tracer.Stop(); err != nil {
+				logger.L().Error("error stopping custom tracer", helpers.String("tracer", tracer.Name()), helpers.Error(err))
+				errs = errors.Join(errs, err)
+			}
+		}
 	}
 
+	if ch.cfg.EnableHttpDetection {
+		// Stop http tracer
+		if err := ch.stopHttpTracing(); err != nil {
+			logger.L().Error("error stopping http tracing", helpers.Error(err))
+			errs = errors.Join(errs, err)
+		}
+	}
 	return errs
 }
 
@@ -377,8 +438,8 @@ func (ch *IGContainerWatcher) unregisterContainer(container *containercollection
 }
 
 func (ch *IGContainerWatcher) ignoreContainer(namespace, name string) bool {
-	// do not trace the node-agent pod
-	if name == ch.podName && namespace == ch.namespace {
+	// do not trace any of our pods
+	if namespace == ch.namespace {
 		return true
 	}
 	// do not trace the node-agent pods if MULTIPLY is set
diff --git a/pkg/containerwatcher/v1/dns.go b/pkg/containerwatcher/v1/dns.go
index d2bb2bd7..515286f3 100644
--- a/pkg/containerwatcher/v1/dns.go
+++ b/pkg/containerwatcher/v1/dns.go
@@ -32,7 +32,7 @@ func (ch *IGContainerWatcher) startDNSTracing() error {
 		return fmt.Errorf("adding tracer: %w", err)
 	}
 
-	tracerDns, err := tracerdns.NewTracer()
+	tracerDns, err := tracerdns.NewTracer(&tracerdns.Config{GetPaths: true})
 	if err != nil {
 		return fmt.Errorf("creating tracer: %w", err)
 	}
diff --git a/pkg/containerwatcher/v1/http.go b/pkg/containerwatcher/v1/http.go
new file mode 100644
index 00000000..2618f876
--- /dev/null
+++ b/pkg/containerwatcher/v1/http.go
@@ -0,0 +1,68 @@
+package containerwatcher
+
+import (
+	"fmt"
+
+	tracerhttp "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/tracer"
+	tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types"
+
+	"github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+	"github.com/kubescape/go-logger"
+	"github.com/kubescape/go-logger/helpers"
+)
+
+const (
+	// Only responses in the [200, 300) range, i.e. successful 2xx responses, are kept.
+	StatusOK              = 200
+	StatusMultipleChoices = 300
+)
+
+func (ch *IGContainerWatcher) httpEventCallback(event *tracerhttptype.Event) {
+	if event.Type == types.DEBUG {
+		return
+	}
+
+	if isDroppedEvent(event.Type, event.Message) {
+		logger.L().Ctx(ch.ctx).Warning("http tracer got drop events - we may miss some realtime data", helpers.Interface("event", event), helpers.String("error", event.Message))
+		return
+	}
+
+	if event.Response == nil || (event.Response.StatusCode < StatusOK || event.Response.StatusCode >= StatusMultipleChoices) {
+		return
+	}
+
+	ch.httpWorkerChan <- event
+}
+
+func (ch *IGContainerWatcher) startHttpTracing() error {
+	if err := ch.tracerCollection.AddTracer(httpTraceName, ch.containerSelector); err != nil {
+		return fmt.Errorf("adding tracer: %w", err)
+	}
+
+	// Get mount namespace map to filter by containers
+	httpMountnsmap, err := ch.tracerCollection.TracerMountNsMap(httpTraceName)
+	if err != nil {
+		return fmt.Errorf("getting httpMountnsmap: %w", err)
+	}
+
+	tracerHttp, err := tracerhttp.NewTracer(&tracerhttp.Config{MountnsMap: httpMountnsmap}, ch.containerCollection, ch.httpEventCallback)
+	if err != nil {
+		return fmt.Errorf("creating tracer: %w", err)
+	}
+
+	go func() {
+		for event := range ch.httpWorkerChan {
+			_ = ch.httpWorkerPool.Invoke(*event)
+		}
+	}()
+
+	ch.httpTracer = tracerHttp
+	return nil
+}
+
+func (ch *IGContainerWatcher) stopHttpTracing() error {
+	if err := ch.tracerCollection.RemoveTracer(httpTraceName); err != nil {
+		return
fmt.Errorf("removing tracer: %w", err) + } + ch.httpTracer.Close() + return nil +} diff --git a/pkg/containerwatcher/v1/open_test.go b/pkg/containerwatcher/v1/open_test.go index 65d05237..7fb4e084 100644 --- a/pkg/containerwatcher/v1/open_test.go +++ b/pkg/containerwatcher/v1/open_test.go @@ -23,7 +23,7 @@ func BenchmarkIGContainerWatcher_openEventCallback(b *testing.B) { assert.NoError(b, err) mockExporter := metricsmanager.NewMetricsMock() - mainHandler, err := CreateIGContainerWatcher(cfg, nil, nil, relevancyManager, nil, nil, mockExporter, nil, nil, nil, nil, nil) + mainHandler, err := CreateIGContainerWatcher(cfg, nil, nil, relevancyManager, nil, nil, mockExporter, nil, nil, nil, nil, nil, nil, nil) assert.NoError(b, err) event := &traceropentype.Event{ Event: types.Event{ diff --git a/pkg/containerwatcher/v1/ptrace.go b/pkg/containerwatcher/v1/ptrace.go new file mode 100644 index 00000000..286b48e6 --- /dev/null +++ b/pkg/containerwatcher/v1/ptrace.go @@ -0,0 +1,52 @@ +package containerwatcher + +import ( + "fmt" + + "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + tracerptrace "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer" + tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" +) + +func (ch *IGContainerWatcher) ptraceEventCallback(event *tracerptracetype.Event) { + if event.Type != types.NORMAL { + return + } + + ch.ptraceWorkerChan <- event + +} + +func (ch *IGContainerWatcher) startPtraceTracing() error { + if err := ch.tracerCollection.AddTracer(ptraceTraceName, ch.containerSelector); err != nil { + return fmt.Errorf("adding tracer: %w", err) + } + + // Get mount namespace map to filter by containers + ptraceMountnsmap, err := ch.tracerCollection.TracerMountNsMap(ptraceTraceName) + if err != nil { + return fmt.Errorf("getting ptraceMountnsmap: %w", err) + } + + tracerPtrace, err := tracerptrace.NewTracer(&tracerptrace.Config{MountnsMap: ptraceMountnsmap}, ch.containerCollection, ch.ptraceEventCallback) + if err != nil { + return fmt.Errorf("creating tracer: %w", err) + } + go func() { + for event := range ch.ptraceWorkerChan { + _ = ch.ptraceWorkerPool.Invoke(*event) + } + }() + + ch.ptraceTracer = tracerPtrace + + return nil +} + +func (ch *IGContainerWatcher) stopPtraceTracing() error { + if err := ch.tracerCollection.RemoveTracer(ptraceTraceName); err != nil { + return fmt.Errorf("removing tracer: %w", err) + } + ch.ptraceTracer.Close() + return nil +} diff --git a/pkg/dnsmanager/dns_manager.go b/pkg/dnsmanager/dns_manager.go index 3a7d2f49..9234137c 100644 --- a/pkg/dnsmanager/dns_manager.go +++ b/pkg/dnsmanager/dns_manager.go @@ -2,38 +2,89 @@ package dnsmanager import ( "net" + "time" "github.com/goradd/maps" tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" + "istio.io/pkg/cache" ) -// DNSManager is used to manage DNS events and save IP resolutions. It exposes an API to resolve IP address to domain name. +// DNSManager is used to manage DNS events and save IP resolutions. 
type DNSManager struct { - addressToDomainMap maps.SafeMap[string, string] // this map is used to resolve IP address to domain name + addressToDomainMap maps.SafeMap[string, string] + lookupCache cache.ExpiringCache // Cache for DNS lookups + failureCache cache.ExpiringCache // Cache for failed lookups } +type cacheEntry struct { + addresses []string +} + +const ( + defaultPositiveTTL = 1 * time.Minute // Default TTL for successful lookups + defaultNegativeTTL = 5 * time.Second // Default TTL for failed lookups +) + var _ DNSManagerClient = (*DNSManager)(nil) var _ DNSResolver = (*DNSManager)(nil) func CreateDNSManager() *DNSManager { - return &DNSManager{} + return &DNSManager{ + // Create TTL caches with their respective expiration times + lookupCache: cache.NewTTL(defaultPositiveTTL, defaultPositiveTTL), + failureCache: cache.NewTTL(defaultNegativeTTL, defaultNegativeTTL), + } } func (dm *DNSManager) ReportDNSEvent(dnsEvent tracerdnstype.Event) { - + // If we have addresses in the event, use them directly if len(dnsEvent.Addresses) > 0 { for _, address := range dnsEvent.Addresses { dm.addressToDomainMap.Set(address, dnsEvent.DNSName) } - } else { - addresses, err := net.LookupIP(dnsEvent.DNSName) - if err != nil { - return - } - for _, address := range addresses { - dm.addressToDomainMap.Set(address.String(), dnsEvent.DNSName) + + // Update the cache with these known good addresses + dm.lookupCache.Set(dnsEvent.DNSName, cacheEntry{ + addresses: dnsEvent.Addresses, + }) + return + } + + // Check if we've recently failed to look up this domain + if _, found := dm.failureCache.Get(dnsEvent.DNSName); found { + return + } + + // Check if we have a cached result + if cached, found := dm.lookupCache.Get(dnsEvent.DNSName); found { + entry := cached.(cacheEntry) + // Use cached addresses + for _, addr := range entry.addresses { + dm.addressToDomainMap.Set(addr, dnsEvent.DNSName) } + return + } + + // Only perform lookup if we don't have cached results + addresses, err := net.LookupIP(dnsEvent.DNSName) + if err != nil { + // Cache the failure - we just need to store something, using empty struct + dm.failureCache.Set(dnsEvent.DNSName, struct{}{}) + return + } + + // Convert addresses to strings and store them + addrStrings := make([]string, 0, len(addresses)) + for _, addr := range addresses { + addrStr := addr.String() + addrStrings = append(addrStrings, addrStr) + dm.addressToDomainMap.Set(addrStr, dnsEvent.DNSName) } + + // Cache the successful lookup + dm.lookupCache.Set(dnsEvent.DNSName, cacheEntry{ + addresses: addrStrings, + }) } func (dm *DNSManager) ResolveIPAddress(ipAddr string) (string, bool) { diff --git a/pkg/dnsmanager/dns_manager_test.go b/pkg/dnsmanager/dns_manager_test.go index bc8edebc..8fd6e173 100644 --- a/pkg/dnsmanager/dns_manager_test.go +++ b/pkg/dnsmanager/dns_manager_test.go @@ -2,8 +2,11 @@ package dnsmanager import ( "net" + "sync" "testing" + "math/rand/v2" + tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" ) @@ -13,6 +16,7 @@ func TestResolveIPAddress(t *testing.T) { dnsEvent tracerdnstype.Event ipAddr string want string + wantOk bool }{ { name: "ip found", @@ -24,7 +28,8 @@ func TestResolveIPAddress(t *testing.T) { "67.225.146.248", }, }, - want: "test.com", + want: "test.com", + wantOk: true, }, { name: "ip not found", @@ -36,57 +41,195 @@ func TestResolveIPAddress(t *testing.T) { "54.23.332.4", }, }, - want: "", + want: "", + wantOk: false, }, { name: "no address", ipAddr: "67.225.146.248", dnsEvent: tracerdnstype.Event{ 
DNSName: "test.com", - NumAnswers: 0, // will not resolve + NumAnswers: 0, }, - want: "", + want: "", + wantOk: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dm := &DNSManager{} + dm := CreateDNSManager() + dm.ReportDNSEvent(tt.dnsEvent) - got, _ := dm.ResolveIPAddress(tt.ipAddr) - if got != tt.want { - t.Errorf("ResolveIPAddress() got = %v, want %v", got, tt.want) + got, ok := dm.ResolveIPAddress(tt.ipAddr) + if got != tt.want || ok != tt.wantOk { + t.Errorf("ResolveIPAddress() got = %v, ok = %v, want = %v, wantOk = %v", got, ok, tt.want, tt.wantOk) } }) } } func TestResolveIPAddressFallback(t *testing.T) { + // Skip the test if running in CI or without network access + if testing.Short() { + t.Skip("Skipping test that requires network access") + } + tests := []struct { name string dnsEvent tracerdnstype.Event want string + wantOk bool }{ - { name: "dns resolution fallback", dnsEvent: tracerdnstype.Event{ - DNSName: "test.com", + DNSName: "example.com", // Using example.com as it's guaranteed to exist NumAnswers: 1, }, - want: "test.com", + want: "example.com", + wantOk: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - addresses, _ := net.LookupIP(tt.dnsEvent.DNSName) - dm := &DNSManager{} + dm := CreateDNSManager() + + // Perform the actual DNS lookup + addresses, err := net.LookupIP(tt.dnsEvent.DNSName) + if err != nil { + t.Skipf("DNS lookup failed: %v", err) + return + } + if len(addresses) == 0 { + t.Skip("No addresses returned from DNS lookup") + return + } + dm.ReportDNSEvent(tt.dnsEvent) - got, _ := dm.ResolveIPAddress(addresses[0].String()) - if got != tt.want { - t.Errorf("ResolveIPAddress() got = %v, want %v", got, tt.want) + got, ok := dm.ResolveIPAddress(addresses[0].String()) + if got != tt.want || ok != tt.wantOk { + t.Errorf("ResolveIPAddress() got = %v, ok = %v, want = %v, wantOk = %v", got, ok, tt.want, tt.wantOk) } }) } } + +func TestCacheFallbackBehavior(t *testing.T) { + dm := CreateDNSManager() + + // Test successful DNS lookup caching + event := tracerdnstype.Event{ + DNSName: "test.com", + Addresses: []string{ + "1.2.3.4", + }, + } + dm.ReportDNSEvent(event) + + // Check if the lookup is cached + cached, found := dm.lookupCache.Get(event.DNSName) + if !found { + t.Error("Expected DNS lookup to be cached") + } + + entry, ok := cached.(cacheEntry) + if !ok { + t.Error("Cached entry is not of type cacheEntry") + } + if len(entry.addresses) != 1 || entry.addresses[0] != "1.2.3.4" { + t.Error("Cached addresses do not match expected values") + } + + // Test failed lookup caching + failEvent := tracerdnstype.Event{ + DNSName: "nonexistent.local", + } + dm.ReportDNSEvent(failEvent) + + // Check if the failure is cached + _, found = dm.failureCache.Get(failEvent.DNSName) + if !found { + t.Error("Expected failed DNS lookup to be cached") + } + + // Test cache hit behavior + hitCount := 0 + for i := 0; i < 5; i++ { + if cached, found := dm.lookupCache.Get(event.DNSName); found { + entry := cached.(cacheEntry) + if len(entry.addresses) > 0 { + hitCount++ + } + } + } + if hitCount != 5 { + t.Errorf("Expected 5 cache hits, got %d", hitCount) + } +} + +func TestConcurrentAccess(t *testing.T) { + dm := CreateDNSManager() + const numGoroutines = 100 + const numOperations = 1000 + + // Create a wait group to synchronize goroutines + var wg sync.WaitGroup + wg.Add(numGoroutines) + + // Create some test data + testEvents := []tracerdnstype.Event{ + { + DNSName: "test1.com", + Addresses: []string{"1.1.1.1", "2.2.2.2"}, + }, + { 
+ DNSName: "test2.com", + Addresses: []string{"3.3.3.3", "4.4.4.4"}, + }, + { + DNSName: "test3.com", + Addresses: []string{"5.5.5.5", "6.6.6.6"}, + }, + } + + // Launch multiple goroutines to concurrently access the cache + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + + for j := 0; j < numOperations; j++ { + // Randomly choose between writing and reading + if rand.Float32() < 0.5 { + // Write operation + event := testEvents[rand.IntN(len(testEvents))] + dm.ReportDNSEvent(event) + } else { + // Read operation + if cached, found := dm.lookupCache.Get("test1.com"); found { + entry := cached.(cacheEntry) + // Verify the slice hasn't been modified + if len(entry.addresses) != 2 { + t.Errorf("Unexpected number of addresses: %d", len(entry.addresses)) + } + } + } + } + }() + } + + // Wait for all goroutines to complete + wg.Wait() + + // Verify final state + for _, event := range testEvents { + if cached, found := dm.lookupCache.Get(event.DNSName); found { + entry := cached.(cacheEntry) + if len(entry.addresses) != len(event.Addresses) { + t.Errorf("Cache entry for %s has wrong number of addresses: got %d, want %d", + event.DNSName, len(entry.addresses), len(event.Addresses)) + } + } + } +} diff --git a/pkg/ebpf/gadgets/http/tracer/bpf/http-sniffer.c b/pkg/ebpf/gadgets/http/tracer/bpf/http-sniffer.c new file mode 100644 index 00000000..0910768b --- /dev/null +++ b/pkg/ebpf/gadgets/http/tracer/bpf/http-sniffer.c @@ -0,0 +1,615 @@ +#include "sniffer_strcuts.h" +#include +#include +#include + +// Used to send http events to user space +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); +} events SEC(".maps"); + +// Used to manage pre accept connections from client +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 8192); + __type(key, __u64); + __type(value, struct pre_accept_args); +} pre_accept_args_map SEC(".maps"); + +// Used to manage active http connections to monitor +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 8192); + __type(key, __u64); + __type(value, struct pre_connect_args); +} active_connections_args_map SEC(".maps"); + +// Used to manage active http connections to monitor as server +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 8192); + __type(key, __u64); + __type(value, struct active_connection_info); +} accepted_sockets_map SEC(".maps"); + +// Used to store the buffer of packets +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 8192); + __type(key, __u64); + __type(value, struct packet_buffer); +} buffer_packets SEC(".maps"); + +// Used to store the buffer of messages of messages type +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 8192); + __type(key, __u64); + __type(value, struct packet_msg); +} msg_packets SEC(".maps"); + +// Used to allocate http event +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, struct httpevent); +} event_data SEC(".maps"); + +// Used to allocate string +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, char[PACKET_CHUNK_SIZE]); +} empty_char SEC(".maps"); + +// Declared to avoid compiler deletion +const struct httpevent *unusedevent __attribute__((unused)); + +static __always_inline int should_discard() +{ + u64 mntns_id; + mntns_id = gadget_get_mntns_id(); + + if (gadget_should_discard_mntns_id(mntns_id)) + { + return 1; + } + + return 0; +} + +static __always_inline __u64 
generate_unique_connection_id(__u64 pid_tgid, __u32 sockfd) +{ + __u32 pid = pid_tgid >> 32; + return ((__u64)pid << 32) | sockfd; +} + +static __always_inline void get_namespace_ids(u64 *mnt_ns_id) +{ + struct task_struct *task = (struct task_struct *)bpf_get_current_task(); + if (task) + { + struct nsproxy *nsproxy = BPF_CORE_READ(task, nsproxy); + if (nsproxy) + { + struct mnt_namespace *mnt_ns = BPF_CORE_READ(nsproxy, mnt_ns); + if (mnt_ns) + { + *mnt_ns_id = BPF_CORE_READ(mnt_ns, ns.inum); + } + } + } +} + +static __always_inline bool is_msg_peek(__u32 flags) +{ + return flags & MSG_PEEK; +} + +static __always_inline int populate_httpevent(struct httpevent *event) +{ + if (!event) + return -1; + + u64 mnt_ns_id = 0; + + get_namespace_ids(&mnt_ns_id); + event->mntns_id = mnt_ns_id; + + u64 pid_tgid = bpf_get_current_pid_tgid(); + event->pid = pid_tgid >> 32; + + u64 uid_gid = bpf_get_current_uid_gid(); + event->uid = uid_gid & 0xFFFFFFFF; + event->gid = uid_gid >> 32; + event->timestamp = bpf_ktime_get_boot_ns(); + + return 0; +} + +static __always_inline void enrich_ip_port(struct trace_event_raw_sys_exit *ctx, __u32 sockfd, struct httpevent *event) +{ + __u64 id = bpf_get_current_pid_tgid(); + __u64 unique_connection_id = generate_unique_connection_id(id, sockfd); + struct active_connection_info *conn_info = bpf_map_lookup_elem(&accepted_sockets_map, &unique_connection_id); + if (conn_info) + { + event->other_ip = conn_info->addr.sin_addr.s_addr; + event->other_port = bpf_ntohs(conn_info->addr.sin_port); + } +} + +static void inline enter_connect(struct trace_event_raw_sys_enter *ctx) +{ + __u64 id = bpf_get_current_pid_tgid(); + struct pre_connect_args connect_args = {}; + connect_args.sockfd = (int)ctx->args[0]; // socketfd to connect with + bpf_probe_read_user(&connect_args.addr, sizeof(connect_args.addr), (void *)ctx->args[1]); + bpf_map_update_elem(&active_connections_args_map, &id, &connect_args, BPF_ANY); +} + +static void inline exit_connect(struct trace_event_raw_sys_exit *ctx) +{ + __u64 id = bpf_get_current_pid_tgid(); + struct active_connection_info conn_info = {}; + + if (ctx->ret == 0 || ctx->ret == EINPROGRESS) + { + struct pre_connect_args *args = bpf_map_lookup_elem(&active_connections_args_map, &id); + + if (args) + { + __u32 sockfd = (__u32)args->sockfd; // For connect, we stored the sockfd earlier. 
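+            // The key packs the caller PID into the upper 32 bits and the
+            // socket fd into the lower 32 bits (see
+            // generate_unique_connection_id), so equal fd numbers in
+            // different processes cannot collide in accepted_sockets_map.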
+ __u64 unique_connection_id = generate_unique_connection_id(id, sockfd); + conn_info.sockfd = sockfd; + bpf_probe_read_kernel(&conn_info.addr, sizeof(conn_info.addr), &args->addr); + bpf_map_update_elem(&accepted_sockets_map, &unique_connection_id, &conn_info, BPF_ANY); + } + } + + bpf_map_delete_elem(&active_connections_args_map, &id); +} + +static void inline enter_accept(struct trace_event_raw_sys_enter *ctx) +{ + __u64 id = bpf_get_current_pid_tgid(); + struct pre_accept_args accept_args = {}; + accept_args.addr_ptr = (uint64_t)ctx->args[1]; + bpf_map_update_elem(&pre_accept_args_map, &id, &accept_args, BPF_ANY); +} + +static void inline exit_accept(struct trace_event_raw_sys_exit *ctx) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + struct active_connection_info conn_info = {}; + if (ctx->ret >= 0) + { + __u32 sockfd = (__u32)ctx->ret; // new socket for accepted connection + struct pre_accept_args *args = bpf_map_lookup_elem(&pre_accept_args_map, &pid_tgid); + if (args) + { + __u64 unique_connection_id = generate_unique_connection_id(pid_tgid, sockfd); + conn_info.sockfd = sockfd; + bpf_probe_read_user(&conn_info.addr, sizeof(conn_info.addr), (void *)args->addr_ptr); + bpf_map_update_elem(&accepted_sockets_map, &unique_connection_id, &conn_info, BPF_ANY); + } + } + bpf_map_delete_elem(&pre_accept_args_map, &pid_tgid); +} + +// Store the arguments of the receive syscalls in a map +static void inline pre_receive_syscalls(struct trace_event_raw_sys_enter *ctx) +{ + __u64 id = bpf_get_current_pid_tgid(); + __u32 sockfd = (__u32)ctx->args[0]; // For read, recv, recvfrom, write, send, sendto, sockfd is the first argument + __u64 unique_connection_id = generate_unique_connection_id(id, sockfd); + struct active_connection_info *conn_info = bpf_map_lookup_elem(&accepted_sockets_map, &unique_connection_id); + if (conn_info) + { + struct packet_buffer packet = {}; + packet.sockfd = sockfd; + packet.buf = (__u64)ctx->args[1]; + packet.len = ctx->args[2]; + bpf_map_update_elem(&buffer_packets, &id, &packet, BPF_ANY); + } +} + +static __always_inline int get_http_type(struct trace_event_raw_sys_exit *ctx, void *data, int size) +{ + // Check for common HTTP methods + const char *http_methods[] = {"GET ", "POST ", "HEAD ", "PUT ", "DELETE ", "OPTIONS ", "TRACE ", "CONNECT "}; + int num_methods = sizeof(http_methods) / sizeof(http_methods[0]); + + if (size < 4) + { + return 0; + } + + for (int i = 0; i < num_methods; i++) + { + + if (__builtin_memcmp(data, http_methods[i], 4) == 0) + { + return EVENT_TYPE_REQUEST; + } + } + + if (__builtin_memcmp(data, "HTTP", 4) == 0) + { + return EVENT_TYPE_RESPONSE; + } + + return 0; +} + +static __always_inline int process_packet(struct trace_event_raw_sys_exit *ctx, char *syscall) +{ + __u64 id = bpf_get_current_pid_tgid(); + struct packet_buffer *packet = bpf_map_lookup_elem(&buffer_packets, &id); + if (!packet) + return 0; + + if (ctx->ret <= 0) + return 0; + + __u32 total_size = (__u32)ctx->ret; + __u32 key = 0; + + char *buf = bpf_map_lookup_elem(&empty_char, &key); + if (!buf) + return 0; + + int read_size = bpf_probe_read_user(buf, MIN(packet->len, PACKET_CHUNK_SIZE), (void *)packet->buf); + if (read_size < 0) + return 0; + int type = get_http_type(ctx, buf, MIN(total_size, PACKET_CHUNK_SIZE)); + if (!type) + return 0; + + __u32 zero = 0; + struct httpevent *dataevent = bpf_map_lookup_elem(&event_data, &zero); + if (!dataevent) + return 0; + + populate_httpevent(dataevent); + enrich_ip_port(ctx, packet->sockfd, dataevent); + dataevent->type = type; + 
dataevent->sock_fd = packet->sockfd; + + bpf_probe_read_str(&dataevent->syscall, sizeof(dataevent->syscall), syscall); + bpf_probe_read_user(&dataevent->buf, MIN(total_size, MAX_DATAEVENT_BUFFER), (void *)packet->buf); + bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, dataevent, sizeof(*dataevent)); + bpf_map_delete_elem(&buffer_packets, &id); + return 0; +} + +static __always_inline int pre_process_msg(struct trace_event_raw_sys_enter *ctx) +{ + __u64 id = bpf_get_current_pid_tgid(); + __u32 sockfd = (__u32)ctx->args[0]; // For sendmsg and recvmsg, sockfd is the first argument + __u64 unique_connection_id = generate_unique_connection_id(id, sockfd); + struct active_connection_info *conn_info = bpf_map_lookup_elem(&accepted_sockets_map, &unique_connection_id); + if (conn_info) + { + struct packet_msg write_args = {}; + write_args.fd = sockfd; + + struct user_msghdr msghdr = {}; + if (bpf_probe_read_user(&msghdr, sizeof(msghdr), (void *)ctx->args[1]) != 0) + { + return 0; + } + + write_args.iovec_ptr = (uint64_t)(msghdr.msg_iov); + write_args.iovlen = msghdr.msg_iovlen; + bpf_map_update_elem(&msg_packets, &id, &write_args, BPF_ANY); + } + return 0; +} + +static __always_inline int pre_process_iovec(struct trace_event_raw_sys_enter *ctx) +{ + __u64 id = bpf_get_current_pid_tgid(); + __u32 sockfd = (__u32)ctx->args[0]; // For writev and readv, sockfd is the first argument + __u64 unique_connection_id = generate_unique_connection_id(id, sockfd); + struct active_connection_info *conn_info = bpf_map_lookup_elem(&accepted_sockets_map, &unique_connection_id); + if (conn_info) + { + struct packet_msg write_args = {}; + write_args.fd = sockfd; + write_args.iovec_ptr = (__u64)ctx->args[1]; + write_args.iovlen = (__u64)ctx->args[2]; + bpf_map_update_elem(&msg_packets, &id, &write_args, BPF_ANY); + } + return 0; +} + +static __always_inline int process_msg(struct trace_event_raw_sys_exit *ctx, char *syscall) +{ + __u64 id = bpf_get_current_pid_tgid(); + struct packet_msg *msg = bpf_map_lookup_elem(&msg_packets, &id); + if (!msg) + return 0; + + // Loop through iovec structures + for (__u64 i = 0; i < msg->iovlen && i < 28; i++) + { + struct iovec iov = {}; + int ret = bpf_probe_read_user(&iov, sizeof(iov), (void *)(msg->iovec_ptr + i * sizeof(struct iovec))); + if (ret < 0) + break; + + __u64 seg_len = iov.iov_len; + if (seg_len > PACKET_CHUNK_SIZE) + seg_len = PACKET_CHUNK_SIZE; + + char buffer[PACKET_CHUNK_SIZE] = {0}; + ret = bpf_probe_read_user(buffer, seg_len, iov.iov_base); + if (ret < 0) + break; + + // Check if this segment is an HTTP message + int type = get_http_type(ctx, buffer, seg_len); + if (type) + { + __u32 zero = 0; + struct httpevent *dataevent = bpf_map_lookup_elem(&event_data, &zero); + if (!dataevent) + continue; + + populate_httpevent(dataevent); + enrich_ip_port(ctx, msg->fd, dataevent); + dataevent->type = type; + dataevent->sock_fd = msg->fd; + + __u64 copy_len = seg_len; + if (copy_len > MAX_DATAEVENT_BUFFER) + copy_len = MAX_DATAEVENT_BUFFER; + + bpf_probe_read(dataevent->buf, copy_len, buffer); + bpf_probe_read_str(&dataevent->syscall, sizeof(dataevent->syscall), syscall); + bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, dataevent, sizeof(*dataevent)); + } + } + bpf_map_delete_elem(&msg_packets, &id); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_accept") +int sys_enter_accept(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + enter_accept(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_accept") +int 
sys_exit_accept(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + exit_accept(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_accept4") +int sys_enter_accept4(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + enter_accept(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_accept4") +int sys_exit_accept4(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + exit_accept(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_close") +int sys_enter_close(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 sockfd = (__u32)ctx->args[0]; + __u64 unique_connection_id = generate_unique_connection_id(pid_tgid, sockfd); + bpf_map_delete_elem(&accepted_sockets_map, &unique_connection_id); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_read") +int sys_enter_read(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + pre_receive_syscalls(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_read") +int sys_exit_read(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + process_packet(ctx, "read"); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_recvfrom") +int sys_enter_recvfrom(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + if (is_msg_peek(ctx->args[3])) + return 0; + pre_receive_syscalls(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_recvfrom") +int sys_exit_recvfrom(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + process_packet(ctx, "recvfrom"); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_write") +int syscall__probe_entry_write(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + pre_receive_syscalls(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_write") +int syscall__probe_ret_write(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + process_packet(ctx, "write"); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_sendto") +int syscall__probe_entry_sendto(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + pre_receive_syscalls(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_sendto") +int syscall__probe_ret_sendto(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + process_packet(ctx, "sendto"); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_connect") +int syscall__probe_entry_connect(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + enter_connect(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_connect") +int syscall__probe_ret_connect(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + exit_connect(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_sendmsg") +int syscall__probe_entry_sendmsg(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + pre_process_msg(ctx); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_sendmsg") +int syscall__probe_ret_sendmsg(struct trace_event_raw_sys_exit *ctx) +{ + if (should_discard()) + return 0; + + process_msg(ctx, "sendmsg"); + return 0; +} + +SEC("tracepoint/syscalls/sys_enter_recvmsg") +int syscall__probe_entry_recvmsg(struct trace_event_raw_sys_enter *ctx) +{ + if (should_discard()) + return 0; + + if (is_msg_peek(ctx->args[2])) + return 0; + pre_process_msg(ctx); 
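+    // Only the iovec pointers are captured here; the payload itself is
+    // parsed on sys_exit_recvmsg, once the kernel has filled the buffers.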
+    return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_recvmsg")
+int syscall__probe_ret_recvmsg(struct trace_event_raw_sys_exit *ctx)
+{
+    if (should_discard())
+        return 0;
+
+    process_msg(ctx, "recvmsg");
+    return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_writev")
+int syscall__probe_entry_writev(struct trace_event_raw_sys_enter *ctx)
+{
+    if (should_discard())
+        return 0;
+
+    pre_process_iovec(ctx);
+    return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_writev")
+int syscall__probe_ret_writev(struct trace_event_raw_sys_exit *ctx)
+{
+    if (should_discard())
+        return 0;
+
+    process_msg(ctx, "writev");
+    return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_readv")
+int syscall__probe_entry_readv(struct trace_event_raw_sys_enter *ctx)
+{
+    if (should_discard())
+        return 0;
+    pre_process_iovec(ctx);
+    return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_readv")
+int syscall__probe_ret_readv(struct trace_event_raw_sys_exit *ctx)
+{
+    if (should_discard())
+        return 0;
+    process_msg(ctx, "readv");
+    return 0;
+}
+
+char __license[] SEC("license") = "Dual MIT/GPL";
diff --git a/pkg/ebpf/gadgets/http/tracer/bpf/sniffer_strcuts.h b/pkg/ebpf/gadgets/http/tracer/bpf/sniffer_strcuts.h
new file mode 100644
index 00000000..7608741b
--- /dev/null
+++ b/pkg/ebpf/gadgets/http/tracer/bpf/sniffer_strcuts.h
@@ -0,0 +1,77 @@
+#pragma once
+#include "../../../../include/amd64/vmlinux.h"
+#include "../../../../include/types.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "../../../../include/mntns_filter.h"
+
+#define EVENT_TYPE_CONNECT 0
+#define EVENT_TYPE_ACCEPT 1
+#define EVENT_TYPE_REQUEST 2
+#define EVENT_TYPE_RESPONSE 3
+#define EVENT_TYPE_CLOSE 4
+
+#define MAX_PACKET_SIZE 200
+#define PACKET_CHUNK_SIZE 200
+#define MAX_DATAEVENT_BUFFER 1028
+#define MAX_SYSCALL 128
+#define MAX_MSG_COUNT 20
+
+#define MSG_PEEK 0x02
+#define EINPROGRESS -115
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+struct pre_accept_args {
+    uint64_t addr_ptr; // struct sockaddr * passed to accept()
+};
+
+struct pre_connect_args {
+    int sockfd;
+    struct sockaddr_in addr;
+};
+
+struct active_connection_info {
+    int sockfd;
+    struct sockaddr_in addr;
+};
+
+// Packet structs:
+
+struct packet_buffer {
+    int sockfd;
+    __u64 buf;
+    size_t len;
+};
+
+struct packet_msg {
+    int32_t fd;
+    uint64_t iovec_ptr; // msg_iov of the user_msghdr, or the iovec array for readv/writev
+    size_t iovlen;
+};
+
+struct packet_mmsg {
+    int32_t fd;
+    uint32_t msg_count;
+    struct packet_msg msgs[MAX_MSG_COUNT];
+};
+
+struct httpevent {
+    gadget_timestamp timestamp;
+    gadget_mntns_id mntns_id;
+    __u32 pid;
+    __u32 uid;
+    __u32 gid;
+
+    u8 type;
+    u32 sock_fd;
+    u8 buf[MAX_DATAEVENT_BUFFER];
+    u8 syscall[MAX_SYSCALL];
+
+    __u32 other_ip;
+    __u16 other_port;
+};
diff --git a/pkg/ebpf/gadgets/http/tracer/http_parse.go b/pkg/ebpf/gadgets/http/tracer/http_parse.go
new file mode 100644
index 00000000..d86a45dc
--- /dev/null
+++ b/pkg/ebpf/gadgets/http/tracer/http_parse.go
@@ -0,0 +1,107 @@
+package tracer
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets"
+	eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+	tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types"
+)
+
+func CreateEventFromRequest(bpfEvent *http_snifferHttpevent) (*tracerhttptype.Event, error) {
+	ip := make(net.IP, 4)
+	binary.LittleEndian.PutUint32(ip, bpfEvent.OtherIp)
+
+	request, err := ParseHTTPRequest(FromCString(bpfEvent.Buf[:]))
+	if err != nil {
+		return nil, err
+	}
+
+	direction, err := tracerhttptype.GetPacketDirection(gadgets.FromCString(bpfEvent.Syscall[:]))
+	if err != nil {
+		return nil, err
+	}
+
+	event := tracerhttptype.Event{
+		Event: eventtypes.Event{
+			Type:      eventtypes.NORMAL,
+			Timestamp: gadgets.WallTimeFromBootTime(bpfEvent.Timestamp),
+		},
+		WithMountNsID: eventtypes.WithMountNsID{MountNsID: bpfEvent.MntnsId},
+		Pid:           bpfEvent.Pid,
+		Uid:           bpfEvent.Uid,
+		Gid:           bpfEvent.Gid,
+		OtherPort:     bpfEvent.OtherPort,
+		OtherIp:       ip.String(),
+		Request:       request,
+		Internal:      tracerhttptype.IsInternal(ip.String()),
+		Direction:     direction,
+	}
+
+	return &event, nil
+}
+
+func ParseHTTPRequest(data []byte) (*http.Request, error) {
+	bufReader := bufio.NewReader(bytes.NewReader(data))
+
+	req, err := http.ReadRequest(bufReader)
+	if err != nil {
+		return nil, err
+	}
+	defer req.Body.Close()
+
+	return req, nil
+}
+
+func ParseHTTPResponse(data []byte, req *http.Request) (*http.Response, error) {
+	bufReader := bufio.NewReader(bytes.NewReader(data))
+
+	resp, err := http.ReadResponse(bufReader, req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read response: %w", err)
+	}
+
+	return resp, nil
+}
+
+// ExtractConsistentHeaders keeps only the headers that are expected to be
+// stable across requests; http.Header values are always []string, so they
+// can be copied directly.
+func ExtractConsistentHeaders(headers http.Header) map[string][]string {
+	result := make(map[string][]string)
+	for _, header := range tracerhttptype.ConsistentHeaders {
+		if value, ok := headers[header]; ok {
+			result[header] = value
+		}
+	}
+	return result
+}
+
+func FromCString(in []byte) []byte {
+	for i := 0; i < len(in); i++ {
+		if in[i] == 0 {
+			return in[:i]
+		}
+	}
+	return in
+}
+
+func GetUniqueIdentifier(event *http_snifferHttpevent) string {
+	return strconv.FormatUint(uint64(event.Pid), 10) + strconv.FormatUint(uint64(event.SockFd), 10)
+}
+
+func
ToTime(t eventtypes.Time) time.Time { + return time.Unix(0, int64(t)) +} diff --git a/pkg/ebpf/gadgets/http/tracer/http_sniffer_bpfel.go b/pkg/ebpf/gadgets/http/tracer/http_sniffer_bpfel.go new file mode 100644 index 00000000..a8f247be --- /dev/null +++ b/pkg/ebpf/gadgets/http/tracer/http_sniffer_bpfel.go @@ -0,0 +1,242 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build 386 || amd64 || arm || arm64 || loong64 || mips64le || mipsle || ppc64le || riscv64 + +package tracer + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type http_snifferActiveConnectionInfo struct { + Sockfd int32 + Addr struct { + SinFamily uint16 + SinPort uint16 + SinAddr struct{ S_addr uint32 } + Pad [8]uint8 + } +} + +type http_snifferHttpevent struct { + Timestamp uint64 + MntnsId uint64 + Pid uint32 + Uid uint32 + Gid uint32 + Type uint8 + _ [3]byte + SockFd uint32 + Buf [1028]uint8 + Syscall [128]uint8 + OtherIp uint32 + OtherPort uint16 + _ [2]byte +} + +type http_snifferPacketBuffer struct { + Sockfd int32 + _ [4]byte + Buf uint64 + Len uint64 +} + +// loadHttp_sniffer returns the embedded CollectionSpec for http_sniffer. +func loadHttp_sniffer() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_Http_snifferBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load http_sniffer: %w", err) + } + + return spec, err +} + +// loadHttp_snifferObjects loads http_sniffer and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *http_snifferObjects +// *http_snifferPrograms +// *http_snifferMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadHttp_snifferObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadHttp_sniffer() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// http_snifferSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type http_snifferSpecs struct { + http_snifferProgramSpecs + http_snifferMapSpecs +} + +// http_snifferSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. 
+type http_snifferProgramSpecs struct { + SysEnterAccept *ebpf.ProgramSpec `ebpf:"sys_enter_accept"` + SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"` + SysEnterClose *ebpf.ProgramSpec `ebpf:"sys_enter_close"` + SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"` + SysEnterRecvfrom *ebpf.ProgramSpec `ebpf:"sys_enter_recvfrom"` + SysExitAccept *ebpf.ProgramSpec `ebpf:"sys_exit_accept"` + SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"` + SysExitRead *ebpf.ProgramSpec `ebpf:"sys_exit_read"` + SysExitRecvfrom *ebpf.ProgramSpec `ebpf:"sys_exit_recvfrom"` + SyscallProbeEntryConnect *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_connect"` + SyscallProbeEntryReadv *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_readv"` + SyscallProbeEntryRecvmsg *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_recvmsg"` + SyscallProbeEntrySendmsg *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_sendmsg"` + SyscallProbeEntrySendto *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_sendto"` + SyscallProbeEntryWrite *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_write"` + SyscallProbeEntryWritev *ebpf.ProgramSpec `ebpf:"syscall__probe_entry_writev"` + SyscallProbeRetConnect *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_connect"` + SyscallProbeRetReadv *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_readv"` + SyscallProbeRetRecvmsg *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_recvmsg"` + SyscallProbeRetSendmsg *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_sendmsg"` + SyscallProbeRetSendto *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_sendto"` + SyscallProbeRetWrite *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_write"` + SyscallProbeRetWritev *ebpf.ProgramSpec `ebpf:"syscall__probe_ret_writev"` +} + +// http_snifferMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type http_snifferMapSpecs struct { + AcceptedSocketsMap *ebpf.MapSpec `ebpf:"accepted_sockets_map"` + ActiveConnectionsArgsMap *ebpf.MapSpec `ebpf:"active_connections_args_map"` + BufferPackets *ebpf.MapSpec `ebpf:"buffer_packets"` + EmptyChar *ebpf.MapSpec `ebpf:"empty_char"` + EventData *ebpf.MapSpec `ebpf:"event_data"` + Events *ebpf.MapSpec `ebpf:"events"` + GadgetMntnsFilterMap *ebpf.MapSpec `ebpf:"gadget_mntns_filter_map"` + MsgPackets *ebpf.MapSpec `ebpf:"msg_packets"` + PreAcceptArgsMap *ebpf.MapSpec `ebpf:"pre_accept_args_map"` +} + +// http_snifferObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadHttp_snifferObjects or ebpf.CollectionSpec.LoadAndAssign. +type http_snifferObjects struct { + http_snifferPrograms + http_snifferMaps +} + +func (o *http_snifferObjects) Close() error { + return _Http_snifferClose( + &o.http_snifferPrograms, + &o.http_snifferMaps, + ) +} + +// http_snifferMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadHttp_snifferObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type http_snifferMaps struct { + AcceptedSocketsMap *ebpf.Map `ebpf:"accepted_sockets_map"` + ActiveConnectionsArgsMap *ebpf.Map `ebpf:"active_connections_args_map"` + BufferPackets *ebpf.Map `ebpf:"buffer_packets"` + EmptyChar *ebpf.Map `ebpf:"empty_char"` + EventData *ebpf.Map `ebpf:"event_data"` + Events *ebpf.Map `ebpf:"events"` + GadgetMntnsFilterMap *ebpf.Map `ebpf:"gadget_mntns_filter_map"` + MsgPackets *ebpf.Map `ebpf:"msg_packets"` + PreAcceptArgsMap *ebpf.Map `ebpf:"pre_accept_args_map"` +} + +func (m *http_snifferMaps) Close() error { + return _Http_snifferClose( + m.AcceptedSocketsMap, + m.ActiveConnectionsArgsMap, + m.BufferPackets, + m.EmptyChar, + m.EventData, + m.Events, + m.GadgetMntnsFilterMap, + m.MsgPackets, + m.PreAcceptArgsMap, + ) +} + +// http_snifferPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadHttp_snifferObjects or ebpf.CollectionSpec.LoadAndAssign. +type http_snifferPrograms struct { + SysEnterAccept *ebpf.Program `ebpf:"sys_enter_accept"` + SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"` + SysEnterClose *ebpf.Program `ebpf:"sys_enter_close"` + SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"` + SysEnterRecvfrom *ebpf.Program `ebpf:"sys_enter_recvfrom"` + SysExitAccept *ebpf.Program `ebpf:"sys_exit_accept"` + SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"` + SysExitRead *ebpf.Program `ebpf:"sys_exit_read"` + SysExitRecvfrom *ebpf.Program `ebpf:"sys_exit_recvfrom"` + SyscallProbeEntryConnect *ebpf.Program `ebpf:"syscall__probe_entry_connect"` + SyscallProbeEntryReadv *ebpf.Program `ebpf:"syscall__probe_entry_readv"` + SyscallProbeEntryRecvmsg *ebpf.Program `ebpf:"syscall__probe_entry_recvmsg"` + SyscallProbeEntrySendmsg *ebpf.Program `ebpf:"syscall__probe_entry_sendmsg"` + SyscallProbeEntrySendto *ebpf.Program `ebpf:"syscall__probe_entry_sendto"` + SyscallProbeEntryWrite *ebpf.Program `ebpf:"syscall__probe_entry_write"` + SyscallProbeEntryWritev *ebpf.Program `ebpf:"syscall__probe_entry_writev"` + SyscallProbeRetConnect *ebpf.Program `ebpf:"syscall__probe_ret_connect"` + SyscallProbeRetReadv *ebpf.Program `ebpf:"syscall__probe_ret_readv"` + SyscallProbeRetRecvmsg *ebpf.Program `ebpf:"syscall__probe_ret_recvmsg"` + SyscallProbeRetSendmsg *ebpf.Program `ebpf:"syscall__probe_ret_sendmsg"` + SyscallProbeRetSendto *ebpf.Program `ebpf:"syscall__probe_ret_sendto"` + SyscallProbeRetWrite *ebpf.Program `ebpf:"syscall__probe_ret_write"` + SyscallProbeRetWritev *ebpf.Program `ebpf:"syscall__probe_ret_writev"` +} + +func (p *http_snifferPrograms) Close() error { + return _Http_snifferClose( + p.SysEnterAccept, + p.SysEnterAccept4, + p.SysEnterClose, + p.SysEnterRead, + p.SysEnterRecvfrom, + p.SysExitAccept, + p.SysExitAccept4, + p.SysExitRead, + p.SysExitRecvfrom, + p.SyscallProbeEntryConnect, + p.SyscallProbeEntryReadv, + p.SyscallProbeEntryRecvmsg, + p.SyscallProbeEntrySendmsg, + p.SyscallProbeEntrySendto, + p.SyscallProbeEntryWrite, + p.SyscallProbeEntryWritev, + p.SyscallProbeRetConnect, + p.SyscallProbeRetReadv, + p.SyscallProbeRetRecvmsg, + p.SyscallProbeRetSendmsg, + p.SyscallProbeRetSendto, + p.SyscallProbeRetWrite, + p.SyscallProbeRetWritev, + ) +} + +func _Http_snifferClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. 
+// +//go:embed http_sniffer_bpfel.o +var _Http_snifferBytes []byte diff --git a/pkg/ebpf/gadgets/http/tracer/http_sniffer_bpfel.o b/pkg/ebpf/gadgets/http/tracer/http_sniffer_bpfel.o new file mode 100644 index 00000000..bc4a10f7 Binary files /dev/null and b/pkg/ebpf/gadgets/http/tracer/http_sniffer_bpfel.o differ diff --git a/pkg/ebpf/gadgets/http/tracer/tracepoint_definitions.go b/pkg/ebpf/gadgets/http/tracer/tracepoint_definitions.go new file mode 100644 index 00000000..b02a5b32 --- /dev/null +++ b/pkg/ebpf/gadgets/http/tracer/tracepoint_definitions.go @@ -0,0 +1,31 @@ +package tracer + +import tracepointlib "github.com/kubescape/node-agent/pkg/ebpf/lib" + +func GetTracepointDefinitions(objs *http_snifferPrograms) []tracepointlib.TracepointInfo { + return []tracepointlib.TracepointInfo{ + {"sys_enter_accept", objs.SysEnterAccept}, + {"sys_enter_accept4", objs.SysEnterAccept4}, + {"sys_exit_accept", objs.SysExitAccept}, + {"sys_exit_accept4", objs.SysExitAccept4}, + {"sys_enter_read", objs.SysEnterRead}, + {"sys_exit_read", objs.SysExitRead}, + {"sys_enter_close", objs.SysEnterClose}, + {"sys_enter_recvfrom", objs.SysEnterRecvfrom}, + {"sys_exit_recvfrom", objs.SysExitRecvfrom}, + {"sys_enter_connect", objs.SyscallProbeEntryConnect}, + {"sys_exit_connect", objs.SyscallProbeRetConnect}, + {"sys_enter_write", objs.SyscallProbeEntryWrite}, + {"sys_exit_write", objs.SyscallProbeRetWrite}, + {"sys_enter_sendto", objs.SyscallProbeEntrySendto}, + {"sys_exit_sendto", objs.SyscallProbeRetSendto}, + {"sys_enter_sendmsg", objs.SyscallProbeEntrySendmsg}, + {"sys_exit_sendmsg", objs.SyscallProbeRetSendmsg}, + {"sys_enter_recvmsg", objs.SyscallProbeEntryRecvmsg}, + {"sys_exit_recvmsg", objs.SyscallProbeRetRecvmsg}, + {"sys_enter_writev", objs.SyscallProbeEntryWritev}, + {"sys_exit_writev", objs.SyscallProbeRetWritev}, + {"sys_enter_readv", objs.SyscallProbeEntryReadv}, + {"sys_exit_readv", objs.SyscallProbeRetReadv}, + } +} diff --git a/pkg/ebpf/gadgets/http/tracer/tracer.go b/pkg/ebpf/gadgets/http/tracer/tracer.go new file mode 100644 index 00000000..346e9d6a --- /dev/null +++ b/pkg/ebpf/gadgets/http/tracer/tracer.go @@ -0,0 +1,228 @@ +package tracer + +import ( + "errors" + "fmt" + "os" + "time" + "unsafe" + + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + lru "github.com/hashicorp/golang-lru/v2" + gadgetcontext "github.com/inspektor-gadget/inspektor-gadget/pkg/gadget-context" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets" + "github.com/kubescape/go-logger" + "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" + tracepointlib "github.com/kubescape/node-agent/pkg/ebpf/lib" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -strip /usr/bin/llvm-strip-18 -cc /usr/bin/clang -no-global-types -target bpfel -cc clang -cflags "-g -O2 -Wall" -type active_connection_info -type packet_buffer -type httpevent http_sniffer bpf/http-sniffer.c -- -I./bpf/ + +type Config struct { + MountnsMap *ebpf.Map +} + +type Tracer struct { + config *Config + enricher gadgets.DataEnricherByMntNs + eventCallback func(*types.Event) + + objs http_snifferObjects + + httplinks []link.Link + reader *perf.Reader + eventsMap *lru.Cache[string, *types.Event] // Use golang-lru cache + timeoutDuration time.Duration + timeoutTicker *time.Ticker +} + +func NewTracer(config *Config, enricher gadgets.DataEnricherByMntNs, + eventCallback func(*types.Event), +) (*Tracer, error) { + // Create a new 
LRU cache with a specified size
+	cache, err := lru.New[string, *types.Event](types.MaxGroupedEventSize)
+	if err != nil {
+		return nil, fmt.Errorf("creating lru cache: %w", err)
+	}
+
+	t := &Tracer{
+		config:          config,
+		enricher:        enricher,
+		eventCallback:   eventCallback,
+		eventsMap:       cache,
+		timeoutDuration: 1 * time.Minute,
+	}
+
+	if err := t.install(); err != nil {
+		t.Close()
+		return nil, err
+	}
+
+	t.timeoutTicker = time.NewTicker(30 * time.Second)
+	go t.cleanupOldRequests()
+
+	go t.run()
+
+	return t, nil
+}
+
+func (t *Tracer) Close() {
+	// Stop the timeout ticker
+	if t.timeoutTicker != nil {
+		t.timeoutTicker.Stop()
+	}
+
+	for _, l := range t.httplinks {
+		gadgets.CloseLink(l)
+	}
+
+	if t.reader != nil {
+		t.reader.Close()
+	}
+
+	t.objs.Close()
+}
+
+func (t *Tracer) install() error {
+	var err error
+	spec, err := loadHttp_sniffer()
+	if err != nil {
+		return fmt.Errorf("loading ebpf program: %w", err)
+	}
+
+	if err := gadgets.LoadeBPFSpec(t.config.MountnsMap, spec, nil, &t.objs); err != nil {
+		return fmt.Errorf("loading ebpf spec: %w", err)
+	}
+
+	tracepoints := GetTracepointDefinitions(&t.objs.http_snifferPrograms)
+	var links []link.Link
+	for _, tp := range tracepoints {
+		l, err := tracepointlib.AttachTracepoint(tp)
+		if err != nil {
+			logger.L().Error(fmt.Sprintf("Error attaching tracepoint: %s", err))
+		}
+		links = append(links, l)
+	}
+
+	t.httplinks = links
+
+	t.reader, err = perf.NewReader(t.objs.http_snifferMaps.Events, gadgets.PerfBufferPages*os.Getpagesize())
+	if err != nil {
+		return fmt.Errorf("creating perf ring buffer: %w", err)
+	}
+
+	return nil
+}
+
+func (t *Tracer) run() {
+	for {
+		record, err := t.reader.Read()
+		if err != nil {
+			if errors.Is(err, perf.ErrClosed) {
+				return
+			}
+
+			msg := fmt.Sprintf("Error reading perf ring buffer: %s", err)
+			t.eventCallback(types.Base(eventtypes.Err(msg)))
+			continue
+		}
+
+		if record.LostSamples > 0 {
+			msg := fmt.Sprintf("lost %d samples", record.LostSamples)
+			t.eventCallback(types.Base(eventtypes.Warn(msg)))
+			continue
+		}
+
+		bpfEvent := (*http_snifferHttpevent)(unsafe.Pointer(&record.RawSample[0]))
+
+		if grouped := t.GroupEvents(bpfEvent); grouped != nil {
+			// We'll only enrich by request properties
+			if t.enricher != nil {
+				t.enricher.EnrichByMntNs(&grouped.CommonData, grouped.MountNsID)
+			}
+			t.eventCallback(grouped)
+		}
+	}
+}
+
+func (t *Tracer) Run(gadgetCtx gadgets.GadgetContext) error {
+	defer t.Close()
+	if err := t.install(); err != nil {
+		return fmt.Errorf("installing tracer: %w", err)
+	}
+
+	// The cleanup goroutine must be started before blocking on the gadget
+	// context; the ticker may still be nil when the tracer was built via
+	// NewInstance rather than NewTracer.
+	if t.timeoutTicker == nil {
+		t.timeoutDuration = 1 * time.Minute
+		t.timeoutTicker = time.NewTicker(30 * time.Second)
+	}
+	go t.cleanupOldRequests()
+
+	go t.run()
+	gadgetcontext.WaitForTimeoutOrDone(gadgetCtx)
+
+	return nil
+}
+
+func (t *Tracer) GroupEvents(bpfEvent *http_snifferHttpevent) *types.Event {
+	eventType := types.HTTPDataType(bpfEvent.Type)
+
+	if eventType == types.Request {
+		event, err := CreateEventFromRequest(bpfEvent)
+		if err != nil {
+			msg := fmt.Sprintf("Error parsing request: %s", err)
+			t.eventCallback(types.Base(eventtypes.Warn(msg)))
+			return nil
+		}
+		t.eventsMap.Add(GetUniqueIdentifier(bpfEvent), event)
+	} else if eventType == types.Response {
+		if exists, ok := t.eventsMap.Get(GetUniqueIdentifier(bpfEvent)); ok {
+			grouped := exists
+
+			response, err := ParseHTTPResponse(FromCString(bpfEvent.Buf[:]), grouped.Request)
+			if err != nil {
+				msg := fmt.Sprintf("Error parsing response: %s", err)
+				t.eventCallback(types.Base(eventtypes.Warn(msg)))
+				return nil
+			}
+
+			grouped.Response = response
+			t.eventsMap.Remove(GetUniqueIdentifier(bpfEvent))
+			return grouped
+		}
+	}
+
+	return nil
+}
+
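+// cleanupOldRequests runs on timeoutTicker and evicts cached requests that
+// never received a matching response within timeoutDuration, so unmatched
+// entries do not accumulate in the events cache between LRU evictions.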
+func (t *Tracer) cleanupOldRequests() {
+    for range t.timeoutTicker.C {
+        keys := t.eventsMap.Keys()
+        for _, key := range keys {
+            if event, ok := t.eventsMap.Peek(key); ok {
+                if time.Since(ToTime(event.Timestamp)) > t.timeoutDuration {
+                    t.eventsMap.Remove(key)
+                }
+            }
+        }
+    }
+}
+
+func (t *Tracer) SetMountNsMap(mountnsMap *ebpf.Map) {
+    t.config.MountnsMap = mountnsMap
+}
+
+func (t *Tracer) SetEventHandler(handler any) {
+    nh, ok := handler.(func(ev *types.Event))
+    if !ok {
+        panic("event handler invalid")
+    }
+    t.eventCallback = nh
+}
+
+type GadgetDesc struct{}
+
+func (g *GadgetDesc) NewInstance() (gadgets.Gadget, error) {
+    tracer := &Tracer{
+        config: &Config{},
+    }
+    return tracer, nil
+}
diff --git a/pkg/ebpf/gadgets/http/types/methods.go b/pkg/ebpf/gadgets/http/types/methods.go
new file mode 100644
index 00000000..751c186e
--- /dev/null
+++ b/pkg/ebpf/gadgets/http/types/methods.go
@@ -0,0 +1,36 @@
+package types
+
+import (
+    "fmt"
+    "net"
+
+    "github.com/inspektor-gadget/inspektor-gadget/pkg/columns"
+    eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+    "github.com/kubescape/storage/pkg/apis/softwarecomposition/consts"
+)
+
+func GetPacketDirection(syscall string) (consts.NetworkDirection, error) {
+    if readSyscalls[syscall] {
+        return consts.Inbound, nil
+    } else if writeSyscalls[syscall] {
+        return consts.Outbound, nil
+    } else {
+        return "", fmt.Errorf("unknown syscall %s", syscall)
+    }
+}
+
+func IsInternal(ip string) bool {
+    ipAddress := net.ParseIP(ip)
+    return ipAddress.IsPrivate()
+}
+
+func GetColumns() *columns.Columns[Event] {
+    httpColumns := columns.MustCreateColumns[Event]()
+    return httpColumns
+}
+
+func Base(ev eventtypes.Event) *Event {
+    return &Event{
+        Event: ev,
+    }
+}
diff --git a/pkg/ebpf/gadgets/http/types/types.go b/pkg/ebpf/gadgets/http/types/types.go
new file mode 100644
index 00000000..87600f6d
--- /dev/null
+++ b/pkg/ebpf/gadgets/http/types/types.go
@@ -0,0 +1,56 @@
+package types
+
+import (
+    "net/http"
+
+    eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+    "github.com/kubescape/storage/pkg/apis/softwarecomposition/consts"
+)
+
+const MaxGroupedEventSize int = 10000
+
+type HTTPDataType int
+
+const (
+    Request  HTTPDataType = 2
+    Response HTTPDataType = 3
+)
+
+var ConsistentHeaders = []string{
+    "Accept-Encoding",
+    "Accept-Language",
+    "Connection",
+    "Host",
+    "Upgrade-Insecure-Requests",
+}
+
+var writeSyscalls = map[string]bool{
+    "write":   true,
+    "writev":  true,
+    "sendto":  true,
+    "sendmsg": true,
+}
+
+var readSyscalls = map[string]bool{
+    "read":     true,
+    "readv":    true,
+    "recvfrom": true,
+    "recvmsg":  true,
+}
+
+type HTTPPacket interface {
+}
+
+type Event struct {
+    eventtypes.Event
+    eventtypes.WithMountNsID
+    Pid       uint32                  `json:"pid,omitempty" column:"pid,template:pid"`
+    Uid       uint32                  `json:"uid,omitempty" column:"uid,template:uid"`
+    Gid       uint32                  `json:"gid,omitempty" column:"gid,template:gid"`
+    OtherPort uint16                  `json:"other_port,omitempty" column:"other_port,template:other_port"`
+    OtherIp   string                  `json:"other_ip,omitempty" column:"other_ip,template:other_ip"`
+    Internal  bool                    `json:"internal,omitempty" column:"internal,template:internal"`
+    Direction consts.NetworkDirection `json:"direction,omitempty" column:"direction,template:direction"`
+    Request   *http.Request
+    Response  *http.Response
+}
diff --git a/pkg/ebpf/gadgets/ptrace/tracer/bpf/ptrace_detector.c b/pkg/ebpf/gadgets/ptrace/tracer/bpf/ptrace_detector.c
new file mode 100644
index 00000000..b5af06ee
--- /dev/null
+++ b/pkg/ebpf/gadgets/ptrace/tracer/bpf/ptrace_detector.c
@@ -0,0 +1,84 @@
+#include "ptrace_detector.h"
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+    __uint(key_size, sizeof(u32));
+    __uint(value_size, sizeof(u32));
+} events SEC(".maps");
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __uint(max_entries, 1);
+    __type(key, u32);
+    __type(value, struct event);
+} empty_event SEC(".maps");
+
+static __always_inline int should_discard()
+{
+    u64 mntns_id;
+    mntns_id = gadget_get_mntns_id();
+
+    if (gadget_should_discard_mntns_id(mntns_id))
+    {
+        return 1;
+    }
+
+    return 0;
+}
+
+static __always_inline char *get_exe_path(struct task_struct *current_task)
+{
+    struct file *exe_file = BPF_CORE_READ(current_task, mm, exe_file);
+    char *exepath;
+    exepath = get_path_str(&exe_file->f_path);
+    return exepath;
+}
+
+static __always_inline void populate_event(struct event *event)
+{
+    u64 mntns_id = gadget_get_mntns_id();
+    u64 pid_tgid = bpf_get_current_pid_tgid();
+    event->pid = pid_tgid >> 32;
+
+    u64 uid_gid = bpf_get_current_uid_gid();
+    event->uid = uid_gid & 0xFFFFFFFF;
+    event->gid = uid_gid >> 32;
+    event->timestamp = bpf_ktime_get_boot_ns();
+    event->mntns_id = mntns_id;
+    bpf_get_current_comm(&event->comm, sizeof(event->comm));
+}
+
+SEC("tracepoint/syscalls/sys_enter_ptrace")
+int trace_enter_ptrace(struct trace_event_raw_sys_enter *ctx)
+{
+    long request = (long)ctx->args[0];
+    long pid = (long)ctx->args[1];
+
+    if (should_discard()) {
+        return 0;
+    }
+
+    struct event *event;
+    u32 zero = 0;
+    event = bpf_map_lookup_elem(&empty_event, &zero);
+    if (!event) {
+        return 0;
+    }
+
+    struct task_struct *current_task = (struct task_struct *)bpf_get_current_task();
+    if (!current_task) {
+        return 0;
+    }
+
+    if (request == PTRACE_SETREGS || request == PTRACE_POKETEXT || request == PTRACE_POKEDATA) {
+        char *exepath = get_exe_path(current_task);
+        bpf_probe_read_kernel_str(event->exepath, MAX_STRING_SIZE, exepath);
+        event->ppid = BPF_CORE_READ(current_task, real_parent, pid);
+        event->request = request;
+        populate_event(event);
+        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, event, sizeof(struct event));
+    }
+    return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/pkg/ebpf/gadgets/ptrace/tracer/bpf/ptrace_detector.h b/pkg/ebpf/gadgets/ptrace/tracer/bpf/ptrace_detector.h
new file mode 100644
index 00000000..456c7da0
--- /dev/null
+++ b/pkg/ebpf/gadgets/ptrace/tracer/bpf/ptrace_detector.h
@@ -0,0 +1,37 @@
+#include "../../../../include/amd64/vmlinux.h"
+#include "../../../../include/types.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "../../../../include/mntns_filter.h"
+#include "../../../../include/filesystem.h"
+#include "../../../../include/macros.h"
+#include "../../../../include/buffer.h"
+
+#define TASK_COMM_LEN 16
+#define MAX_STRING_SIZE 4096
+
+#ifndef PTRACE_SETREGS
+#define PTRACE_SETREGS 13
+#endif
+
+#ifndef PTRACE_POKETEXT
+#define PTRACE_POKETEXT 4
+#endif
+
+#ifndef PTRACE_POKEDATA
+#define PTRACE_POKEDATA 5
+#endif
+
+struct event {
+    gadget_timestamp timestamp;
+    gadget_mntns_id mntns_id;
+    __u32 pid;
+    __u32 ppid;
+    __u32 uid;
+    __u32 gid;
+    __u32 request;
+    __u8 comm[TASK_COMM_LEN];
+    __u8 exepath[MAX_STRING_SIZE];
+};
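// Illustrative sketch — not part of the change above. Something like this,
// run inside a monitored container, exercises exactly the request family the
// eBPF program filters on. Assumes golang.org/x/sys/unix; a real test would
// fork a child and Wait4 for it to stop before poking.
package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    pid := 1234 // hypothetical target PID

    if err := unix.PtraceAttach(pid); err != nil {
        fmt.Println("attach:", err)
        return
    }
    defer unix.PtraceDetach(pid)

    // PTRACE_POKEDATA is one of the three requests the tracepoint reports
    // (alongside PTRACE_SETREGS and PTRACE_POKETEXT).
    payload := []byte{0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90}
    if _, err := unix.PtracePokeData(pid, 0x400000, payload); err != nil {
        fmt.Println("poke:", err)
    }
}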
diff --git a/pkg/ebpf/gadgets/ptrace/tracer/ptrace_bpfel.go b/pkg/ebpf/gadgets/ptrace/tracer/ptrace_bpfel.go
new file mode 100644
index 00000000..ed5d7bd9
--- /dev/null
+++ b/pkg/ebpf/gadgets/ptrace/tracer/ptrace_bpfel.go
@@ -0,0 +1,144 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build 386 || amd64 || arm || arm64 || loong64 || mips64le || mipsle || ppc64le || riscv64
+
+package tracer
+
+import (
+    "bytes"
+    _ "embed"
+    "fmt"
+    "io"
+
+    "github.com/cilium/ebpf"
+)
+
+type ptraceEvent struct {
+    Timestamp uint64
+    MntnsId   uint64
+    Pid       uint32
+    Ppid      uint32
+    Uid       uint32
+    Gid       uint32
+    Request   uint32
+    Comm      [16]uint8
+    Exepath   [4096]uint8
+    _         [4]byte
+}
+
+// loadPtrace returns the embedded CollectionSpec for ptrace.
+func loadPtrace() (*ebpf.CollectionSpec, error) {
+    reader := bytes.NewReader(_PtraceBytes)
+    spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+    if err != nil {
+        return nil, fmt.Errorf("can't load ptrace: %w", err)
+    }
+
+    return spec, err
+}
+
+// loadPtraceObjects loads ptrace and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+//    *ptraceObjects
+//    *ptracePrograms
+//    *ptraceMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func loadPtraceObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+    spec, err := loadPtrace()
+    if err != nil {
+        return err
+    }
+
+    return spec.LoadAndAssign(obj, opts)
+}
+
+// ptraceSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type ptraceSpecs struct {
+    ptraceProgramSpecs
+    ptraceMapSpecs
+}
+
+// ptraceSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type ptraceProgramSpecs struct {
+    TraceEnterPtrace *ebpf.ProgramSpec `ebpf:"trace_enter_ptrace"`
+}
+
+// ptraceMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type ptraceMapSpecs struct {
+    Bufs                 *ebpf.MapSpec `ebpf:"bufs"`
+    EmptyEvent           *ebpf.MapSpec `ebpf:"empty_event"`
+    Events               *ebpf.MapSpec `ebpf:"events"`
+    GadgetHeap           *ebpf.MapSpec `ebpf:"gadget_heap"`
+    GadgetMntnsFilterMap *ebpf.MapSpec `ebpf:"gadget_mntns_filter_map"`
+}
+
+// ptraceObjects contains all objects after they have been loaded into the kernel.
+//
+// It can be passed to loadPtraceObjects or ebpf.CollectionSpec.LoadAndAssign.
+type ptraceObjects struct {
+    ptracePrograms
+    ptraceMaps
+}
+
+func (o *ptraceObjects) Close() error {
+    return _PtraceClose(
+        &o.ptracePrograms,
+        &o.ptraceMaps,
+    )
+}
+
+// ptraceMaps contains all maps after they have been loaded into the kernel.
+//
+// It can be passed to loadPtraceObjects or ebpf.CollectionSpec.LoadAndAssign.
+type ptraceMaps struct {
+    Bufs                 *ebpf.Map `ebpf:"bufs"`
+    EmptyEvent           *ebpf.Map `ebpf:"empty_event"`
+    Events               *ebpf.Map `ebpf:"events"`
+    GadgetHeap           *ebpf.Map `ebpf:"gadget_heap"`
+    GadgetMntnsFilterMap *ebpf.Map `ebpf:"gadget_mntns_filter_map"`
+}
+
+func (m *ptraceMaps) Close() error {
+    return _PtraceClose(
+        m.Bufs,
+        m.EmptyEvent,
+        m.Events,
+        m.GadgetHeap,
+        m.GadgetMntnsFilterMap,
+    )
+}
+
+// ptracePrograms contains all programs after they have been loaded into the kernel.
+//
+// It can be passed to loadPtraceObjects or ebpf.CollectionSpec.LoadAndAssign.
+type ptracePrograms struct {
+    TraceEnterPtrace *ebpf.Program `ebpf:"trace_enter_ptrace"`
+}
+
+func (p *ptracePrograms) Close() error {
+    return _PtraceClose(
+        p.TraceEnterPtrace,
+    )
+}
+
+func _PtraceClose(closers ...io.Closer) error {
+    for _, closer := range closers {
+        if err := closer.Close(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// Do not access this directly.
+//
+//go:embed ptrace_bpfel.o
+var _PtraceBytes []byte
diff --git a/pkg/ebpf/gadgets/ptrace/tracer/ptrace_bpfel.o b/pkg/ebpf/gadgets/ptrace/tracer/ptrace_bpfel.o
new file mode 100644
index 00000000..6d06ae23
Binary files /dev/null and b/pkg/ebpf/gadgets/ptrace/tracer/ptrace_bpfel.o differ
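// Illustrative sketch — not part of the change above. The generated bindings
// are normally consumed through gadgets.LoadeBPFSpec (see tracer.go below);
// this is the bare cilium/ebpf path, shown only to make the generated API
// concrete. exampleLoad is a hypothetical helper.
package tracer

import (
    "fmt"

    "github.com/cilium/ebpf/rlimit"
)

func exampleLoad() error {
    // Allow locking memory for eBPF objects on kernels without memcg accounting.
    if err := rlimit.RemoveMemlock(); err != nil {
        return err
    }

    var objs ptraceObjects
    if err := loadPtraceObjects(&objs, nil); err != nil {
        return fmt.Errorf("loading objects: %w", err)
    }
    defer objs.Close()

    // objs.TraceEnterPtrace can now be attached to the
    // syscalls/sys_enter_ptrace tracepoint; objs.Events is the perf map
    // the detector writes into.
    return nil
}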
diff --git a/pkg/ebpf/gadgets/ptrace/tracer/tracer.go b/pkg/ebpf/gadgets/ptrace/tracer/tracer.go
new file mode 100644
index 00000000..7503b76d
--- /dev/null
+++ b/pkg/ebpf/gadgets/ptrace/tracer/tracer.go
@@ -0,0 +1,184 @@
+package tracer
+
+import (
+    "errors"
+    "fmt"
+    "os"
+
+    tracepointlib "github.com/kubescape/node-agent/pkg/ebpf/lib"
+
+    "github.com/cilium/ebpf"
+    "github.com/cilium/ebpf/link"
+    "github.com/cilium/ebpf/perf"
+    gadgetcontext "github.com/inspektor-gadget/inspektor-gadget/pkg/gadget-context"
+    "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets"
+    eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+    "github.com/kubescape/go-logger"
+    "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types"
+)
+
+//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -strip /usr/bin/llvm-strip-18 -no-global-types -target bpfel -cc clang -cflags "-g -O2 -Wall -D __TARGET_ARCH_x86" -type event ptrace bpf/ptrace_detector.c -- -I./bpf/
+const (
+    EVENT_TYPE_CONNECT = iota
+    EVENT_TYPE_ACCEPT
+    EVENT_TYPE_REQUEST
+    EVENT_TYPE_RESPONSE
+    EVENT_TYPE_CLOSE
+)
+
+type Config struct {
+    MountnsMap *ebpf.Map
+}
+
+type Tracer struct {
+    config        *Config
+    enricher      gadgets.DataEnricherByMntNs
+    eventCallback func(*types.Event)
+
+    objs ptraceObjects
+
+    ptracelinks []link.Link
+    reader      *perf.Reader
+}
+
+func NewTracer(config *Config, enricher gadgets.DataEnricherByMntNs,
+    eventCallback func(*types.Event),
+) (*Tracer, error) {
+    t := &Tracer{
+        config:        config,
+        enricher:      enricher,
+        eventCallback: eventCallback,
+    }
+
+    if err := t.install(); err != nil {
+        t.Close()
+        return nil, err
+    }
+
+    go t.run()
+
+    return t, nil
+}
+
+func (t *Tracer) Close() {
+    for _, l := range t.ptracelinks {
+        gadgets.CloseLink(l)
+    }
+
+    if t.reader != nil {
+        t.reader.Close()
+    }
+
+    t.objs.Close()
+}
+
+func (t *Tracer) install() error {
+    var err error
+    spec, err := loadPtrace()
+    if err != nil {
+        return fmt.Errorf("loading ebpf program: %w", err)
+    }
+
+    if err := gadgets.LoadeBPFSpec(t.config.MountnsMap, spec, nil, &t.objs); err != nil {
+        return fmt.Errorf("loading ebpf spec: %w", err)
+    }
+
+    var links []link.Link
+    tp := tracepointlib.TracepointInfo{Syscall: "sys_enter_ptrace", ObjFunc: t.objs.ptracePrograms.TraceEnterPtrace}
+    l, err := tracepointlib.AttachTracepoint(tp)
+    if err != nil {
+        logger.L().Error(fmt.Sprintf("Error attaching tracepoint: %s", err))
+    }
+    links = append(links, l)
+
+    t.ptracelinks = links
+
+    t.reader, err = perf.NewReader(t.objs.ptraceMaps.Events, gadgets.PerfBufferPages*os.Getpagesize())
+    if err != nil {
+        return fmt.Errorf("creating perf ring buffer: %w", err)
+    }
+
+    return nil
+}
+
+func (t *Tracer) run() {
+    for {
+        record, err := t.reader.Read()
+        if err != nil {
+            if errors.Is(err, perf.ErrClosed) {
+                // nothing to do, we're done
+                return
+            }
+            msg := fmt.Sprintf("Error reading perf ring buffer: %s", err)
+            t.eventCallback(types.Base(eventtypes.Err(msg)))
+            continue
+        }
+
+        if record.LostSamples > 0 {
+            msg := fmt.Sprintf("lost %d samples", record.LostSamples)
+            t.eventCallback(types.Base(eventtypes.Warn(msg)))
+            continue
+        }
+
+        bpfEvent := tracepointlib.ConvertToEvent[ptraceEvent](&record)
+        event := t.parseEvent(bpfEvent)
+        t.eventCallback(event)
+    }
+}
+
+func (t *Tracer) Run(gadgetCtx gadgets.GadgetContext) error {
+    defer t.Close()
+    if err := t.install(); err != nil {
+        return fmt.Errorf("installing tracer: %w", err)
+    }
+
+    go t.run()
+    gadgetcontext.WaitForTimeoutOrDone(gadgetCtx)
+
+    return nil
+}
+
+func (t *Tracer) SetMountNsMap(mountnsMap *ebpf.Map) {
+    t.config.MountnsMap = mountnsMap
+}
+
+func (t *Tracer) SetEventHandler(handler any) {
+    nh, ok := handler.(func(ev *types.Event))
+    if !ok {
+        panic("event handler invalid")
+    }
+    t.eventCallback = nh
+}
+
+func (t *Tracer) parseEvent(bpfEvent *ptraceEvent) *types.Event {
+    event := types.Event{
+        Event: eventtypes.Event{
+            Type:      eventtypes.NORMAL,
+            Timestamp: gadgets.WallTimeFromBootTime(bpfEvent.Timestamp),
+        },
+        WithMountNsID: eventtypes.WithMountNsID{MountNsID: bpfEvent.MntnsId},
+        Pid:           bpfEvent.Pid,
+        PPid:          bpfEvent.Ppid,
+        Uid:           bpfEvent.Uid,
+        Gid:           bpfEvent.Gid,
+        Request:       bpfEvent.Request,
+        Comm:          gadgets.FromCString(bpfEvent.Comm[:]),
+        ExePath:       gadgets.FromCString(bpfEvent.Exepath[:]),
+    }
+
+    if t.enricher != nil {
+        t.enricher.EnrichByMntNs(&event.CommonData, event.MountNsID)
+    }
+
+    return &event
+}
+
+type GadgetDesc struct{}
+
+func (g *GadgetDesc) NewInstance() (gadgets.Gadget, error) {
+    tracer := &Tracer{
+        config: &Config{},
+    }
+    return tracer, nil
+}
diff --git a/pkg/ebpf/gadgets/ptrace/tracer/types/types.go b/pkg/ebpf/gadgets/ptrace/tracer/types/types.go
new file mode 100644
index 00000000..fb443f3e
--- /dev/null
+++ b/pkg/ebpf/gadgets/ptrace/tracer/types/types.go
@@ -0,0 +1,21 @@
+package types
+
+import eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+
+type Event struct {
+    eventtypes.Event
+    eventtypes.WithMountNsID
+    Pid     uint32 `json:"pid,omitempty" column:"pid,template:pid"`
+    PPid    uint32 `json:"ppid,omitempty" column:"ppid,template:ppid"`
+    Uid     uint32 `json:"uid,omitempty" column:"uid,template:uid"`
+    Gid     uint32 `json:"gid,omitempty" column:"gid,template:gid"`
+    Request uint32 `json:"request,omitempty" column:"request,template:request"`
+    Comm    string `json:"comm,omitempty" column:"comm,template:comm"`
+    ExePath string `json:"exe_path,omitempty" column:"exe_path,template:exe_path"`
+}
+
+func Base(ev eventtypes.Event) *Event {
+    return &Event{
+        Event: ev,
+    }
+}
diff --git a/pkg/ebpf/gadgets/ssh/tracer/bpf/ssh.bpf.c b/pkg/ebpf/gadgets/ssh/tracer/bpf/ssh.bpf.c
index 8fc637d5..91feba6d 100644
--- a/pkg/ebpf/gadgets/ssh/tracer/bpf/ssh.bpf.c
+++ b/pkg/ebpf/gadgets/ssh/tracer/bpf/ssh.bpf.c
@@ -90,8 +90,8 @@ int ssh_detector(struct __sk_buff *skb) {
     event->gid = (__u32)(skb_val->uid_gid >> 32);
     __builtin_memcpy(&event->comm, skb_val->task, sizeof(event->comm));
 
-    event->src_ip = iph.saddr;
-    event->dst_ip = iph.daddr;
+    event->src_ip = bpf_ntohl(iph.saddr);
+    event->dst_ip = bpf_ntohl(iph.daddr);
     event->src_port = bpf_ntohs(tcph.source);
     event->dst_port = bpf_ntohs(tcph.dest);
 
diff --git a/pkg/ebpf/gadgets/ssh/tracer/ssh_bpfel.o b/pkg/ebpf/gadgets/ssh/tracer/ssh_bpfel.o
index 44b9eff2..f06c58fe 100644
Binary files a/pkg/ebpf/gadgets/ssh/tracer/ssh_bpfel.o and b/pkg/ebpf/gadgets/ssh/tracer/ssh_bpfel.o differ
diff --git a/pkg/ebpf/gadgets/ssh/tracer/tracer.go b/pkg/ebpf/gadgets/ssh/tracer/tracer.go
index 0fb8e9a8..deba0b36 100644
--- a/pkg/ebpf/gadgets/ssh/tracer/tracer.go
+++ b/pkg/ebpf/gadgets/ssh/tracer/tracer.go
@@ -13,7 +13,7 @@ import (
     "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types"
 )
 
-//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -no-global-types -target bpfel -cc clang -cflags "-g -O2 -Wall" -type event ssh bpf/ssh.bpf.c -- -I./bpf/
+//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -no-global-types -target bpfel -cc clang -cflags "-g -O2 -Wall" -type event ssh bpf/ssh.bpf.c -- -I./bpf/ -I /usr/include/x86_64-linux-gnu -D__x86_64__
 
 type Tracer struct {
     *networktracer.Tracer[types.Event]
diff --git a/pkg/ebpf/include/sockets-map.h b/pkg/ebpf/include/sockets-map.h
index 5ed51d87..84fbc6b3 100644
--- a/pkg/ebpf/include/sockets-map.h
+++ b/pkg/ebpf/include/sockets-map.h
@@ -43,6 +43,7 @@
 #define SE_NEXTHDR_DEST 60 /* Destination options header. */
 
 #define SE_TASK_COMM_LEN 16
+#define SE_PATH_MAX 4096
 
 struct sockets_key {
     __u32 netns;
@@ -58,8 +59,12 @@ struct sockets_value {
     __u64 pid_tgid;
     __u64 uid_gid;
     char task[SE_TASK_COMM_LEN];
+    char ptask[SE_TASK_COMM_LEN];
     __u64 sock;
     __u64 deletion_timestamp;
+    char cwd[SE_PATH_MAX];
+    char exepath[SE_PATH_MAX];
+    __u32 ppid;
     char ipv6only;
 };
diff --git a/pkg/ebpf/include/types.h b/pkg/ebpf/include/types.h
index cc49b63d..7862848e 100644
--- a/pkg/ebpf/include/types.h
+++ b/pkg/ebpf/include/types.h
@@ -1,4 +1,3 @@
-#pragma once
 /* SPDX-License-Identifier: Apache-2.0 */
 
 #ifndef __TYPES_H
@@ -68,4 +67,4 @@ typedef __u64 gadget_gauge__u64;
 typedef __u32 gadget_histogram_slot__u32;
 typedef __u64 gadget_histogram_slot__u64;
 
-#endif /* __TYPES_H */
+#endif /* __TYPES_H */
\ No newline at end of file
diff --git a/pkg/ebpf/lib/tracepointlib.go b/pkg/ebpf/lib/tracepointlib.go
new file mode 100644
index 00000000..bc62881f
--- /dev/null
+++ b/pkg/ebpf/lib/tracepointlib.go
@@ -0,0 +1,27 @@
+package tracepointlib
+
+import (
+    "fmt"
+    "unsafe"
+
+    "github.com/cilium/ebpf"
+    "github.com/cilium/ebpf/link"
+    "github.com/cilium/ebpf/perf"
+)
+
+type TracepointInfo struct {
+    Syscall string
+    ObjFunc interface{}
+}
+
+func AttachTracepoint(tracepoint TracepointInfo) (link.Link, error) {
+    l, err := link.Tracepoint("syscalls", tracepoint.Syscall, tracepoint.ObjFunc.(*ebpf.Program), nil)
+    if err != nil {
+        return nil, fmt.Errorf("failed to attach tracepoint %s: %v", tracepoint.Syscall, err)
+    }
+    return l, nil
+}
+
+func ConvertToEvent[T any](record *perf.Record) *T {
+    return (*T)(unsafe.Pointer(&record.RawSample[0]))
+}
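// Illustrative sketch — not part of the change above. It wires the two
// tracepointlib helpers together the way the http and ptrace tracers do:
// attach a program to a syscalls tracepoint, then reinterpret each perf
// record as the gadget's event struct. myEvent and exampleUse stand in for
// a generated type such as ptraceEvent and real caller code.
package tracepointlib

import (
    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/perf"
)

type myEvent struct {
    Pid uint32
}

func exampleUse(prog *ebpf.Program, events *ebpf.Map) error {
    l, err := AttachTracepoint(TracepointInfo{Syscall: "sys_enter_ptrace", ObjFunc: prog})
    if err != nil {
        return err
    }
    defer l.Close()

    rd, err := perf.NewReader(events, 64*1024)
    if err != nil {
        return err
    }
    defer rd.Close()

    record, err := rd.Read()
    if err != nil {
        return err
    }

    // Zero-copy view over record.RawSample — valid only while the record
    // buffer is alive, exactly like the casts in the tracers above.
    ev := ConvertToEvent[myEvent](&record)
    _ = ev.Pid
    return nil
}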
diff --git a/pkg/exporters/exporters_bus.go b/pkg/exporters/exporters_bus.go
index 5ff8c9f8..a747f3ec 100644
--- a/pkg/exporters/exporters_bus.go
+++ b/pkg/exporters/exporters_bus.go
@@ -3,6 +3,7 @@ package exporters
 import (
     "os"
 
+    "github.com/armosec/armoapi-go/armotypes"
     "github.com/kubescape/node-agent/pkg/malwaremanager"
     "github.com/kubescape/node-agent/pkg/ruleengine"
 
@@ -27,7 +28,7 @@ type ExporterBus struct {
 }
 
 // InitExporters initializes all exporters.
-func InitExporters(exportersConfig ExportersConfig, clusterName string, nodeName string) *ExporterBus {
+func InitExporters(exportersConfig ExportersConfig, clusterName string, nodeName string, cloudMetadata *armotypes.CloudMetadata) *ExporterBus {
     var exporters []Exporter
     for _, url := range exportersConfig.AlertManagerExporterUrls {
         alertMan := InitAlertManagerExporter(url)
@@ -35,7 +36,7 @@ func InitExporters(exportersConfig ExportersConfig, clusterName string, nodeName
             exporters = append(exporters, alertMan)
         }
     }
-    stdoutExp := InitStdoutExporter(exportersConfig.StdoutExporter)
+    stdoutExp := InitStdoutExporter(exportersConfig.StdoutExporter, cloudMetadata)
     if stdoutExp != nil {
         exporters = append(exporters, stdoutExp)
     }
@@ -54,7 +55,7 @@ func InitExporters(exportersConfig ExportersConfig, clusterName string, nodeName
         }
     }
     if exportersConfig.HTTPExporterConfig != nil {
-        httpExp, err := InitHTTPExporter(*exportersConfig.HTTPExporterConfig, clusterName, nodeName)
+        httpExp, err := InitHTTPExporter(*exportersConfig.HTTPExporterConfig, clusterName, nodeName, cloudMetadata)
         if err != nil {
             logger.L().Error("failed to initialize http exporter", helpers.Error(err))
         }
diff --git a/pkg/exporters/http_exporter.go b/pkg/exporters/http_exporter.go
index bbb681fe..82fc80dd 100644
--- a/pkg/exporters/http_exporter.go
+++ b/pkg/exporters/http_exporter.go
@@ -43,6 +43,7 @@ type HTTPExporter struct {
     alertCountLock     sync.Mutex
     alertCountStart    time.Time
     alertLimitNotified bool
+    cloudMetadata      *apitypes.CloudMetadata
 }
 
 type HTTPAlertsList struct {
@@ -52,8 +53,9 @@ type HTTPAlertsList struct {
 }
 
 type HTTPAlertsListSpec struct {
-    Alerts      []apitypes.RuntimeAlert `json:"alerts"`
-    ProcessTree apitypes.ProcessTree    `json:"processTree"`
+    Alerts        []apitypes.RuntimeAlert `json:"alerts"`
+    ProcessTree   apitypes.ProcessTree    `json:"processTree"`
+    CloudMetadata apitypes.CloudMetadata  `json:"cloudMetadata"`
 }
 
 func (config *HTTPExporterConfig) Validate() error {
@@ -78,7 +80,7 @@ func (config *HTTPExporterConfig) Validate() error {
 }
 
 // InitHTTPExporter initializes an HTTPExporter with the given URL, headers, timeout, and method
-func InitHTTPExporter(config HTTPExporterConfig, clusterName string, nodeName string) (*HTTPExporter, error) {
+func InitHTTPExporter(config HTTPExporterConfig, clusterName string, nodeName string, cloudMetadata *apitypes.CloudMetadata) (*HTTPExporter, error) {
     if err := config.Validate(); err != nil {
         return nil, err
     }
@@ -90,6 +92,7 @@ func InitHTTPExporter(config HTTPExporterConfig, clusterName string, nodeName st
         httpClient: &http.Client{
             Timeout: time.Duration(config.TimeoutSeconds) * time.Second,
         },
+        cloudMetadata: cloudMetadata,
     }, nil
 }
 
@@ -141,9 +144,17 @@ func (exporter *HTTPExporter) SendRuleAlert(failedRule ruleengine.RuleFailure) {
 func (exporter *HTTPExporter) sendInAlertList(httpAlert apitypes.RuntimeAlert, processTree apitypes.ProcessTree) {
     // create the HTTPAlertsListSpec struct
     // TODO: accumulate alerts and send them in a batch
+    var cloudMetadata apitypes.CloudMetadata
+    if exporter.cloudMetadata == nil {
+        cloudMetadata = apitypes.CloudMetadata{}
+    } else {
+        cloudMetadata = *exporter.cloudMetadata
+    }
+
     httpAlertsListSpec := HTTPAlertsListSpec{
-        Alerts:      []apitypes.RuntimeAlert{httpAlert},
-        ProcessTree: processTree,
+        Alerts:        []apitypes.RuntimeAlert{httpAlert},
+        ProcessTree:   processTree,
+        CloudMetadata: cloudMetadata,
     }
     // create the HTTPAlertsList struct
     httpAlertsList := HTTPAlertsList{
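// Illustrative sketch — not part of the change above. It shows the wire
// shape the exporter now produces and why sendInAlertList swaps a nil
// cloudMetadata for an empty struct: the JSON field stays present and
// well-typed either way. The types below are stand-ins for the armotypes ones.
package main

import (
    "encoding/json"
    "fmt"
)

type cloudMetadata struct {
    Provider string `json:"provider,omitempty"`
}

type alertsListSpec struct {
    Alerts        []string      `json:"alerts"`
    CloudMetadata cloudMetadata `json:"cloudMetadata"`
}

func main() {
    var fromNode *cloudMetadata // may legitimately be nil (no cloud metadata)

    spec := alertsListSpec{Alerts: []string{"R1000"}}
    if fromNode != nil {
        spec.CloudMetadata = *fromNode
    }

    b, _ := json.Marshal(spec)
    fmt.Println(string(b)) // {"alerts":["R1000"],"cloudMetadata":{}}
}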
diff --git a/pkg/exporters/http_exporter_test.go b/pkg/exporters/http_exporter_test.go
index f80b5422..60313392 100644
--- a/pkg/exporters/http_exporter_test.go
+++ b/pkg/exporters/http_exporter_test.go
@@ -33,7 +33,7 @@ func TestSendRuleAlert(t *testing.T) {
     // Create an HTTPExporter with the mock server URL
     exporter, err := InitHTTPExporter(HTTPExporterConfig{
         URL: server.URL,
-    }, "", "")
+    }, "", "", nil)
     assert.NoError(t, err)
 
     // Create a mock rule failure
@@ -96,7 +96,7 @@ func TestSendRuleAlertRateReached(t *testing.T) {
     exporter, err := InitHTTPExporter(HTTPExporterConfig{
         URL:                server.URL,
         MaxAlertsPerMinute: 1,
-    }, "", "")
+    }, "", "", nil)
     assert.NoError(t, err)
 
     // Create a mock rule failure
@@ -162,7 +162,7 @@ func TestSendMalwareAlertHTTPExporter(t *testing.T) {
     // Create an HTTPExporter with the mock server URL
     exporter, err := InitHTTPExporter(HTTPExporterConfig{
         URL: server.URL,
-    }, "", "")
+    }, "", "", nil)
     assert.NoError(t, err)
 
     // Create a mock malware description
@@ -234,13 +234,13 @@ func TestSendMalwareAlertHTTPExporter(t *testing.T) {
 
 func TestValidateHTTPExporterConfig(t *testing.T) {
     // Test case: URL is empty
-    _, err := InitHTTPExporter(HTTPExporterConfig{}, "", "")
+    _, err := InitHTTPExporter(HTTPExporterConfig{}, "", "", nil)
     assert.Error(t, err)
 
     // Test case: URL is not empty
     exp, err := InitHTTPExporter(HTTPExporterConfig{
         URL: "http://localhost:9093",
-    }, "cluster", "node")
+    }, "cluster", "node", nil)
     assert.NoError(t, err)
     assert.Equal(t, "POST", exp.config.Method)
     assert.Equal(t, 5, exp.config.TimeoutSeconds)
@@ -258,7 +258,7 @@ func TestValidateHTTPExporterConfig(t *testing.T) {
         Headers: map[string]string{
             "Authorization": "Bearer token",
         },
-    }, "", "")
+    }, "", "", nil)
     assert.NoError(t, err)
     assert.Equal(t, "PUT", exp.config.Method)
     assert.Equal(t, 2, exp.config.TimeoutSeconds)
@@ -269,6 +269,6 @@ func TestValidateHTTPExporterConfig(t *testing.T) {
     _, err = InitHTTPExporter(HTTPExporterConfig{
         URL:    "http://localhost:9093",
         Method: "DELETE",
-    }, "", "")
+    }, "", "", nil)
     assert.Error(t, err)
 }
diff --git a/pkg/exporters/stdout_exporter.go b/pkg/exporters/stdout_exporter.go
index 64770dc3..e138b2e6 100644
--- a/pkg/exporters/stdout_exporter.go
+++ b/pkg/exporters/stdout_exporter.go
@@ -3,6 +3,7 @@ package exporters
 import (
     "os"
 
+    apitypes "github.com/armosec/armoapi-go/armotypes"
     "github.com/kubescape/node-agent/pkg/malwaremanager"
     "github.com/kubescape/node-agent/pkg/ruleengine"
 
@@ -10,10 +11,11 @@ import (
 )
 
 type StdoutExporter struct {
-    logger *log.Logger
+    logger        *log.Logger
+    cloudmetadata *apitypes.CloudMetadata
 }
 
-func InitStdoutExporter(useStdout *bool) *StdoutExporter {
+func InitStdoutExporter(useStdout *bool, cloudmetadata *apitypes.CloudMetadata) *StdoutExporter {
     if useStdout == nil {
         useStdout = new(bool)
         *useStdout = os.Getenv("STDOUT_ENABLED") != "false"
@@ -27,7 +29,8 @@ func InitStdoutExporter(useStdout *bool) *StdoutExporter {
     logger.SetOutput(os.Stderr)
 
     return &StdoutExporter{
-        logger: logger,
+        logger:        logger,
+        cloudmetadata: cloudmetadata,
     }
 }
 
@@ -39,6 +42,7 @@ func (exporter *StdoutExporter) SendRuleAlert(failedRule ruleengine.RuleFailure)
         "RuntimeProcessDetails": failedRule.GetRuntimeProcessDetails(),
         "RuntimeK8sDetails":     failedRule.GetRuntimeAlertK8sDetails(),
         "RuleID":                failedRule.GetRuleId(),
+        "CloudMetadata":         exporter.cloudmetadata,
     }).Error(failedRule.GetBaseRuntimeAlert().AlertName)
 }
 
@@ -50,5 +54,6 @@ func (exporter *StdoutExporter) SendMalwareAlert(malwareResult malwaremanager.Ma
         "RuntimeProcessDetails": malwareResult.GetRuntimeProcessDetails(),
         "RuntimeK8sDetails":     malwareResult.GetRuntimeAlertK8sDetails(),
         "RuleID":                "R3000",
+        "CloudMetadata":         exporter.cloudmetadata,
     }).Error(malwareResult.GetBasicRuntimeAlert().AlertName)
 }
diff --git a/pkg/exporters/stdout_exporter_test.go b/pkg/exporters/stdout_exporter_test.go
index e9d1f935..ca415e7f 100644
--- a/pkg/exporters/stdout_exporter_test.go
+++ b/pkg/exporters/stdout_exporter_test.go
@@ -13,42 +13,42 @@ import (
 func TestInitStdoutExporter(t *testing.T) {
     // Test when useStdout is nil
     useStdout := new(bool)
-    exporter := InitStdoutExporter(nil)
+    exporter := InitStdoutExporter(nil, nil)
     assert.NotNil(t, exporter)
 
     // Test when useStdout is true
     useStdout = new(bool)
     *useStdout = true
-    exporter = InitStdoutExporter(useStdout)
+    exporter = InitStdoutExporter(useStdout, nil)
     assert.NotNil(t, exporter)
     assert.NotNil(t, exporter.logger)
 
     // Test when useStdout is false
     useStdout = new(bool)
     *useStdout = false
-    exporter = InitStdoutExporter(useStdout)
+    exporter = InitStdoutExporter(useStdout, nil)
     assert.Nil(t, exporter)
 
     // Test when STDOUT_ENABLED environment variable is set to "false"
     os.Setenv("STDOUT_ENABLED", "false")
-    exporter = InitStdoutExporter(nil)
+    exporter = InitStdoutExporter(nil, nil)
     assert.Nil(t, exporter)
 
     // Test when STDOUT_ENABLED environment variable is set to "true"
     os.Setenv("STDOUT_ENABLED", "true")
-    exporter = InitStdoutExporter(nil)
+    exporter = InitStdoutExporter(nil, nil)
     assert.NotNil(t, exporter)
     assert.NotNil(t, exporter.logger)
 
     // Test when STDOUT_ENABLED environment variable is not set
     os.Unsetenv("STDOUT_ENABLED")
-    exporter = InitStdoutExporter(nil)
+    exporter = InitStdoutExporter(nil, nil)
     assert.NotNil(t, exporter)
     assert.NotNil(t, exporter.logger)
 }
 
 func TestStdoutExporter_SendAlert(t *testing.T) {
-    exporter := InitStdoutExporter(nil)
+    exporter := InitStdoutExporter(nil, nil)
     assert.NotNil(t, exporter)
 
     exporter.SendRuleAlert(&ruleengine.GenericRuleFailure{
diff --git a/pkg/malwaremanager/malware_manager_interface.go b/pkg/malwaremanager/malware_manager_interface.go
index 40653c93..00ce1884 100644
--- a/pkg/malwaremanager/malware_manager_interface.go
+++ b/pkg/malwaremanager/malware_manager_interface.go
@@ -5,14 +5,11 @@ import (
 
     apitypes "github.com/armosec/armoapi-go/armotypes"
     containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection"
-    tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types"
-    traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types"
     igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
 )
 
 type MalwareManagerClient interface {
-    ReportFileExec(k8sContainerID string, event tracerexectype.Event)
-    ReportFileOpen(k8sContainerID string, event traceropentype.Event)
+    ReportEvent(eventType utils.EventType, event utils.K8sEvent)
 
     ContainerCallback(notif containercollection.PubSubEvent)
 }
@@ -44,5 +41,5 @@ type MalwareResult interface {
 
 type MalwareScanner interface {
     // Scan scans the event for malware.
-    Scan(eventType utils.EventType, event interface{}, containerPid uint32) MalwareResult
+    Scan(eventType utils.EventType, event utils.K8sEvent, containerPid uint32) MalwareResult
 }
diff --git a/pkg/malwaremanager/malwaremanager_mock.go b/pkg/malwaremanager/malwaremanager_mock.go
index ccb6fac4..e0a3f3c9 100644
--- a/pkg/malwaremanager/malwaremanager_mock.go
+++ b/pkg/malwaremanager/malwaremanager_mock.go
@@ -2,8 +2,7 @@ package malwaremanager
 
 import (
     containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection"
-    tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types"
-    traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types"
+    "github.com/kubescape/node-agent/pkg/utils"
 )
 
 type MalwareManagerMock struct {
@@ -15,11 +14,7 @@ func CreateMalwareManagerMock() *MalwareManagerMock {
     return &MalwareManagerMock{}
 }
 
-func (r MalwareManagerMock) ReportFileExec(_ string, _ tracerexectype.Event) {
-    // noop
-}
-
-func (r MalwareManagerMock) ReportFileOpen(_ string, _ traceropentype.Event) {
+func (r MalwareManagerMock) ReportEvent(_ utils.EventType, _ utils.K8sEvent) {
     // noop
 }
diff --git a/pkg/malwaremanager/v1/clamav/clamav.go b/pkg/malwaremanager/v1/clamav/clamav.go
index 71409856..10c893f6 100644
--- a/pkg/malwaremanager/v1/clamav/clamav.go
+++ b/pkg/malwaremanager/v1/clamav/clamav.go
@@ -8,6 +8,7 @@ import (
     "github.com/kubescape/go-logger"
     "github.com/kubescape/go-logger/helpers"
     "github.com/kubescape/node-agent/pkg/malwaremanager"
+    "github.com/kubescape/node-agent/pkg/utils"
     nautils "github.com/kubescape/node-agent/pkg/utils"
 )
 
@@ -19,6 +20,7 @@ var _ malwaremanager.MalwareScanner = (*ClamAVClient)(nil)
 
 const (
     FixSuggestions = "Please remove the file from the system. If the file is required, please contact your security team for further investigation."
+    maxFileSize    = 50 * 1024 * 1024 // 50MB
 )
 
 func CreateClamAVClient(clamavSocket string) (*ClamAVClient, error) {
@@ -38,7 +40,7 @@ func CreateClamAVClient(clamavSocket string) (*ClamAVClient, error) {
     return &clamavClient, nil
 }
 
-func (c *ClamAVClient) Scan(eventType nautils.EventType, event interface{}, containerPid uint32) malwaremanager.MalwareResult {
+func (c *ClamAVClient) Scan(eventType nautils.EventType, event utils.K8sEvent, containerPid uint32) malwaremanager.MalwareResult {
     // Check if the event is of type tracerexectype.Event or traceropentype.Event.
     switch eventType {
     case nautils.ExecveEventType:
diff --git a/pkg/malwaremanager/v1/clamav/exec.go b/pkg/malwaremanager/v1/clamav/exec.go
index fea5da4e..ba34eb26 100644
--- a/pkg/malwaremanager/v1/clamav/exec.go
+++ b/pkg/malwaremanager/v1/clamav/exec.go
@@ -38,22 +38,20 @@ func (c *ClamAVClient) handleExecEvent(event *types.Event, containerPid uint32)
     for result := range response {
         if result.Status == clamd.RES_FOUND {
             // A malware was found, send an alert.
-            sha256hash, err := utils.CalculateSHA256FileHash(result.Path)
-            if err != nil {
-                logger.L().Error("Error calculating hash of %s", helpers.String("path", result.Path), helpers.Error(err))
-            }
-            sha1hash, err := utils.CalculateSHA1FileHash(result.Path)
-            if err != nil {
-                logger.L().Error("Error calculating hash of %s", helpers.String("path", result.Path), helpers.Error(err))
-            }
-            md5hash, err := utils.CalculateMD5FileHash(result.Path)
-            if err != nil {
-                logger.L().Error("Error calculating hash of %s", helpers.String("path", result.Path), helpers.Error(err))
-            }
             size, err := utils.GetFileSize(result.Path)
             if err != nil {
                 logger.L().Error("Error getting file size of %s", helpers.String("path", result.Path), helpers.Error(err))
             }
+
+            sha1hash := ""
+            md5hash := ""
+            if size != 0 && size < maxFileSize {
+                sha1hash, md5hash, err = utils.CalculateFileHashes(result.Path)
+                if err != nil {
+                    logger.L().Error("Error getting file hashes", helpers.Error(err))
+                }
+            }
+
             path := strings.TrimPrefix(result.Path, os.Getenv("HOST_ROOT"))
 
             return &malwaremanager2.GenericMalwareResult{
@@ -62,7 +60,6 @@ func (c *ClamAVClient) handleExecEvent(event *types.Event, containerPid uint32)
                 InfectedPID:    event.Pid,
                 FixSuggestions: FixSuggestions,
                 SHA1Hash:       sha1hash,
-                SHA256Hash:     sha256hash,
                 MD5Hash:        md5hash,
                 Severity:       10, // TODO: Get severity from api.
                 Size:           humanize.IBytes(uint64(size)),
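// Illustrative sketch — not part of the change above. utils.CalculateFileHashes
// is not shown in this diff; a plausible single-pass implementation (the real
// helper may differ) uses io.MultiWriter so the file is read once for both
// digests, which is the point of replacing the three separate hash calls:
package main

import (
    "crypto/md5"
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

func calculateFileHashes(path string) (sha1Hex string, md5Hex string, err error) {
    f, err := os.Open(path)
    if err != nil {
        return "", "", err
    }
    defer f.Close()

    s := sha1.New()
    m := md5.New()
    // One pass over the file feeds both digests.
    if _, err := io.Copy(io.MultiWriter(s, m), f); err != nil {
        return "", "", err
    }
    return hex.EncodeToString(s.Sum(nil)), hex.EncodeToString(m.Sum(nil)), nil
}

func main() {
    sha1hash, md5hash, err := calculateFileHashes("/etc/hostname")
    fmt.Println(sha1hash, md5hash, err)
}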
diff --git a/pkg/malwaremanager/v1/clamav/open.go b/pkg/malwaremanager/v1/clamav/open.go
index a22c99bd..96cab1f4 100644
--- a/pkg/malwaremanager/v1/clamav/open.go
+++ b/pkg/malwaremanager/v1/clamav/open.go
@@ -43,22 +43,19 @@ func (c *ClamAVClient) handleOpenEvent(event *types.Event, containerPid uint32)
     for result := range response {
         if result.Status == clamd.RES_FOUND {
             // A malware was found, send an alert.
-            sha256hash, err := utils.CalculateSHA256FileHash(result.Path)
-            if err != nil {
-                logger.L().Error("Error calculating hash of %s", helpers.String("path", result.Path), helpers.Error(err))
-            }
-            sha1hash, err := utils.CalculateSHA1FileHash(result.Path)
-            if err != nil {
-                logger.L().Error("Error calculating hash of %s", helpers.String("path", result.Path), helpers.Error(err))
-            }
-            md5hash, err := utils.CalculateMD5FileHash(result.Path)
-            if err != nil {
-                logger.L().Error("Error calculating hash of %s", helpers.String("path", result.Path), helpers.Error(err))
-            }
             size, err := utils.GetFileSize(result.Path)
             if err != nil {
                 logger.L().Error("Error getting file size of %s", helpers.String("path", result.Path), helpers.Error(err))
             }
+
+            sha1hash := ""
+            md5hash := ""
+            if size != 0 && size < maxFileSize {
+                sha1hash, md5hash, err = utils.CalculateFileHashes(result.Path)
+                if err != nil {
+                    logger.L().Error("Error getting file hashes", helpers.Error(err))
+                }
+            }
             path := strings.TrimPrefix(result.Path, os.Getenv("HOST_ROOT"))
 
             return &malwaremanager2.GenericMalwareResult{
@@ -67,7 +64,6 @@ func (c *ClamAVClient) handleOpenEvent(event *types.Event, containerPid uint32)
                 InfectedPID:    event.Pid,
                 FixSuggestions: FixSuggestions,
                 SHA1Hash:       sha1hash,
-                SHA256Hash:     sha256hash,
                 MD5Hash:        md5hash,
                 Severity:       10, // TODO: Get severity from api.
                 Size:           humanize.IBytes(uint64(size)),
diff --git a/pkg/malwaremanager/v1/malware_manager.go b/pkg/malwaremanager/v1/malware_manager.go
index ba34f6ec..aee48edf 100644
--- a/pkg/malwaremanager/v1/malware_manager.go
+++ b/pkg/malwaremanager/v1/malware_manager.go
@@ -26,7 +26,10 @@ import (
     "github.com/kubescape/k8s-interface/workloadinterface"
 )
 
-const ScannedFilesMaxBufferLength = 10000
+const (
+    ScannedFilesMaxBufferLength = 10000
+    maxFileSize                 = 50 * 1024 * 1024 // 50MB
+)
 
 type MalwareManager struct {
     scannedFiles maps.SafeMap[string, mapset.Set[string]]
@@ -75,7 +78,7 @@ func (mm *MalwareManager) ContainerCallback(notif containercollection.PubSubEven
         return
     }
 
-    t := time.NewTicker(mm.cfg.InitialDelay)
+    t := time.NewTicker(utils.AddJitter(mm.cfg.InitialDelay, mm.cfg.MaxJitterPercentage))
 
     switch notif.Type {
     case containercollection.EventTypeAddContainer:
@@ -140,9 +143,28 @@ func (mm *MalwareManager) getWorkloadIdentifier(podNamespace, podName string) (s
     return generatedWlid, nil
 }
 
-func (mm *MalwareManager) ReportFileExec(_ string, event tracerexectype.Event) {
+func (mm *MalwareManager) ReportEvent(eventType utils.EventType, event utils.K8sEvent) {
+    switch eventType {
+    case utils.ExecveEventType:
+        exec, ok := event.(*tracerexectype.Event)
+        if !ok {
+            logger.L().Error("MalwareManager - failed to cast event to execve event")
+            return
+        }
+        mm.reportFileExec(exec)
+    case utils.OpenEventType:
+        open, ok := event.(*traceropentype.Event)
+        if !ok {
+            logger.L().Error("MalwareManager - failed to cast event to open event")
+            return
+        }
+        mm.reportFileOpen(open)
+    }
+}
+
+func (mm *MalwareManager) reportFileExec(event *tracerexectype.Event) {
     for _, scanner := range mm.malwareScanners {
-        if result := scanner.Scan(utils.ExecveEventType, &event, mm.containerIdToPid.Get(event.Runtime.ContainerID)); result != nil {
+        if result := scanner.Scan(utils.ExecveEventType, event, mm.containerIdToPid.Get(event.Runtime.ContainerID)); result != nil {
             result = mm.enrichMalwareResult(result)
             result.SetWorkloadDetails(mm.podToWlid.Get(utils.CreateK8sPodID(event.GetNamespace(), event.GetPod())))
             mm.exporter.SendMalwareAlert(result)
@@ -150,7 +172,7 @@ func (mm *MalwareManager) ReportFileExec(_ string, event tracerexectype.Event) {
     }
 
     if mm.scannedFiles.Has(event.Runtime.ContainerID) && mm.scannedFiles.Get(event.Runtime.ContainerID).Cardinality() <= ScannedFilesMaxBufferLength {
-        hostFilePath, err := utils.GetHostFilePathFromEvent(&event, mm.containerIdToPid.Get(event.Runtime.ContainerID))
+        hostFilePath, err := utils.GetHostFilePathFromEvent(event, mm.containerIdToPid.Get(event.Runtime.ContainerID))
         if err != nil {
             return
         }
@@ -159,7 +181,7 @@ func (mm *MalwareManager) ReportFileExec(_ string, event tracerexectype.Event) {
     }
 }
 
-func (mm *MalwareManager) ReportFileOpen(_ string, event traceropentype.Event) {
+func (mm *MalwareManager) reportFileOpen(event *traceropentype.Event) {
     // TODO: Add a check if the file is being opened for read.
 
     // Skip directories.
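// Illustrative sketch — not part of the change above. ReportEvent collapses
// the old per-type methods into one entry point: callers pass the EventType
// tag and the manager type-switches back to the concrete tracer event. The
// types below are stand-ins for utils.EventType / utils.K8sEvent and the
// Inspektor Gadget event structs.
package main

import "fmt"

type eventType int

const (
    execveEventType eventType = iota
    openEventType
)

type k8sEvent interface{ GetPod() string }

type execEvent struct{ pod string }

func (e *execEvent) GetPod() string { return e.pod }

func reportEvent(t eventType, ev k8sEvent) {
    switch t {
    case execveEventType:
        exec, ok := ev.(*execEvent)
        if !ok {
            fmt.Println("failed to cast event to execve event")
            return
        }
        fmt.Println("scanning exec in pod", exec.GetPod())
    case openEventType:
        // same cast-and-dispatch pattern for open events
    }
}

func main() {
    reportEvent(execveEventType, &execEvent{pod: "nginx-1"})
}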
@@ -167,7 +189,7 @@ func (mm *MalwareManager) ReportFileOpen(_ string, event traceropentype.Event) {
         return
     }
 
-    hostFilePath, err := utils.GetHostFilePathFromEvent(&event, mm.containerIdToPid.Get(event.Runtime.ContainerID))
+    hostFilePath, err := utils.GetHostFilePathFromEvent(event, mm.containerIdToPid.Get(event.Runtime.ContainerID))
     if err != nil {
         return
     }
@@ -186,7 +208,7 @@ func (mm *MalwareManager) ReportFileOpen(_ string, event traceropentype.Event) {
     }
 
     for _, scanner := range mm.malwareScanners {
-        if result := scanner.Scan(utils.OpenEventType, &event, mm.containerIdToPid.Get(event.Runtime.ContainerID)); result != nil {
+        if result := scanner.Scan(utils.OpenEventType, event, mm.containerIdToPid.Get(event.Runtime.ContainerID)); result != nil {
             result = mm.enrichMalwareResult(result)
             result.SetWorkloadDetails(mm.podToWlid.Get(utils.CreateK8sPodID(event.GetNamespace(), event.GetPod())))
             mm.exporter.SendMalwareAlert(result)
@@ -209,38 +231,25 @@ func (mm *MalwareManager) enrichMalwareResult(malwareResult malwaremanager.Malwa
 
     baseRuntimeAlert.Timestamp = time.Unix(0, int64(malwareResult.GetTriggerEvent().Timestamp))
 
-    if baseRuntimeAlert.MD5Hash == "" && hostPath != "" {
-        md5hash, err := utils.CalculateMD5FileHash(hostPath)
-        if err != nil {
-            md5hash = ""
-        }
-        baseRuntimeAlert.MD5Hash = md5hash
-    }
-
-    if baseRuntimeAlert.SHA1Hash == "" && hostPath != "" {
-        sha1hash, err := utils.CalculateSHA1FileHash(hostPath)
+    var size int64 = 0
+    if hostPath != "" {
+        size, err = utils.GetFileSize(hostPath)
         if err != nil {
-            sha1hash = ""
+            size = 0
         }
-
-        baseRuntimeAlert.SHA1Hash = sha1hash
     }
 
-    if baseRuntimeAlert.SHA256Hash == "" && hostPath != "" {
-        sha256hash, err := utils.CalculateSHA256FileHash(hostPath)
-        if err != nil {
-            sha256hash = ""
-        }
-
-        baseRuntimeAlert.SHA256Hash = sha256hash
+    if baseRuntimeAlert.Size == "" && hostPath != "" && size != 0 {
+        baseRuntimeAlert.Size = humanize.Bytes(uint64(size))
     }
 
-    if baseRuntimeAlert.Size == "" && hostPath != "" {
-        size, err := utils.GetFileSize(hostPath)
-        if err != nil {
-            baseRuntimeAlert.Size = ""
-        } else {
-            baseRuntimeAlert.Size = humanize.Bytes(uint64(size))
+    if size != 0 && size < maxFileSize && hostPath != "" {
+        if baseRuntimeAlert.MD5Hash == "" || baseRuntimeAlert.SHA1Hash == "" {
+            sha1hash, md5hash, err := utils.CalculateFileHashes(hostPath)
+            if err == nil {
+                baseRuntimeAlert.MD5Hash = md5hash
+                baseRuntimeAlert.SHA1Hash = sha1hash
+            }
         }
     }
diff --git a/pkg/networkmanager/v2/network_manager.go b/pkg/networkmanager/v2/network_manager.go
index 83258ce6..13cddc70 100644
--- a/pkg/networkmanager/v2/network_manager.go
+++ b/pkg/networkmanager/v2/network_manager.go
@@ -6,23 +6,8 @@ import (
     "fmt"
     "time"
 
-    "github.com/kubescape/node-agent/pkg/config"
-    "github.com/kubescape/node-agent/pkg/dnsmanager"
-    "github.com/kubescape/node-agent/pkg/k8sclient"
-    "github.com/kubescape/node-agent/pkg/networkmanager"
-    "github.com/kubescape/node-agent/pkg/objectcache"
-    "github.com/kubescape/node-agent/pkg/storage"
-    "github.com/kubescape/node-agent/pkg/utils"
-
-    "k8s.io/utils/ptr"
-
-    "github.com/cenkalti/backoff/v4"
-    "go.opentelemetry.io/otel/attribute"
-    "go.opentelemetry.io/otel/trace"
-
-    helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
-
     "github.com/armosec/utils-k8s-go/wlid"
+    "github.com/cenkalti/backoff/v4"
     mapset "github.com/deckarep/golang-set/v2"
     "github.com/google/uuid"
     "github.com/goradd/maps"
@@ -31,12 +16,24 @@ import (
     "github.com/kubescape/go-logger"
     "github.com/kubescape/go-logger/helpers"
     "github.com/kubescape/k8s-interface/instanceidhandler/v1"
+    helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
     "github.com/kubescape/k8s-interface/workloadinterface"
+    "github.com/kubescape/node-agent/pkg/config"
+    "github.com/kubescape/node-agent/pkg/dnsmanager"
+    "github.com/kubescape/node-agent/pkg/k8sclient"
+    "github.com/kubescape/node-agent/pkg/networkmanager"
+    "github.com/kubescape/node-agent/pkg/objectcache"
+    "github.com/kubescape/node-agent/pkg/storage"
+    "github.com/kubescape/node-agent/pkg/utils"
     "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
     storageUtils "github.com/kubescape/storage/pkg/utils"
     "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/trace"
+    "istio.io/pkg/cache"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/utils/ptr"
 )
 
 type NetworkManager struct {
@@ -46,8 +43,8 @@ type NetworkManager struct {
     containerMutexes         storageUtils.MapMutex[string]                                  // key is k8sContainerID
     trackedContainers        mapset.Set[string]                                             // key is k8sContainerID
     removedContainers        mapset.Set[string]                                             // key is k8sContainerID
-    droppedEvents            maps.SafeMap[string, bool]                                     // key is k8sContainerID
-    savedEvents              maps.SafeMap[string, mapset.Set[networkmanager.NetworkEvent]] // key is k8sContainerID
+    droppedEventsContainers  mapset.Set[string]                                             // key is k8sContainerID
+    savedEvents              maps.SafeMap[string, cache.ExpiringCache]                      // key is k8sContainerID
     toSaveEvents             maps.SafeMap[string, mapset.Set[networkmanager.NetworkEvent]] // key is k8sContainerID
     watchedContainerChannels maps.SafeMap[string, chan error]                               // key is ContainerID
     k8sClient                k8sclient.K8sClientInterface
@@ -61,17 +58,18 @@ var _ networkmanager.NetworkManagerClient = (*NetworkManager)(nil)
 
 func CreateNetworkManager(ctx context.Context, cfg config.Config, clusterName string, k8sClient k8sclient.K8sClientInterface, storageClient storage.StorageClient, dnsResolverClient dnsmanager.DNSResolver, preRunningContainerIDs mapset.Set[string], k8sObjectCache objectcache.K8sObjectCache) *NetworkManager {
     return &NetworkManager{
-        cfg:                    cfg,
-        clusterName:            clusterName,
-        ctx:                    ctx,
-        dnsResolverClient:      dnsResolverClient,
-        k8sClient:              k8sClient,
-        k8sObjectCache:         k8sObjectCache,
-        storageClient:          storageClient,
-        containerMutexes:       storageUtils.NewMapMutex[string](),
-        trackedContainers:      mapset.NewSet[string](),
-        removedContainers:      mapset.NewSet[string](),
-        preRunningContainerIDs: preRunningContainerIDs,
+        cfg:                     cfg,
+        clusterName:             clusterName,
+        ctx:                     ctx,
+        dnsResolverClient:       dnsResolverClient,
+        k8sClient:               k8sClient,
+        k8sObjectCache:          k8sObjectCache,
+        storageClient:           storageClient,
+        containerMutexes:        storageUtils.NewMapMutex[string](),
+        trackedContainers:       mapset.NewSet[string](),
+        removedContainers:       mapset.NewSet[string](),
+        droppedEventsContainers: mapset.NewSet[string](),
+        preRunningContainerIDs:  preRunningContainerIDs,
     }
 }
 
@@ -135,11 +133,12 @@ func (nm *NetworkManager) deleteResources(watchedContainer *utils.WatchedContain
     // delete resources
     watchedContainer.UpdateDataTicker.Stop()
     nm.trackedContainers.Remove(watchedContainer.K8sContainerID)
-    nm.droppedEvents.Delete(watchedContainer.K8sContainerID)
+    nm.droppedEventsContainers.Remove(watchedContainer.K8sContainerID)
     nm.savedEvents.Delete(watchedContainer.K8sContainerID)
     nm.toSaveEvents.Delete(watchedContainer.K8sContainerID)
     nm.watchedContainerChannels.Delete(watchedContainer.ContainerID)
 }
+
 func (nm *NetworkManager) ContainerReachedMaxTime(containerID string) {
     if channel := nm.watchedContainerChannels.Get(containerID); channel != nil {
         channel <- utils.ContainerReachedMaxTime
@@ -183,7 +182,7 @@ func (nm *NetworkManager) monitorContainer(ctx context.Context, container *conta
             // adjust ticker after first tick
             if !watchedContainer.InitialDelayExpired {
                 watchedContainer.InitialDelayExpired = true
-                watchedContainer.UpdateDataTicker.Reset(nm.cfg.UpdateDataPeriod)
+                watchedContainer.UpdateDataTicker.Reset(utils.AddJitter(nm.cfg.UpdateDataPeriod, nm.cfg.MaxJitterPercentage))
             }
             watchedContainer.SetStatus(utils.WatchedContainerStatusReady)
             nm.saveNetworkEvents(ctx, watchedContainer, container.K8s.Namespace)
@@ -250,7 +249,7 @@ func (nm *NetworkManager) saveNetworkEvents(ctx context.Context, watchedContaine
     // sleep for container index second to desynchronize the profiles saving
     time.Sleep(time.Duration(watchedContainer.ContainerIndex) * time.Second)
 
-    if droppedEvents := nm.droppedEvents.Get(watchedContainer.K8sContainerID); droppedEvents {
+    if nm.droppedEventsContainers.ContainsOne(watchedContainer.K8sContainerID) {
         watchedContainer.SetStatus(utils.WatchedContainerStatusMissingRuntime)
     }
 
@@ -413,7 +412,11 @@ func (nm *NetworkManager) saveNetworkEvents(ctx context.Context, watchedContaine
             watchedContainer.ResetStatusUpdatedFlag()
 
             // record saved events
-            nm.savedEvents.Get(watchedContainer.K8sContainerID).Append(toSaveEvents.ToSlice()...)
+            savedEvents := nm.savedEvents.Get(watchedContainer.K8sContainerID)
+            toSaveEvents.Each(func(event networkmanager.NetworkEvent) bool {
+                savedEvents.Set(event, nil)
+                return false
+            })
             logger.L().Debug("NetworkManager - saved neighborhood",
                 helpers.Int("events", toSaveEvents.Cardinality()),
                 helpers.String("slug", slug),
@@ -433,7 +436,7 @@ func (nm *NetworkManager) startNetworkMonitoring(ctx context.Context, container
 
     watchedContainer := &utils.WatchedContainerData{
         ContainerID:      container.Runtime.ContainerID,
-        UpdateDataTicker: time.NewTicker(utils.AddRandomDuration(5, 10, nm.cfg.InitialDelay)), // get out of sync with the relevancy manager
+        UpdateDataTicker: time.NewTicker(utils.AddJitter(nm.cfg.InitialDelay, nm.cfg.MaxJitterPercentage)),
         SyncChannel:      syncChannel,
         K8sContainerID:   k8sContainerID,
         NsMntId:          container.Mntns,
@@ -489,8 +492,7 @@ func (nm *NetworkManager) ContainerCallback(notif containercollection.PubSubEven
                 helpers.String("k8s workload", k8sContainerID))
             return
         }
-        nm.droppedEvents.Set(k8sContainerID, false)
-        nm.savedEvents.Set(k8sContainerID, mapset.NewSet[networkmanager.NetworkEvent]())
+        nm.savedEvents.Set(k8sContainerID, cache.NewTTL(5*nm.cfg.UpdateDataPeriod, nm.cfg.UpdateDataPeriod))
         nm.toSaveEvents.Set(k8sContainerID, mapset.NewSet[networkmanager.NetworkEvent]())
         nm.removedContainers.Remove(k8sContainerID) // make sure container is not in the removed list
         nm.trackedContainers.Add(k8sContainerID)
@@ -526,18 +528,14 @@ func (nm *NetworkManager) ReportNetworkEvent(k8sContainerID string, event tracer
     networkEvent.SetDestinationPodLabels(event.DstEndpoint.PodLabels)
 
     // skip if we already saved this event
-    savedEvents := nm.savedEvents.Get(k8sContainerID)
-    if savedEvents.Contains(networkEvent) {
+    if _, ok := nm.savedEvents.Get(k8sContainerID).Get(networkEvent); ok {
         return
     }
     nm.toSaveEvents.Get(k8sContainerID).Add(networkEvent)
 }
 
 func (nm *NetworkManager) ReportDroppedEvent(k8sContainerID string) {
-    if err := nm.waitForContainer(k8sContainerID); err != nil {
-        return
-    }
-    nm.droppedEvents.Set(k8sContainerID, true)
+    nm.droppedEventsContainers.Add(k8sContainerID)
 }
 
 func (nm *NetworkManager) createNetworkNeighbor(networkEvent networkmanager.NetworkEvent, namespace string) *v1beta1.NetworkNeighbor {
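// Illustrative sketch — not part of the change above. The manager swaps an
// unbounded "saved events" set for a TTL cache (istio.io/pkg/cache), so
// deduplication state expires on its own instead of growing for the life of
// the container. Minimal version of that pattern, using the same
// NewTTL/Set/Get calls as the code above:
package main

import (
    "fmt"
    "time"

    "istio.io/pkg/cache"
)

type networkEvent struct {
    dstIP string
    port  uint16
}

func main() {
    // Entries live ~5 minutes; the evicter sweeps every minute.
    saved := cache.NewTTL(5*time.Minute, time.Minute)

    ev := networkEvent{dstIP: "10.0.0.7", port: 443}

    if _, ok := saved.Get(ev); ok {
        fmt.Println("already saved recently, skipping")
        return
    }
    saved.Set(ev, nil)
    fmt.Println("queued for saving")
}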
diff --git a/pkg/nodeprofilemanager/v1/nodeprofile_manager.go b/pkg/nodeprofilemanager/v1/nodeprofile_manager.go
index f7104790..ce1985ed 100644
--- a/pkg/nodeprofilemanager/v1/nodeprofile_manager.go
+++ b/pkg/nodeprofilemanager/v1/nodeprofile_manager.go
@@ -51,7 +51,7 @@ var _ nodeprofilemanager.NodeProfileManagerClient = (*NodeProfileManager)(nil)
 
 func (n *NodeProfileManager) Start(ctx context.Context) {
     go func() {
-        time.Sleep(n.config.InitialDelay)
+        time.Sleep(utils.AddJitter(n.config.InitialDelay, n.config.MaxJitterPercentage))
         for {
             time.Sleep(n.config.NodeProfileInterval)
             profile, err := n.getProfile()
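// Illustrative sketch — not part of the change above. utils.AddJitter is not
// shown in this diff; a plausible implementation (hypothetical — the real one
// may differ) widens a duration by a random percentage so that many
// containers or nodes don't all report at the same instant:
package main

import (
    "fmt"
    "math/rand"
    "time"
)

func addJitter(d time.Duration, maxJitterPercentage int) time.Duration {
    if maxJitterPercentage <= 0 {
        return d
    }
    // Pick a jitter in [0, maxJitterPercentage] percent of the base duration.
    pct := rand.Int63n(int64(maxJitterPercentage) + 1)
    return d + time.Duration(pct)*d/100
}

func main() {
    fmt.Println(addJitter(time.Minute, 10)) // somewhere in [1m0s, 1m6s]
}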
diff --git a/pkg/objectcache/applicationprofilecache/applicationprofilecache.go b/pkg/objectcache/applicationprofilecache/applicationprofilecache.go
index 93b77511..eae254ef 100644
--- a/pkg/objectcache/applicationprofilecache/applicationprofilecache.go
+++ b/pkg/objectcache/applicationprofilecache/applicationprofilecache.go
@@ -3,6 +3,8 @@ package applicationprofilecache
 import (
     "context"
     "fmt"
+    "strings"
+    "time"
 
     mapset "github.com/deckarep/golang-set/v2"
     "github.com/goradd/maps"
@@ -11,13 +13,14 @@ import (
     "github.com/kubescape/k8s-interface/instanceidhandler/v1"
     helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
     "github.com/kubescape/k8s-interface/workloadinterface"
-    "github.com/kubescape/node-agent/pkg/k8sclient"
     "github.com/kubescape/node-agent/pkg/objectcache"
+    "github.com/kubescape/node-agent/pkg/utils"
     "github.com/kubescape/node-agent/pkg/watcher"
     "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+    versioned "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1"
+    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-    k8sruntime "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
 )
 
@@ -45,28 +48,102 @@ func newApplicationProfileState(ap *v1beta1.ApplicationProfile) applicationProfi
 }
 
 type ApplicationProfileCacheImpl struct {
-    containerToSlug  maps.SafeMap[string, string]                      // cache the containerID to slug mapping, this will enable a quick lookup of the application profile
-    slugToAppProfile maps.SafeMap[string, *v1beta1.ApplicationProfile] // cache the application profile
-    slugToContainers maps.SafeMap[string, mapset.Set[string]]          // cache the containerIDs that belong to the application profile, this will enable removing from cache AP without pods
-    slugToState      maps.SafeMap[string, applicationProfileState]     // cache the containerID to slug mapping, this will enable a quick lookup of the application profile
-    k8sClient        k8sclient.K8sClientInterface
-    allProfiles      mapset.Set[string] // cache all the application profiles that are ready. this will enable removing from cache AP without pods that are running on the same node
-    nodeName         string
+    containerToSlug     maps.SafeMap[string, string]                      // cache the containerID to slug mapping, this will enable a quick lookup of the application profile
+    slugToAppProfile    maps.SafeMap[string, *v1beta1.ApplicationProfile] // cache the application profile
+    slugToContainers    maps.SafeMap[string, mapset.Set[string]]          // cache the containerIDs that belong to the application profile, this will enable removing from cache AP without pods
+    slugToState         maps.SafeMap[string, applicationProfileState]     // cache the state of each application profile, this will enable a quick lookup of its completion status
+    storageClient       versioned.SpdxV1beta1Interface
+    allProfiles         mapset.Set[string] // cache all the application profiles that are ready. this will enable removing from cache AP without pods that are running on the same node
+    nodeName            string
+    maxDelaySeconds     int // maximum delay in seconds before getting the full object from the storage
+    userManagedProfiles maps.SafeMap[string, *v1beta1.ApplicationProfile]
 }
 
-func NewApplicationProfileCache(nodeName string, k8sClient k8sclient.K8sClientInterface) *ApplicationProfileCacheImpl {
+func NewApplicationProfileCache(nodeName string, storageClient versioned.SpdxV1beta1Interface, maxDelaySeconds int) *ApplicationProfileCacheImpl {
     return &ApplicationProfileCacheImpl{
-        nodeName:         nodeName,
-        k8sClient:        k8sClient,
-        containerToSlug:  maps.SafeMap[string, string]{},
-        slugToContainers: maps.SafeMap[string, mapset.Set[string]]{},
-        allProfiles:      mapset.NewSet[string](),
+        nodeName:            nodeName,
+        maxDelaySeconds:     maxDelaySeconds,
+        storageClient:       storageClient,
+        containerToSlug:     maps.SafeMap[string, string]{},
+        slugToAppProfile:    maps.SafeMap[string, *v1beta1.ApplicationProfile]{},
+        slugToContainers:    maps.SafeMap[string, mapset.Set[string]]{},
+        slugToState:         maps.SafeMap[string, applicationProfileState]{},
+        allProfiles:         mapset.NewSet[string](),
+        userManagedProfiles: maps.SafeMap[string, *v1beta1.ApplicationProfile]{},
     }
-
 }
 
 // ------------------ objectcache.ApplicationProfileCache methods -----------------------
 
+func (ap *ApplicationProfileCacheImpl) handleUserManagedProfile(appProfile *v1beta1.ApplicationProfile) {
+    baseProfileName := strings.TrimPrefix(appProfile.GetName(), "ug-")
+    baseProfileUniqueName := objectcache.UniqueName(appProfile.GetNamespace(), baseProfileName)
+
+    // Get the full user managed profile from the storage
+    userManagedProfile, err := ap.getApplicationProfile(appProfile.GetNamespace(), appProfile.GetName())
+    if err != nil {
+        logger.L().Error("failed to get full application profile", helpers.Error(err))
+        return
+    }
+
+    // Store the user-managed profile temporarily
+    ap.userManagedProfiles.Set(baseProfileUniqueName, userManagedProfile)
+
+    // If we have the base profile cached, fetch a fresh copy and merge.
+    // If the base profile is not cached yet, the merge will be attempted when it's added.
+    if ap.slugToAppProfile.Has(baseProfileUniqueName) {
+        // Fetch fresh base profile from cluster
+        freshBaseProfile, err := ap.getApplicationProfile(appProfile.GetNamespace(), baseProfileName)
+        if err != nil {
+            logger.L().Error("failed to get fresh base profile for merging",
+                helpers.String("name", baseProfileName),
+                helpers.String("namespace", appProfile.GetNamespace()),
+                helpers.Error(err))
+            return
+        }
+
+        mergedProfile := ap.performMerge(freshBaseProfile, userManagedProfile)
+        ap.slugToAppProfile.Set(baseProfileUniqueName, mergedProfile)
+
+        // Clean up the user-managed profile after successful merge
+        ap.userManagedProfiles.Delete(baseProfileUniqueName)
+
+        logger.L().Debug("merged user-managed profile with fresh base profile",
+            helpers.String("name", baseProfileName),
+            helpers.String("namespace", appProfile.GetNamespace()))
+    }
+}
+
+func (ap *ApplicationProfileCacheImpl) addApplicationProfile(obj runtime.Object) {
+    appProfile := obj.(*v1beta1.ApplicationProfile)
+    apName := objectcache.MetaUniqueName(appProfile)
+
+    if isUserManagedProfile(appProfile) {
+        ap.handleUserManagedProfile(appProfile)
+        return
+    }
+
+    // Original behavior for normal profiles
+    apState := newApplicationProfileState(appProfile)
+    ap.slugToState.Set(apName, apState)
+
+    if apState.status != helpersv1.Completed {
+        if ap.slugToAppProfile.Has(apName) {
+            ap.slugToAppProfile.Delete(apName)
+            ap.allProfiles.Remove(apName)
+        }
+        return
+    }
+
+    ap.allProfiles.Add(apName)
+
+    if ap.slugToContainers.Has(apName) {
+        time.AfterFunc(utils.RandomDuration(ap.maxDelaySeconds, time.Second), func() {
+            ap.addFullApplicationProfile(appProfile, apName)
+        })
+    }
+}
+
 func (ap *ApplicationProfileCacheImpl) GetApplicationProfile(containerID string) *v1beta1.ApplicationProfile {
     if s := ap.containerToSlug.Get(containerID); s != "" {
         return ap.slugToAppProfile.Get(s)
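// Illustrative sketch — not part of the change above. The cache keys a
// user-managed profile to its base profile by stripping the "ug-" name
// prefix inside the same namespace; isUserManagedProfile itself is defined
// outside this excerpt. The helper below is illustrative only.
package main

import (
    "fmt"
    "strings"
)

func baseProfileKey(namespace, name string) string {
    base := strings.TrimPrefix(name, "ug-")
    return namespace + "/" + base // objectcache.UniqueName-style key
}

func main() {
    // "ug-replicaset-nginx" patches "replicaset-nginx" in the same namespace.
    fmt.Println(baseProfileKey("default", "ug-replicaset-nginx"))
}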
watch pod methods ----------------------- -func (ap *ApplicationProfileCacheImpl) addPod(podU *unstructured.Unstructured) { +func (ap *ApplicationProfileCacheImpl) addPod(obj runtime.Object) { + pod := obj.(*corev1.Pod) - slug, err := getSlug(podU) + slug, err := getSlug(pod) if err != nil { - logger.L().Error("ApplicationProfileCacheImpl: failed to get slug", helpers.String("namespace", podU.GetNamespace()), helpers.String("pod", podU.GetName()), helpers.Error(err)) + logger.L().Error("ApplicationProfileCacheImpl: failed to get slug", helpers.String("namespace", pod.GetNamespace()), helpers.String("pod", pod.GetName()), helpers.Error(err)) return } - uniqueSlug := objectcache.UniqueName(podU.GetNamespace(), slug) - - pod, err := objectcache.UnstructuredToPod(podU) - if err != nil { - logger.L().Error("ApplicationProfileCacheImpl: failed to unmarshal pod", helpers.String("namespace", podU.GetNamespace()), helpers.String("pod", podU.GetName()), helpers.Error(err)) - return - } + uniqueSlug := objectcache.UniqueName(pod.GetNamespace(), slug) // in case of modified pod, remove the old containers terminatedContainers := objectcache.ListTerminatedContainers(pod) @@ -174,7 +245,7 @@ func (ap *ApplicationProfileCacheImpl) addPod(podU *unstructured.Unstructured) { if ap.allProfiles.Contains(uniqueSlug) && !ap.slugToAppProfile.Has(uniqueSlug) { // get the application profile - appProfile, err := ap.getApplicationProfile(podU.GetNamespace(), slug) + appProfile, err := ap.getApplicationProfile(pod.GetNamespace(), slug) if err != nil { logger.L().Error("failed to get application profile", helpers.Error(err)) continue @@ -187,19 +258,15 @@ func (ap *ApplicationProfileCacheImpl) addPod(podU *unstructured.Unstructured) { } -func (ap *ApplicationProfileCacheImpl) deletePod(obj *unstructured.Unstructured) { - - pod, err := objectcache.UnstructuredToPod(obj) - if err != nil { - logger.L().Error("ApplicationProfileCacheImpl: failed to unmarshal pod", helpers.String("namespace", obj.GetNamespace()), helpers.String("pod", obj.GetName()), helpers.Error(err)) - return - } +func (ap *ApplicationProfileCacheImpl) deletePod(obj runtime.Object) { + pod := obj.(*corev1.Pod) containers := objectcache.ListContainersIDs(pod) for _, container := range containers { ap.removeContainer(container) } } + func (ap *ApplicationProfileCacheImpl) removeContainer(containerID string) { uniqueSlug := ap.containerToSlug.Get(containerID) @@ -219,79 +286,114 @@ func (ap *ApplicationProfileCacheImpl) removeContainer(containerID string) { } // ------------------ watch application profile methods ----------------------- -func (ap *ApplicationProfileCacheImpl) addApplicationProfile(_ context.Context, obj *unstructured.Unstructured) { - apName := objectcache.UnstructuredUniqueName(obj) - appProfile, err := unstructuredToApplicationProfile(obj) +func (ap *ApplicationProfileCacheImpl) addFullApplicationProfile(appProfile *v1beta1.ApplicationProfile, apName string) { + fullAP, err := ap.getApplicationProfile(appProfile.GetNamespace(), appProfile.GetName()) if err != nil { - logger.L().Error("failed to unmarshal application profile", helpers.String("name", apName), helpers.Error(err)) + logger.L().Error("failed to get full application profile", helpers.Error(err)) return } - apState := newApplicationProfileState(appProfile) - ap.slugToState.Set(apName, apState) - // the cache holds only completed application profiles. - // check if the application profile is completed - // if status was completed and now is not (e.g. 
mode changed from complete to partial), remove from cache
-	if apState.status != helpersv1.Completed {
-		if ap.slugToAppProfile.Has(apName) {
-			ap.slugToAppProfile.Delete(apName)
-			ap.allProfiles.Remove(apName)
-		}
-		return
+	// Check if there's a pending user-managed profile to merge
+	if ap.userManagedProfiles.Has(apName) {
+		userManagedProfile := ap.userManagedProfiles.Get(apName)
+		fullAP = ap.performMerge(fullAP, userManagedProfile)
+		// Clean up the user-managed profile after successful merge
+		ap.userManagedProfiles.Delete(apName)
+		logger.L().Debug("merged pending user-managed profile", helpers.String("name", apName))
 	}

-	// add to the cache
-	ap.allProfiles.Add(apName)
-
-	if ap.slugToContainers.Has(apName) {
-		// get the full application profile from the storage
-		// the watch only returns the metadata
-		fullAP, err := ap.getApplicationProfile(appProfile.GetNamespace(), appProfile.GetName())
-		if err != nil {
-			logger.L().Error("failed to get full application profile", helpers.Error(err))
-			return
-		}
-		ap.slugToAppProfile.Set(apName, fullAP)
-		for _, i := range ap.slugToContainers.Get(apName).ToSlice() {
-			ap.containerToSlug.Set(i, apName)
-		}
-
-		logger.L().Debug("added pod to application profile cache", helpers.String("name", apName))
+	ap.slugToAppProfile.Set(apName, fullAP)
+	for _, i := range ap.slugToContainers.Get(apName).ToSlice() {
+		ap.containerToSlug.Set(i, apName)
 	}
+	logger.L().Debug("added pod to application profile cache", helpers.String("name", apName))
 }

-func (ap *ApplicationProfileCacheImpl) deleteApplicationProfile(obj *unstructured.Unstructured) {
-	apName := objectcache.UnstructuredUniqueName(obj)
-	ap.slugToAppProfile.Delete(apName)
-	ap.slugToState.Delete(apName)
-	ap.allProfiles.Remove(apName)
+func (ap *ApplicationProfileCacheImpl) performMerge(normalProfile, userManagedProfile *v1beta1.ApplicationProfile) *v1beta1.ApplicationProfile {
+	mergedProfile := normalProfile.DeepCopy()
+
+	// Merge spec
+	mergedProfile.Spec.Containers = ap.mergeContainers(mergedProfile.Spec.Containers, userManagedProfile.Spec.Containers)
+	mergedProfile.Spec.InitContainers = ap.mergeContainers(mergedProfile.Spec.InitContainers, userManagedProfile.Spec.InitContainers)
+	mergedProfile.Spec.EphemeralContainers = ap.mergeContainers(mergedProfile.Spec.EphemeralContainers, userManagedProfile.Spec.EphemeralContainers)

-	logger.L().Info("deleted application profile from cache", helpers.String("uniqueSlug", apName))
+	return mergedProfile
 }

-func (ap *ApplicationProfileCacheImpl) getApplicationProfile(namespace, name string) (*v1beta1.ApplicationProfile, error) {
+func (ap *ApplicationProfileCacheImpl) mergeContainers(normalContainers, userManagedContainers []v1beta1.ApplicationProfileContainer) []v1beta1.ApplicationProfileContainer {
+	if len(userManagedContainers) != len(normalContainers) {
+		// If the number of containers doesn't match, we can't merge
+		logger.L().Error("failed to merge user-managed profile with base profile",
+			helpers.Int("normalContainers len", len(normalContainers)),
+			helpers.Int("userManagedContainers len", len(userManagedContainers)),
+			helpers.String("reason", "number of containers doesn't match"))
+		return normalContainers
+	}

-	u, err := ap.k8sClient.GetDynamicClient().Resource(groupVersionResource).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
+	// Assuming the normalContainers are already in the correct Pod order
+	// We'll merge user containers at their corresponding positions
+	for i := range 
normalContainers { + for _, userContainer := range userManagedContainers { + if normalContainers[i].Name == userContainer.Name { + ap.mergeContainer(&normalContainers[i], &userContainer) + break + } + } } - return unstructuredToApplicationProfile(u) + return normalContainers } -func unstructuredToApplicationProfile(obj *unstructured.Unstructured) (*v1beta1.ApplicationProfile, error) { +func (ap *ApplicationProfileCacheImpl) mergeContainer(normalContainer, userContainer *v1beta1.ApplicationProfileContainer) { + normalContainer.Capabilities = append(normalContainer.Capabilities, userContainer.Capabilities...) + normalContainer.Execs = append(normalContainer.Execs, userContainer.Execs...) + normalContainer.Opens = append(normalContainer.Opens, userContainer.Opens...) + normalContainer.Syscalls = append(normalContainer.Syscalls, userContainer.Syscalls...) + normalContainer.Endpoints = append(normalContainer.Endpoints, userContainer.Endpoints...) +} - ap := &v1beta1.ApplicationProfile{} - err := k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, ap) - if err != nil { - return nil, err +func (ap *ApplicationProfileCacheImpl) deleteApplicationProfile(obj runtime.Object) { + appProfile := obj.(*v1beta1.ApplicationProfile) + apName := objectcache.MetaUniqueName(appProfile) + + if isUserManagedProfile(appProfile) { + // For user-managed profiles, we need to use the base name for cleanup + baseProfileName := strings.TrimPrefix(appProfile.GetName(), "ug-") + baseProfileUniqueName := objectcache.UniqueName(appProfile.GetNamespace(), baseProfileName) + ap.userManagedProfiles.Delete(baseProfileUniqueName) + + logger.L().Debug("deleted user-managed profile from cache", + helpers.String("profileName", appProfile.GetName()), + helpers.String("baseProfile", baseProfileName)) + } else { + // For normal profiles, clean up all related data + ap.slugToAppProfile.Delete(apName) + ap.slugToState.Delete(apName) + ap.allProfiles.Remove(apName) + + // Log the deletion of normal profile + logger.L().Debug("deleted application profile from cache", + helpers.String("uniqueSlug", apName)) } - return ap, nil + // Clean up any orphaned user-managed profiles + ap.cleanupOrphanedUserManagedProfiles() +} + +func (ap *ApplicationProfileCacheImpl) getApplicationProfile(namespace, name string) (*v1beta1.ApplicationProfile, error) { + return ap.storageClient.ApplicationProfiles(namespace).Get(context.Background(), name, metav1.GetOptions{}) } -func getSlug(p *unstructured.Unstructured) (string, error) { - pod := workloadinterface.NewWorkloadObj(p.Object) +func getSlug(p *corev1.Pod) (string, error) { + // need to set APIVersion and Kind before unstructured conversion, preparing for instanceID extraction + p.APIVersion = "v1" + p.Kind = "Pod" + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&p) + if err != nil { + return "", fmt.Errorf("failed to convert runtime object to unstructured: %w", err) + } + pod := workloadinterface.NewWorkloadObj(unstructuredObj) if pod == nil { return "", fmt.Errorf("failed to get workload object") } @@ -312,5 +414,28 @@ func getSlug(p *unstructured.Unstructured) (string, error) { return "", fmt.Errorf("failed to get slug") } return slug, nil +} + +// Add cleanup method for any orphaned user-managed profiles +func (ap *ApplicationProfileCacheImpl) cleanupOrphanedUserManagedProfiles() { + // This could be called periodically or during certain operations + ap.userManagedProfiles.Range(func(key string, value *v1beta1.ApplicationProfile) bool { + if 
ap.slugToAppProfile.Has(key) { + // If base profile exists but merge didn't happen for some reason, + // attempt merge again and cleanup + if baseProfile := ap.slugToAppProfile.Get(key); baseProfile != nil { + mergedProfile := ap.performMerge(baseProfile, value) + ap.slugToAppProfile.Set(key, mergedProfile) + ap.userManagedProfiles.Delete(key) + logger.L().Debug("cleaned up orphaned user-managed profile", helpers.String("name", key)) + } + } + return true + }) +} +func isUserManagedProfile(appProfile *v1beta1.ApplicationProfile) bool { + return appProfile.Annotations != nil && + appProfile.Annotations["kubescape.io/managed-by"] == "User" && + strings.HasPrefix(appProfile.GetName(), "ug-") } diff --git a/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go b/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go index 87e3dd1c..3c1797e5 100644 --- a/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go +++ b/pkg/objectcache/applicationprofilecache/applicationprofilecache_test.go @@ -5,20 +5,19 @@ import ( "fmt" "slices" "testing" + "time" mapset "github.com/deckarep/golang-set/v2" - "github.com/kubescape/k8s-interface/k8sinterface" "github.com/kubescape/node-agent/mocks" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/watcher" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/kubescape/storage/pkg/generated/clientset/versioned/fake" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/scheme" ) @@ -30,49 +29,49 @@ func init() { func Test_AddHandlers(t *testing.T) { tests := []struct { - f func(ap *ApplicationProfileCacheImpl, ctx context.Context, obj *unstructured.Unstructured) - obj *unstructured.Unstructured + f func(ap *ApplicationProfileCacheImpl, ctx context.Context, obj runtime.Object) + obj runtime.Object name string slug string length int }{ { name: "add application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), f: (*ApplicationProfileCacheImpl).AddHandler, slug: "default/replicaset-nginx-77b4fdf86c", length: 1, }, { name: "add pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), f: (*ApplicationProfileCacheImpl).AddHandler, slug: "default/replicaset-collection-94c495554", length: 6, }, { name: "modify application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), f: (*ApplicationProfileCacheImpl).ModifyHandler, length: 1, }, { name: "modify pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), f: (*ApplicationProfileCacheImpl).ModifyHandler, slug: "default/replicaset-collection-94c495554", length: 6, }, { name: "delete application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), f: (*ApplicationProfileCacheImpl).DeleteHandler, slug: "default/replicaset-nginx-77b4fdf86c", length: 0, }, { name: "delete pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, 
mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), f: (*ApplicationProfileCacheImpl).DeleteHandler, slug: "default/replicaset-collection-94c495554", length: 0, @@ -80,14 +79,14 @@ func Test_AddHandlers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.obj.SetNamespace("default") - k8sClient := k8sinterface.NewKubernetesApiMock() - ap := NewApplicationProfileCache("", k8sClient) + tt.obj.(metav1.Object).SetNamespace("default") + storageClient := fake.NewSimpleClientset().SpdxV1beta1() + ap := NewApplicationProfileCache("", storageClient, 0) ap.slugToContainers.Set(tt.slug, mapset.NewSet[string]()) tt.f(ap, context.Background(), tt.obj) - switch mocks.TestKinds(tt.obj.GetKind()) { + switch mocks.TestKinds(tt.obj.GetObjectKind().GroupVersionKind().Kind) { case mocks.TestKindAP: assert.Equal(t, tt.length, ap.allProfiles.Cardinality()) case mocks.TestKindPod: @@ -101,17 +100,17 @@ func Test_addApplicationProfile(t *testing.T) { // add single application profile tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string annotations map[string]string - preCreatedPods []*unstructured.Unstructured // pre created pods - preCreatedAP []*unstructured.Unstructured // pre created application profiles + preCreatedPods []runtime.Object // pre created pods + preCreatedAP []runtime.Object // pre created application profiles shouldAdd bool shouldAddToPod bool }{ { name: "add single application profile nginx", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), annotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -120,7 +119,7 @@ func Test_addApplicationProfile(t *testing.T) { }, { name: "add application profile with partial annotation", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), annotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "partial", @@ -129,7 +128,7 @@ func Test_addApplicationProfile(t *testing.T) { }, { name: "ignore single application profile with incomplete annotation", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), annotations: map[string]string{ "kubescape.io/status": "ready", "kubescape.io/completion": "complete", @@ -138,8 +137,8 @@ func Test_addApplicationProfile(t *testing.T) { }, { name: "add application profile to pod", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), - preCreatedPods: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection)}, + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), + preCreatedPods: []runtime.Object{mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection)}, annotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -149,8 +148,8 @@ func Test_addApplicationProfile(t *testing.T) { }, { name: "add application profile without pod", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), - preCreatedPods: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx)}, + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), + preCreatedPods: []runtime.Object{mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx)}, annotations: map[string]string{ 
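				// note: the cache only admits profiles whose "kubescape.io/status" annotation
				// is "completed"; "kubescape.io/completion" may be "complete" or "partial"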
"kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -162,41 +161,39 @@ func Test_addApplicationProfile(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { if len(tt.annotations) != 0 { - tt.obj.SetAnnotations(tt.annotations) + tt.obj.(metav1.Object).SetAnnotations(tt.annotations) } namespace := fmt.Sprintf("default-%d", i) - k8sClient := k8sinterface.NewKubernetesApiMock() var runtimeObjs []runtime.Object - tt.obj.SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) for i := range tt.preCreatedPods { - tt.preCreatedPods[i].SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.preCreatedPods[i])) + tt.preCreatedPods[i].(metav1.Object).SetNamespace(namespace) } for i := range tt.preCreatedAP { - tt.preCreatedAP[i].SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.preCreatedAP[i])) + tt.preCreatedAP[i].(metav1.Object).SetNamespace(namespace) + runtimeObjs = append(runtimeObjs, tt.preCreatedAP[i]) } - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.obj)) + tt.obj.(metav1.Object).SetNamespace(namespace) + runtimeObjs = append(runtimeObjs, tt.obj) - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, runtimeObjs...) + storageClient := fake.NewSimpleClientset(runtimeObjs...).SpdxV1beta1() - ap := NewApplicationProfileCache("", k8sClient) + ap := NewApplicationProfileCache("", storageClient, 0) for i := range tt.preCreatedPods { ap.addPod(tt.preCreatedPods[i]) } for i := range tt.preCreatedAP { - ap.addApplicationProfile(context.Background(), tt.preCreatedAP[i]) + ap.addApplicationProfile(tt.preCreatedAP[i]) } - ap.addApplicationProfile(context.Background(), tt.obj) + ap.addApplicationProfile(tt.obj) + time.Sleep(1 * time.Second) // add is async // test if the application profile is added to the cache - apName := objectcache.UnstructuredUniqueName(tt.obj) + apName := objectcache.MetaUniqueName(tt.obj.(metav1.Object)) if tt.shouldAdd { assert.Equal(t, 1, ap.allProfiles.Cardinality()) } else { @@ -207,7 +204,7 @@ func Test_addApplicationProfile(t *testing.T) { assert.True(t, ap.slugToContainers.Has(apName)) assert.True(t, ap.slugToAppProfile.Has(apName)) for i := range tt.preCreatedPods { - p, _ := objectcache.UnstructuredToPod(tt.preCreatedPods[i]) + p := tt.preCreatedPods[i].(*corev1.Pod) for _, c := range objectcache.ListContainersIDs(p) { assert.NotNil(t, ap.GetApplicationProfile(c)) } @@ -216,7 +213,7 @@ func Test_addApplicationProfile(t *testing.T) { assert.False(t, ap.slugToContainers.Has(apName)) assert.False(t, ap.slugToAppProfile.Has(apName)) for i := range tt.preCreatedPods { - p, _ := objectcache.UnstructuredToPod(tt.preCreatedPods[i]) + p := tt.preCreatedPods[i].(*corev1.Pod) for _, c := range objectcache.ListContainersIDs(p) { assert.Nil(t, ap.GetApplicationProfile(c)) } @@ -228,7 +225,7 @@ func Test_addApplicationProfile(t *testing.T) { func Test_deleteApplicationProfile(t *testing.T) { tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string slug string slugs []string @@ -236,21 +233,21 @@ func Test_deleteApplicationProfile(t *testing.T) { }{ { name: "delete application profile nginx", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), slug: "/replicaset-nginx-77b4fdf86c", slugs: []string{"/replicaset-nginx-77b4fdf86c"}, shouldDelete: true, 
}, { name: "delete application profile from many", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), slug: "/replicaset-nginx-77b4fdf86c", slugs: []string{"/replicaset-nginx-11111", "/replicaset-nginx-77b4fdf86c", "/replicaset-nginx-22222"}, shouldDelete: true, }, { name: "ignore delete application profile nginx", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), slug: "/replicaset-nginx-77b4fdf86c", slugs: []string{"/replicaset-nginx-77b4fdf86c"}, shouldDelete: false, @@ -258,7 +255,7 @@ func Test_deleteApplicationProfile(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ap := NewApplicationProfileCache("", nil) + ap := NewApplicationProfileCache("", nil, 0) ap.allProfiles.Append(tt.slugs...) for _, i := range tt.slugs { @@ -284,7 +281,7 @@ func Test_deleteApplicationProfile(t *testing.T) { func Test_deletePod(t *testing.T) { tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string containers []string slug string @@ -293,19 +290,19 @@ func Test_deletePod(t *testing.T) { }{ { name: "delete pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"b0416f7a782e62badf28e03fc9b82305cd02e9749dc24435d8592fab66349c78"}, shouldDelete: true, }, { name: "pod not deleted", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"blabla"}, shouldDelete: false, }, { name: "delete pod with slug", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"b0416f7a782e62badf28e03fc9b82305cd02e9749dc24435d8592fab66349c78"}, slug: "/replicaset-nginx-77b4fdf86c", otherSlugs: []string{"1111111", "222222"}, @@ -313,7 +310,7 @@ func Test_deletePod(t *testing.T) { }, { name: "delete pod with slug", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"b0416f7a782e62badf28e03fc9b82305cd02e9749dc24435d8592fab66349c78"}, slug: "/replicaset-nginx-77b4fdf86c", shouldDelete: true, @@ -321,7 +318,7 @@ func Test_deletePod(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ap := NewApplicationProfileCache("", nil) + ap := NewApplicationProfileCache("", nil, 0) for _, i := range tt.otherSlugs { ap.slugToContainers.Set(i, mapset.NewSet[string]()) ap.slugToAppProfile.Set(i, &v1beta1.ApplicationProfile{}) @@ -429,7 +426,7 @@ func Test_GetApplicationProfile(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ap := NewApplicationProfileCache("", k8sinterface.NewKubernetesApiMock()) + ap := NewApplicationProfileCache("", fake.NewSimpleClientset().SpdxV1beta1(), 0) for _, c := range tt.pods { ap.containerToSlug.Set(c.containerID, c.slug) @@ -454,8 +451,8 @@ func Test_addApplicationProfile_existing(t *testing.T) { } // add single application profile tests := []struct { - obj1 *unstructured.Unstructured - obj2 *unstructured.Unstructured + obj1 runtime.Object + obj2 runtime.Object annotations1 map[string]string annotations2 map[string]string name string @@ -464,8 +461,8 @@ func Test_addApplicationProfile_existing(t *testing.T) { }{ { name: "application profile already exists", - obj1: 
mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), - obj2: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj1: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), + obj2: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), pods: []podToSlug{ { podName: "nginx-77b4fdf86c", @@ -476,8 +473,8 @@ func Test_addApplicationProfile_existing(t *testing.T) { }, { name: "remove application profile", - obj1: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), - obj2: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj1: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), + obj2: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), pods: []podToSlug{ { podName: "nginx-77b4fdf86c", @@ -496,20 +493,19 @@ func Test_addApplicationProfile_existing(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if len(tt.annotations1) != 0 { - tt.obj1.SetAnnotations(tt.annotations1) + tt.obj1.(metav1.Object).SetAnnotations(tt.annotations1) } if len(tt.annotations2) != 0 { - tt.obj2.SetAnnotations(tt.annotations2) + tt.obj2.(metav1.Object).SetAnnotations(tt.annotations2) } - k8sClient := k8sinterface.NewKubernetesApiMock() var runtimeObjs []runtime.Object - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.obj1)) + runtimeObjs = append(runtimeObjs, tt.obj1) - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, runtimeObjs...) + storageClient := fake.NewSimpleClientset(runtimeObjs...).SpdxV1beta1() - ap := NewApplicationProfileCache("", k8sClient) + ap := NewApplicationProfileCache("", storageClient, 0) // add pods for i := range tt.pods { @@ -517,8 +513,9 @@ func Test_addApplicationProfile_existing(t *testing.T) { ap.slugToContainers.Set(tt.pods[i].slug, mapset.NewSet(tt.pods[i].podName)) } - ap.addApplicationProfile(context.Background(), tt.obj1) - ap.addApplicationProfile(context.Background(), tt.obj2) + ap.addApplicationProfile(tt.obj1) + time.Sleep(1 * time.Second) // add is async + ap.addApplicationProfile(tt.obj2) // test if the application profile is added to the cache if tt.storeInCache { @@ -530,45 +527,19 @@ func Test_addApplicationProfile_existing(t *testing.T) { } } -func Test_unstructuredToApplicationProfile(t *testing.T) { - - tests := []struct { - obj *unstructured.Unstructured - name string - }{ - { - name: "nginx application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), - }, - { - name: "collection application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p, err := unstructuredToApplicationProfile(tt.obj) - assert.NoError(t, err) - assert.Equal(t, tt.obj.GetName(), p.GetName()) - assert.Equal(t, tt.obj.GetLabels(), p.GetLabels()) - assert.Equal(t, tt.obj.GetAnnotations(), p.GetAnnotations()) - }) - } -} - func Test_getApplicationProfile(t *testing.T) { type args struct { name string } tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object args args wantErr bool }{ { name: "nginx application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), args: args{ name: "replicaset-nginx-77b4fdf86c", }, @@ -576,7 +547,7 @@ func Test_getApplicationProfile(t *testing.T) { }, { name: "collection application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), args: args{ 
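				// the fake storage clientset is seeded with tt.obj, so Get by the matching slug should succeed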
name: "replicaset-collection-94c495554", }, @@ -584,7 +555,7 @@ func Test_getApplicationProfile(t *testing.T) { }, { name: "collection application profile", - obj: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), args: args{ name: "replicaset-nginx-77b4fdf86c", }, @@ -593,11 +564,10 @@ func Test_getApplicationProfile(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - k8sClient := k8sinterface.NewKubernetesApiMock() - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, mocks.UnstructuredToRuntime(tt.obj)) + storageClient := fake.NewSimpleClientset(tt.obj).SpdxV1beta1() ap := &ApplicationProfileCacheImpl{ - k8sClient: k8sClient, + storageClient: storageClient, } a, err := ap.getApplicationProfile("", tt.args.name) @@ -606,14 +576,14 @@ func Test_getApplicationProfile(t *testing.T) { return } assert.NoError(t, err) - assert.Equal(t, tt.obj.GetName(), a.GetName()) - assert.Equal(t, tt.obj.GetLabels(), a.GetLabels()) + assert.Equal(t, tt.obj.(metav1.Object).GetName(), a.GetName()) + assert.Equal(t, tt.obj.(metav1.Object).GetLabels(), a.GetLabels()) }) } } func Test_WatchResources(t *testing.T) { - ap := NewApplicationProfileCache("test-node", nil) + ap := NewApplicationProfileCache("test-node", nil, 0) expectedPodWatchResource := watcher.NewWatchResource(schema.GroupVersionResource{ Group: "", @@ -635,38 +605,21 @@ func Test_WatchResources(t *testing.T) { func TestGetSlug(t *testing.T) { tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object expected string expectErr bool }{ { name: "Test with valid object", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), expected: "replicaset-collection-94c495554", expectErr: false, }, - { - name: "Test with invalid object", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "Unknown", - "metadata": map[string]interface{}{ - "name": "unknown-1", - }, - }, - }, - expected: "", - expectErr: true, - }, { name: "Test with object without instanceIDs", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "unknown-1", - }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unknown-1", }, }, expected: "", @@ -676,8 +629,8 @@ func TestGetSlug(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.obj.SetNamespace("default") - result, err := getSlug(tt.obj) + tt.obj.(metav1.Object).SetNamespace("default") + result, err := getSlug(tt.obj.(*corev1.Pod)) if tt.expectErr { assert.Error(t, err) } else { @@ -692,18 +645,18 @@ func Test_addPod(t *testing.T) { // add single application profile tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string addedContainers []string ignoredContainers []string preCreatedAPAnnotations map[string]string - preCreatedAP *unstructured.Unstructured // pre created application profiles + preCreatedAP runtime.Object // pre created application profiles shouldAddToProfile bool }{ { name: "add pod with partial application profile", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), - preCreatedAP: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), + preCreatedAP: mocks.GetRuntime(mocks.TestKindAP, 
mocks.TestCollection), preCreatedAPAnnotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "partial", @@ -720,8 +673,8 @@ func Test_addPod(t *testing.T) { }, { name: "add pod with application profile", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), - preCreatedAP: mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), + preCreatedAP: mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), preCreatedAPAnnotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -740,30 +693,28 @@ func Test_addPod(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { if len(tt.preCreatedAPAnnotations) != 0 { - tt.preCreatedAP.SetAnnotations(tt.preCreatedAPAnnotations) + tt.preCreatedAP.(metav1.Object).SetAnnotations(tt.preCreatedAPAnnotations) } namespace := fmt.Sprintf("default-%d", i) - k8sClient := k8sinterface.NewKubernetesApiMock() var runtimeObjs []runtime.Object - tt.obj.SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) - tt.preCreatedAP.SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.preCreatedAP)) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.obj)) + tt.preCreatedAP.(metav1.Object).SetNamespace(namespace) + runtimeObjs = append(runtimeObjs, tt.preCreatedAP) - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, runtimeObjs...) + storageClient := fake.NewSimpleClientset(runtimeObjs...).SpdxV1beta1() - ap := NewApplicationProfileCache("", k8sClient) + ap := NewApplicationProfileCache("", storageClient, 0) - ap.addApplicationProfile(context.Background(), tt.preCreatedAP) + ap.addApplicationProfile(tt.preCreatedAP) + time.Sleep(1 * time.Second) // add is async + tt.obj.(metav1.Object).SetNamespace(namespace) ap.addPod(tt.obj) // test if the application profile is added to the cache assert.Equal(t, 1, ap.allProfiles.Cardinality()) - assert.True(t, ap.slugToContainers.Has(objectcache.UnstructuredUniqueName(tt.preCreatedAP))) + assert.True(t, ap.slugToContainers.Has(objectcache.MetaUniqueName(tt.preCreatedAP.(metav1.Object)))) c := ap.containerToSlug.Keys() slices.Sort(c) @@ -783,3 +734,202 @@ func Test_addPod(t *testing.T) { }) } } + +func Test_MergeApplicationProfiles(t *testing.T) { + tests := []struct { + name string + normalProfile *v1beta1.ApplicationProfile + userProfile *v1beta1.ApplicationProfile + expectedMerged *v1beta1.ApplicationProfile + }{ + { + name: "merge profiles with overlapping containers", + normalProfile: &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-profile", + Namespace: "default", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "container1", + Capabilities: []string{ + "NET_ADMIN", + }, + Syscalls: []string{ + "open", + }, + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/config", + }, + }, + }, + }, + }, + }, + userProfile: &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ug-test-profile", // Added ug- prefix + Namespace: "default", + Annotations: map[string]string{ + "kubescape.io/managed-by": "User", + }, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "container1", + Capabilities: []string{ + "SYS_ADMIN", + }, + Syscalls: []string{ + 
"write", + }, + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/secret", + }, + }, + }, + }, + }, + }, + expectedMerged: &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-profile", // Keeps original name without ug- prefix + Namespace: "default", + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "container1", + Capabilities: []string{ + "NET_ADMIN", + "SYS_ADMIN", + }, + Syscalls: []string{ + "open", + "write", + }, + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/config", + }, + { + Path: "/etc/secret", + }, + }, + }, + }, + }, + }, + }, + { + name: "merge profiles with init containers", + normalProfile: &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-profile", + Namespace: "default", + }, + Spec: v1beta1.ApplicationProfileSpec{ + InitContainers: []v1beta1.ApplicationProfileContainer{ + { + Name: "init1", + Execs: []v1beta1.ExecCalls{ + { + Path: "mount", + }, + }, + }, + }, + }, + }, + userProfile: &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ug-test-profile", // Added ug- prefix + Namespace: "default", + Annotations: map[string]string{ + "kubescape.io/managed-by": "User", + }, + }, + Spec: v1beta1.ApplicationProfileSpec{ + InitContainers: []v1beta1.ApplicationProfileContainer{ + { + Name: "init1", + Execs: []v1beta1.ExecCalls{ + { + Path: "chmod", + }, + }, + Syscalls: []string{ + "chmod", + }, + }, + }, + }, + }, + expectedMerged: &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-profile", // Keeps original name without ug- prefix + Namespace: "default", + }, + Spec: v1beta1.ApplicationProfileSpec{ + InitContainers: []v1beta1.ApplicationProfileContainer{ + { + Name: "init1", + Execs: []v1beta1.ExecCalls{ + { + Path: "mount", + }, + { + Path: "chmod", + }, + }, + Syscalls: []string{ + "chmod", + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := NewApplicationProfileCache("test-node", nil, 0) + merged := cache.performMerge(tt.normalProfile, tt.userProfile) + + // Verify object metadata + assert.Equal(t, tt.expectedMerged.Name, merged.Name) + assert.Equal(t, tt.expectedMerged.Namespace, merged.Namespace) + + // Verify user-managed annotation is removed + _, hasAnnotation := merged.Annotations["kubescape.io/managed-by"] + assert.False(t, hasAnnotation) + + // Verify containers + assert.Equal(t, len(tt.expectedMerged.Spec.Containers), len(merged.Spec.Containers)) + for i, container := range tt.expectedMerged.Spec.Containers { + assert.Equal(t, container.Name, merged.Spec.Containers[i].Name) + assert.ElementsMatch(t, container.Capabilities, merged.Spec.Containers[i].Capabilities) + assert.ElementsMatch(t, container.Syscalls, merged.Spec.Containers[i].Syscalls) + assert.ElementsMatch(t, container.Opens, merged.Spec.Containers[i].Opens) + assert.ElementsMatch(t, container.Execs, merged.Spec.Containers[i].Execs) + assert.ElementsMatch(t, container.Endpoints, merged.Spec.Containers[i].Endpoints) + } + + // Verify init containers + assert.Equal(t, len(tt.expectedMerged.Spec.InitContainers), len(merged.Spec.InitContainers)) + for i, container := range tt.expectedMerged.Spec.InitContainers { + assert.Equal(t, container.Name, merged.Spec.InitContainers[i].Name) + assert.ElementsMatch(t, container.Capabilities, merged.Spec.InitContainers[i].Capabilities) + assert.ElementsMatch(t, container.Syscalls, merged.Spec.InitContainers[i].Syscalls) + assert.ElementsMatch(t, 
container.Opens, merged.Spec.InitContainers[i].Opens) + assert.ElementsMatch(t, container.Execs, merged.Spec.InitContainers[i].Execs) + assert.ElementsMatch(t, container.Endpoints, merged.Spec.InitContainers[i].Endpoints) + } + }) + } +} diff --git a/pkg/objectcache/dnscache/dnscache.go b/pkg/objectcache/dnscache/dnscache.go new file mode 100644 index 00000000..6d1dff09 --- /dev/null +++ b/pkg/objectcache/dnscache/dnscache.go @@ -0,0 +1,33 @@ +package dnscache + +import ( + "github.com/kubescape/go-logger" + "github.com/kubescape/node-agent/pkg/dnsmanager" + "github.com/kubescape/node-agent/pkg/objectcache" +) + +var _ objectcache.DnsCache = (*DnsCacheImpl)(nil) + +type DnsCacheImpl struct { + dnsResolver dnsmanager.DNSResolver +} + +func NewDnsCache(dnsResolver dnsmanager.DNSResolver) *DnsCacheImpl { + return &DnsCacheImpl{ + dnsResolver: dnsResolver, + } +} + +func (d *DnsCacheImpl) ResolveIpToDomain(ip string) string { + if d.dnsResolver == nil { + logger.L().Debug("DNS resolver is not set") + return "" + } + + domain, ok := d.dnsResolver.ResolveIPAddress(ip) + if !ok { + return "" + } + + return domain +} diff --git a/pkg/objectcache/dnscache_interface.go b/pkg/objectcache/dnscache_interface.go new file mode 100644 index 00000000..b874d0d3 --- /dev/null +++ b/pkg/objectcache/dnscache_interface.go @@ -0,0 +1,14 @@ +package objectcache + +type DnsCache interface { + ResolveIpToDomain(ip string) string +} + +var _DnsCache = (*DnsCacheMock)(nil) + +type DnsCacheMock struct { +} + +func (dc *DnsCacheMock) ResolveIpToDomain(_ string) string { + return "" +} diff --git a/pkg/objectcache/helpers.go b/pkg/objectcache/helpers.go index 90f3c644..26b58155 100644 --- a/pkg/objectcache/helpers.go +++ b/pkg/objectcache/helpers.go @@ -1,37 +1,22 @@ package objectcache import ( - "encoding/json" "time" "github.com/kubescape/node-agent/pkg/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func UniqueName(namespace, name string) string { return namespace + "/" + name } -func UnstructuredUniqueName(obj *unstructured.Unstructured) string { +func MetaUniqueName(obj metav1.Object) string { return UniqueName(obj.GetNamespace(), obj.GetName()) } -func UnstructuredToPod(obj *unstructured.Unstructured) (*corev1.Pod, error) { - bytes, err := obj.MarshalJSON() - if err != nil { - return nil, err - } - - var pod *corev1.Pod - err = json.Unmarshal(bytes, &pod) - if err != nil { - return nil, err - } - return pod, nil -} - // list containerIDs from pod status func ListContainersIDs(pod *corev1.Pod) []string { var containers []string diff --git a/pkg/objectcache/k8scache/k8scache.go b/pkg/objectcache/k8scache/k8scache.go index 625a55ef..c79f9609 100644 --- a/pkg/objectcache/k8scache/k8scache.go +++ b/pkg/objectcache/k8scache/k8scache.go @@ -7,10 +7,8 @@ import ( "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/watcher" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "github.com/goradd/maps" @@ -72,35 +70,20 @@ func (k *K8sObjectCacheImpl) GetPods() []*corev1.Pod { return k.pods.Values() } -func (k *K8sObjectCacheImpl) AddHandler(_ context.Context, obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Pod": - pod, err := unstructuredToPod(obj) - if err != nil { - return - } +func (k *K8sObjectCacheImpl) 
AddHandler(_ context.Context, obj runtime.Object) { + if pod, ok := obj.(*corev1.Pod); ok { k.pods.Set(podKey(pod.GetNamespace(), pod.GetName()), pod) } } -func (k *K8sObjectCacheImpl) ModifyHandler(_ context.Context, obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Pod": - pod, err := unstructuredToPod(obj) - if err != nil { - return - } +func (k *K8sObjectCacheImpl) ModifyHandler(_ context.Context, obj runtime.Object) { + if pod, ok := obj.(*corev1.Pod); ok { k.pods.Set(podKey(pod.GetNamespace(), pod.GetName()), pod) } } -func (k *K8sObjectCacheImpl) DeleteHandler(_ context.Context, obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Pod": - pod, err := unstructuredToPod(obj) - if err != nil { - return - } +func (k *K8sObjectCacheImpl) DeleteHandler(_ context.Context, obj runtime.Object) { + if pod, ok := obj.(*corev1.Pod); ok { k.pods.Delete(podKey(pod.GetNamespace(), pod.GetName())) } } @@ -133,10 +116,3 @@ func (k *K8sObjectCacheImpl) setApiServerIpAddress() error { func podKey(namespace, podName string) string { return namespace + "/" + podName } -func unstructuredToPod(obj *unstructured.Unstructured) (*corev1.Pod, error) { - pod := &corev1.Pod{} - if err := k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pod); err != nil { - return nil, err - } - return pod, nil -} diff --git a/pkg/objectcache/k8scache/k8scache_test.go b/pkg/objectcache/k8scache/k8scache_test.go index d745bff7..bcc48d2f 100644 --- a/pkg/objectcache/k8scache/k8scache_test.go +++ b/pkg/objectcache/k8scache/k8scache_test.go @@ -4,41 +4,15 @@ import ( "context" "testing" + "github.com/kubescape/k8s-interface/k8sinterface" "github.com/kubescape/node-agent/mocks" "github.com/kubescape/node-agent/pkg/watcher" - - "github.com/kubescape/k8s-interface/k8sinterface" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" ) -func TestUnstructuredToPod(t *testing.T) { - - tests := []struct { - obj *unstructured.Unstructured - name string - }{ - { - name: "nginx pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), - }, - { - name: "collection pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p, err := unstructuredToPod(tt.obj) - assert.NoError(t, err) - assert.Equal(t, tt.obj.GetName(), p.GetName()) - assert.Equal(t, tt.obj.GetLabels(), p.GetLabels()) - }) - } -} func TestPodSpecKey(t *testing.T) { tests := []struct { name string @@ -110,14 +84,14 @@ func TestK8sObjectCacheImpl_GetPodSpec(t *testing.T) { } tests := []struct { name string - obj []*unstructured.Unstructured + obj []*corev1.Pod args []args }{ { name: "Test with valid namespace and podName", - obj: []*unstructured.Unstructured{ - mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), - mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: []*corev1.Pod{ + mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx).(*corev1.Pod), + mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection).(*corev1.Pod), }, args: []args{ { @@ -154,9 +128,8 @@ func TestK8sObjectCacheImpl_GetPodSpec(t *testing.T) { assert.Nil(t, spec) continue } - p, _ := unstructuredToPod(tt.obj[i]) assert.NotNil(t, spec) - assert.Equal(t, p.Spec, *spec) + assert.Equal(t, tt.obj[i].Spec, *spec) } } @@ -164,7 +137,7 @@ func TestK8sObjectCacheImpl_GetPodSpec(t 
*testing.T) { { for _, obj := range tt.obj { o := obj.DeepCopy() - o.Object["spec"] = map[string]interface{}{} + o.Spec = corev1.PodSpec{} k.ModifyHandler(context.Background(), o) } @@ -175,11 +148,10 @@ func TestK8sObjectCacheImpl_GetPodSpec(t *testing.T) { continue } o := tt.obj[i].DeepCopy() - o.Object["spec"] = map[string]interface{}{} + o.Spec = corev1.PodSpec{} - p, _ := unstructuredToPod(o) assert.NotNil(t, spec) - assert.Equal(t, p.Spec, *spec) + assert.Equal(t, o.Spec, *spec) } } @@ -198,6 +170,7 @@ func TestK8sObjectCacheImpl_GetPodSpec(t *testing.T) { }) } } + func TestK8sObjectCacheImpl_GetApiServerIpAddress(t *testing.T) { k := &K8sObjectCacheImpl{ apiServerIpAddress: "127.0.0.1", diff --git a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go index a63797f5..93fc1083 100644 --- a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go +++ b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go @@ -3,23 +3,24 @@ package networkneighborhoodcache import ( "context" "fmt" - - "github.com/kubescape/node-agent/pkg/k8sclient" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/watcher" + "strings" + "time" mapset "github.com/deckarep/golang-set/v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sruntime "k8s.io/apimachinery/pkg/runtime" - "github.com/goradd/maps" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" "github.com/kubescape/k8s-interface/instanceidhandler/v1" helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" "github.com/kubescape/k8s-interface/workloadinterface" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/node-agent/pkg/watcher" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + versioned "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -47,28 +48,99 @@ func newNetworkNeighborhoodState(nn *v1beta1.NetworkNeighborhood) networkNeighbo } type NetworkNeighborhoodCacheImpl struct { - containerToSlug maps.SafeMap[string, string] // cache the containerID to slug mapping, this will enable a quick lookup of the network neighborhood - slugToNetworkNeighborhood maps.SafeMap[string, *v1beta1.NetworkNeighborhood] // cache the network neighborhood - slugToContainers maps.SafeMap[string, mapset.Set[string]] // cache the containerIDs that belong to the network neighborhood, this will enable removing from cache NN without pods - slugToState maps.SafeMap[string, networkNeighborhoodState] // cache the containerID to slug mapping, this will enable a quick lookup of the network neighborhood - k8sClient k8sclient.K8sClientInterface - allNetworkNeighborhoods mapset.Set[string] // cache all the NN that are ready. 
this will enable removing from cache NN without pods that are running on the same node - nodeName string + containerToSlug maps.SafeMap[string, string] // cache the containerID to slug mapping, this will enable a quick lookup of the network neighborhood + slugToNetworkNeighborhood maps.SafeMap[string, *v1beta1.NetworkNeighborhood] // cache the network neighborhood + slugToContainers maps.SafeMap[string, mapset.Set[string]] // cache the containerIDs that belong to the network neighborhood, this will enable removing from cache NN without pods + slugToState maps.SafeMap[string, networkNeighborhoodState] // cache the containerID to slug mapping, this will enable a quick lookup of the network neighborhood + storageClient versioned.SpdxV1beta1Interface + allNetworkNeighborhoods mapset.Set[string] // cache all the NN that are ready. this will enable removing from cache NN without pods that are running on the same node + nodeName string + maxDelaySeconds int // maximum delay in seconds before getting the full object from the storage + userManagedNetworkNeighborhood maps.SafeMap[string, *v1beta1.NetworkNeighborhood] } -func NewNetworkNeighborhoodCache(nodeName string, k8sClient k8sclient.K8sClientInterface) *NetworkNeighborhoodCacheImpl { +func NewNetworkNeighborhoodCache(nodeName string, storageClient versioned.SpdxV1beta1Interface, maxDelaySeconds int) *NetworkNeighborhoodCacheImpl { return &NetworkNeighborhoodCacheImpl{ - nodeName: nodeName, - k8sClient: k8sClient, - containerToSlug: maps.SafeMap[string, string]{}, - slugToContainers: maps.SafeMap[string, mapset.Set[string]]{}, - allNetworkNeighborhoods: mapset.NewSet[string](), + nodeName: nodeName, + maxDelaySeconds: maxDelaySeconds, + storageClient: storageClient, + containerToSlug: maps.SafeMap[string, string]{}, + slugToContainers: maps.SafeMap[string, mapset.Set[string]]{}, + allNetworkNeighborhoods: mapset.NewSet[string](), + userManagedNetworkNeighborhood: maps.SafeMap[string, *v1beta1.NetworkNeighborhood]{}, } - } // ------------------ objectcache.NetworkNeighborhoodCache methods ----------------------- +func (nn *NetworkNeighborhoodCacheImpl) handleUserManagedNN(netNeighborhood *v1beta1.NetworkNeighborhood) { + baseNNName := strings.TrimPrefix(netNeighborhood.GetName(), "ug-") + baseNNUniqueName := objectcache.UniqueName(netNeighborhood.GetNamespace(), baseNNName) + + // Get the full user managed network neighborhood from the storage + userManagedNN, err := nn.getNetworkNeighborhood(netNeighborhood.GetNamespace(), netNeighborhood.GetName()) + if err != nil { + logger.L().Error("failed to get full network neighborhood", helpers.Error(err)) + return + } + + // Store the user-managed network neighborhood temporarily + nn.userManagedNetworkNeighborhood.Set(baseNNUniqueName, userManagedNN) + + // If we have the base network neighborhood cached, fetch a fresh copy and merge. + // If the base network neighborhood is not cached yet, the merge will be attempted when it's added. 
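+	//
+	// Illustrative flow (hypothetical object; name and API group assumed for clarity):
+	// applying a user-managed object such as
+	//
+	//   apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1
+	//   kind: NetworkNeighborhood
+	//   metadata:
+	//     name: ug-replicaset-nginx-77b4fdf86c
+	//     namespace: default
+	//     annotations:
+	//       kubescape.io/managed-by: User
+	//
+	// stores its containers here and merges them into the cached base object
+	// "replicaset-nginx-77b4fdf86c" as soon as that base object is available.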
+ if nn.slugToNetworkNeighborhood.Has(baseNNUniqueName) { + // Fetch fresh base network neighborhood from cluster + freshBaseNN, err := nn.getNetworkNeighborhood(netNeighborhood.GetNamespace(), baseNNName) + if err != nil { + logger.L().Error("failed to get fresh base network neighborhood for merging", + helpers.String("name", baseNNName), + helpers.String("namespace", netNeighborhood.GetNamespace()), + helpers.Error(err)) + return + } + + mergedNN := nn.performMerge(freshBaseNN, userManagedNN) + nn.slugToNetworkNeighborhood.Set(baseNNUniqueName, mergedNN) + + // Clean up the user-managed network neighborhood after successful merge + nn.userManagedNetworkNeighborhood.Delete(baseNNUniqueName) + + logger.L().Debug("merged user-managed network neighborhood with fresh base network neighborhood", + helpers.String("name", baseNNName), + helpers.String("namespace", netNeighborhood.GetNamespace())) + } +} + +func (nn *NetworkNeighborhoodCacheImpl) addNetworkNeighborhood(_ context.Context, obj runtime.Object) { + netNeighborhood := obj.(*v1beta1.NetworkNeighborhood) + nnName := objectcache.MetaUniqueName(netNeighborhood) + + if isUserManagedNN(netNeighborhood) { + nn.handleUserManagedNN(netNeighborhood) + return + } + + nnState := newNetworkNeighborhoodState(netNeighborhood) + nn.slugToState.Set(nnName, nnState) + + if nnState.status != helpersv1.Completed { + if nn.slugToNetworkNeighborhood.Has(nnName) { + nn.slugToNetworkNeighborhood.Delete(nnName) + nn.allNetworkNeighborhoods.Remove(nnName) + } + return + } + + nn.allNetworkNeighborhoods.Add(nnName) + + if nn.slugToContainers.Has(nnName) { + time.AfterFunc(utils.RandomDuration(nn.maxDelaySeconds, time.Second), func() { + nn.addFullNetworkNeighborhood(netNeighborhood, nnName) + }) + } +} + func (nn *NetworkNeighborhoodCacheImpl) GetNetworkNeighborhood(containerID string) *v1beta1.NetworkNeighborhood { if s := nn.containerToSlug.Get(containerID); s != "" { return nn.slugToNetworkNeighborhood.Get(s) @@ -104,48 +176,42 @@ func (nn *NetworkNeighborhoodCacheImpl) WatchResources() []watcher.WatchResource // ------------------ watcher.Watcher methods ----------------------- -func (nn *NetworkNeighborhoodCacheImpl) AddHandler(ctx context.Context, obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Pod": - nn.addPod(obj) - case "NetworkNeighborhood": - nn.addNetworkNeighborhood(ctx, obj) +func (nn *NetworkNeighborhoodCacheImpl) AddHandler(ctx context.Context, obj runtime.Object) { + if pod, ok := obj.(*corev1.Pod); ok { + nn.addPod(pod) + } else if netNeighborhood, ok := obj.(*v1beta1.NetworkNeighborhood); ok { + nn.addNetworkNeighborhood(ctx, netNeighborhood) } } -func (nn *NetworkNeighborhoodCacheImpl) ModifyHandler(ctx context.Context, obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Pod": - nn.addPod(obj) - case "NetworkNeighborhood": - nn.addNetworkNeighborhood(ctx, obj) + +func (nn *NetworkNeighborhoodCacheImpl) ModifyHandler(ctx context.Context, obj runtime.Object) { + if pod, ok := obj.(*corev1.Pod); ok { + nn.addPod(pod) + } else if netNeighborhood, ok := obj.(*v1beta1.NetworkNeighborhood); ok { + nn.addNetworkNeighborhood(ctx, netNeighborhood) } } -func (nn *NetworkNeighborhoodCacheImpl) DeleteHandler(_ context.Context, obj *unstructured.Unstructured) { - switch obj.GetKind() { - case "Pod": - nn.deletePod(obj) - case "NetworkNeighborhood": - nn.deleteNetworkNeighborhood(obj) + +func (nn *NetworkNeighborhoodCacheImpl) DeleteHandler(_ context.Context, obj runtime.Object) { + if pod, ok := obj.(*corev1.Pod); ok { + 
nn.deletePod(pod) + } else if netNeighborhood, ok := obj.(*v1beta1.NetworkNeighborhood); ok { + nn.deleteNetworkNeighborhood(netNeighborhood) } } // ------------------ watch pod methods ----------------------- -func (nn *NetworkNeighborhoodCacheImpl) addPod(podU *unstructured.Unstructured) { +func (nn *NetworkNeighborhoodCacheImpl) addPod(obj runtime.Object) { + pod := obj.(*corev1.Pod) - slug, err := getSlug(podU) + slug, err := getSlug(pod) if err != nil { - logger.L().Error("ApplicationProfileCacheImpl: failed to get slug", helpers.String("namespace", podU.GetNamespace()), helpers.String("pod", podU.GetName()), helpers.Error(err)) + logger.L().Error("NetworkNeighborhoodCacheImpl: failed to get slug", helpers.String("namespace", pod.GetNamespace()), helpers.String("pod", pod.GetName()), helpers.Error(err)) return } - uniqueSlug := objectcache.UniqueName(podU.GetNamespace(), slug) - - pod, err := objectcache.UnstructuredToPod(podU) - if err != nil { - logger.L().Error("ApplicationProfileCacheImpl: failed to unmarshal pod", helpers.String("namespace", podU.GetNamespace()), helpers.String("pod", podU.GetName()), helpers.Error(err)) - return - } + uniqueSlug := objectcache.UniqueName(pod.GetNamespace(), slug) // in case of modified pod, remove the old containers terminatedContainers := objectcache.ListTerminatedContainers(pod) @@ -176,7 +242,7 @@ func (nn *NetworkNeighborhoodCacheImpl) addPod(podU *unstructured.Unstructured) if nn.allNetworkNeighborhoods.Contains(uniqueSlug) && !nn.slugToNetworkNeighborhood.Has(uniqueSlug) { // get the NN - networkNeighborhood, err := nn.getNetworkNeighborhood(podU.GetNamespace(), slug) + networkNeighborhood, err := nn.getNetworkNeighborhood(pod.GetNamespace(), slug) if err != nil { logger.L().Error("failed to get network neighborhood", helpers.Error(err)) continue @@ -189,19 +255,15 @@ func (nn *NetworkNeighborhoodCacheImpl) addPod(podU *unstructured.Unstructured) } -func (nn *NetworkNeighborhoodCacheImpl) deletePod(obj *unstructured.Unstructured) { - - pod, err := objectcache.UnstructuredToPod(obj) - if err != nil { - logger.L().Error("ApplicationProfileCacheImpl: failed to unmarshal pod", helpers.String("namespace", obj.GetNamespace()), helpers.String("pod", obj.GetName()), helpers.Error(err)) - return - } +func (nn *NetworkNeighborhoodCacheImpl) deletePod(obj runtime.Object) { + pod := obj.(*corev1.Pod) containers := objectcache.ListContainersIDs(pod) for _, container := range containers { nn.removeContainer(container) } } + func (nn *NetworkNeighborhoodCacheImpl) removeContainer(containerID string) { uniqueSlug := nn.containerToSlug.Get(containerID) @@ -221,79 +283,239 @@ func (nn *NetworkNeighborhoodCacheImpl) removeContainer(containerID string) { } // ------------------ watch network neighborhood methods ----------------------- -func (nn *NetworkNeighborhoodCacheImpl) addNetworkNeighborhood(_ context.Context, obj *unstructured.Unstructured) { - nnName := objectcache.UnstructuredUniqueName(obj) - appProfile, err := unstructuredToNetworkNeighborhood(obj) +func (nn *NetworkNeighborhoodCacheImpl) addFullNetworkNeighborhood(netNeighborhood *v1beta1.NetworkNeighborhood, nnName string) { + fullNN, err := nn.getNetworkNeighborhood(netNeighborhood.GetNamespace(), netNeighborhood.GetName()) if err != nil { - logger.L().Error("failed to unmarshal network neighborhood", helpers.String("name", nnName), helpers.Error(err)) + logger.L().Error("failed to get full network neighborhood", helpers.Error(err)) return } - apState := newNetworkNeighborhoodState(appProfile) 
- nn.slugToState.Set(nnName, apState) - // the cache holds only completed network neighborhoods. - // check if the network neighborhood is completed - // if status was completed and now is not (e.g. mode changed from complete to partial), remove from cache - if apState.status != helpersv1.Completed { - if nn.slugToNetworkNeighborhood.Has(nnName) { - nn.slugToNetworkNeighborhood.Delete(nnName) - nn.allNetworkNeighborhoods.Remove(nnName) - } - return + // Check if there's a pending user-managed network neighborhood to merge + if nn.userManagedNetworkNeighborhood.Has(nnName) { + userManagedNN := nn.userManagedNetworkNeighborhood.Get(nnName) + fullNN = nn.performMerge(fullNN, userManagedNN) + // Clean up the user-managed network neighborhood after successful merge + nn.userManagedNetworkNeighborhood.Delete(nnName) + logger.L().Debug("merged pending user-managed network neighborhood", helpers.String("name", nnName)) } - // add to the cache - nn.allNetworkNeighborhoods.Add(nnName) + nn.slugToNetworkNeighborhood.Set(nnName, fullNN) + for _, i := range nn.slugToContainers.Get(nnName).ToSlice() { + nn.containerToSlug.Set(i, nnName) + } + logger.L().Debug("added pod to network neighborhood cache", helpers.String("name", nnName)) +} - if nn.slugToContainers.Has(nnName) { - // get the full network neighborhood from the storage - // the watch only returns the metadata - fullNN, err := nn.getNetworkNeighborhood(appProfile.GetNamespace(), appProfile.GetName()) - if err != nil { - logger.L().Error("failed to get full network neighborhood", helpers.Error(err)) - return +func (nn *NetworkNeighborhoodCacheImpl) performMerge(normalNN, userManagedNN *v1beta1.NetworkNeighborhood) *v1beta1.NetworkNeighborhood { + mergedNN := normalNN.DeepCopy() + + // Merge spec containers + mergedNN.Spec.Containers = nn.mergeContainers(mergedNN.Spec.Containers, userManagedNN.Spec.Containers) + mergedNN.Spec.InitContainers = nn.mergeContainers(mergedNN.Spec.InitContainers, userManagedNN.Spec.InitContainers) + mergedNN.Spec.EphemeralContainers = nn.mergeContainers(mergedNN.Spec.EphemeralContainers, userManagedNN.Spec.EphemeralContainers) + + // Merge LabelSelector + if userManagedNN.Spec.LabelSelector.MatchLabels != nil { + if mergedNN.Spec.LabelSelector.MatchLabels == nil { + mergedNN.Spec.LabelSelector.MatchLabels = make(map[string]string) } - nn.slugToNetworkNeighborhood.Set(nnName, fullNN) - for _, i := range nn.slugToContainers.Get(nnName).ToSlice() { - nn.containerToSlug.Set(i, nnName) + for k, v := range userManagedNN.Spec.LabelSelector.MatchLabels { + mergedNN.Spec.LabelSelector.MatchLabels[k] = v } + } + mergedNN.Spec.LabelSelector.MatchExpressions = append( + mergedNN.Spec.LabelSelector.MatchExpressions, + userManagedNN.Spec.LabelSelector.MatchExpressions..., + ) - logger.L().Debug("added pod to network neighborhood cache", helpers.String("name", nnName)) + return mergedNN +} + +func (nn *NetworkNeighborhoodCacheImpl) mergeContainers(normalContainers, userManagedContainers []v1beta1.NetworkNeighborhoodContainer) []v1beta1.NetworkNeighborhoodContainer { + if len(userManagedContainers) != len(normalContainers) { + // If the number of containers don't match, we can't merge + logger.L().Error("failed to merge user-managed profile with base profile", + helpers.Int("normalContainers len", len(normalContainers)), + helpers.Int("userManagedContainers len", len(userManagedContainers)), + helpers.String("reason", "number of containers don't match")) + return normalContainers } + + // Assuming the normalContainers are already in 
the correct Pod order + // We'll merge user containers at their corresponding positions + for i := range normalContainers { + for _, userContainer := range userManagedContainers { + if normalContainers[i].Name == userContainer.Name { + nn.mergeContainer(&normalContainers[i], &userContainer) + break + } + } + } + return normalContainers } -func (nn *NetworkNeighborhoodCacheImpl) deleteNetworkNeighborhood(obj *unstructured.Unstructured) { - nnName := objectcache.UnstructuredUniqueName(obj) - nn.slugToNetworkNeighborhood.Delete(nnName) - nn.slugToState.Delete(nnName) - nn.allNetworkNeighborhoods.Remove(nnName) +func (nn *NetworkNeighborhoodCacheImpl) mergeContainer(normalContainer, userContainer *v1beta1.NetworkNeighborhoodContainer) { + // Merge ingress rules + normalContainer.Ingress = nn.mergeNetworkNeighbors(normalContainer.Ingress, userContainer.Ingress) - logger.L().Info("deleted network neighborhood from cache", helpers.String("uniqueSlug", nnName)) + // Merge egress rules + normalContainer.Egress = nn.mergeNetworkNeighbors(normalContainer.Egress, userContainer.Egress) } -func (nn *NetworkNeighborhoodCacheImpl) getNetworkNeighborhood(namespace, name string) (*v1beta1.NetworkNeighborhood, error) { +func (nn *NetworkNeighborhoodCacheImpl) mergeNetworkNeighbors(normalNeighbors, userNeighbors []v1beta1.NetworkNeighbor) []v1beta1.NetworkNeighbor { + // Use map to track existing neighbors by identifier + neighborMap := make(map[string]int) + for i, neighbor := range normalNeighbors { + neighborMap[neighbor.Identifier] = i + } - u, err := nn.k8sClient.GetDynamicClient().Resource(groupVersionResource).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - return nil, err + // Merge or append user neighbors + for _, userNeighbor := range userNeighbors { + if idx, exists := neighborMap[userNeighbor.Identifier]; exists { + // Merge existing neighbor + normalNeighbors[idx] = nn.mergeNetworkNeighbor(normalNeighbors[idx], userNeighbor) + } else { + // Append new neighbor + normalNeighbors = append(normalNeighbors, userNeighbor) + } } - return unstructuredToNetworkNeighborhood(u) + + return normalNeighbors } -func unstructuredToNetworkNeighborhood(obj *unstructured.Unstructured) (*v1beta1.NetworkNeighborhood, error) { +func (nn *NetworkNeighborhoodCacheImpl) mergeNetworkNeighbor(normal, user v1beta1.NetworkNeighbor) v1beta1.NetworkNeighbor { + merged := normal.DeepCopy() - ap := &v1beta1.NetworkNeighborhood{} - err := k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, ap) - if err != nil { - return nil, err + // Merge DNS names (removing duplicates) + dnsNamesSet := make(map[string]struct{}) + for _, dns := range normal.DNSNames { + dnsNamesSet[dns] = struct{}{} + } + for _, dns := range user.DNSNames { + dnsNamesSet[dns] = struct{}{} + } + merged.DNSNames = make([]string, 0, len(dnsNamesSet)) + for dns := range dnsNamesSet { + merged.DNSNames = append(merged.DNSNames, dns) + } + + // Merge ports based on patchMergeKey (name) + merged.Ports = nn.mergeNetworkPorts(merged.Ports, user.Ports) + + // Merge pod selector if provided + if user.PodSelector != nil { + if merged.PodSelector == nil { + merged.PodSelector = &metav1.LabelSelector{} + } + if user.PodSelector.MatchLabels != nil { + if merged.PodSelector.MatchLabels == nil { + merged.PodSelector.MatchLabels = make(map[string]string) + } + for k, v := range user.PodSelector.MatchLabels { + merged.PodSelector.MatchLabels[k] = v + } + } + merged.PodSelector.MatchExpressions = append( + 
merged.PodSelector.MatchExpressions, + user.PodSelector.MatchExpressions..., + ) + } + + // Merge namespace selector if provided + if user.NamespaceSelector != nil { + if merged.NamespaceSelector == nil { + merged.NamespaceSelector = &metav1.LabelSelector{} + } + if user.NamespaceSelector.MatchLabels != nil { + if merged.NamespaceSelector.MatchLabels == nil { + merged.NamespaceSelector.MatchLabels = make(map[string]string) + } + for k, v := range user.NamespaceSelector.MatchLabels { + merged.NamespaceSelector.MatchLabels[k] = v + } + } + merged.NamespaceSelector.MatchExpressions = append( + merged.NamespaceSelector.MatchExpressions, + user.NamespaceSelector.MatchExpressions..., + ) + } + + // Take the user's IP address if provided + if user.IPAddress != "" { + merged.IPAddress = user.IPAddress + } + + // Take the user's type if provided + if user.Type != "" { + merged.Type = user.Type + } + + return *merged +} + +func (nn *NetworkNeighborhoodCacheImpl) mergeNetworkPorts(normalPorts, userPorts []v1beta1.NetworkPort) []v1beta1.NetworkPort { + // Use map to track existing ports by name (patchMergeKey) + portMap := make(map[string]int) + for i, port := range normalPorts { + portMap[port.Name] = i + } + + // Merge or append user ports + for _, userPort := range userPorts { + if idx, exists := portMap[userPort.Name]; exists { + // Update existing port + normalPorts[idx] = userPort + } else { + // Append new port + normalPorts = append(normalPorts, userPort) + } } - return ap, nil + return normalPorts } -func getSlug(p *unstructured.Unstructured) (string, error) { - pod := workloadinterface.NewWorkloadObj(p.Object) +func (nn *NetworkNeighborhoodCacheImpl) deleteNetworkNeighborhood(obj runtime.Object) { + netNeighborhood := obj.(*v1beta1.NetworkNeighborhood) + nnName := objectcache.MetaUniqueName(netNeighborhood) + + if isUserManagedNN(netNeighborhood) { + // For user-managed network neighborhoods, we need to use the base name for cleanup + baseNNName := strings.TrimPrefix(netNeighborhood.GetName(), "ug-") + baseNNUniqueName := objectcache.UniqueName(netNeighborhood.GetNamespace(), baseNNName) + nn.userManagedNetworkNeighborhood.Delete(baseNNUniqueName) + + logger.L().Debug("deleted user-managed network neighborhood from cache", + helpers.String("nnName", netNeighborhood.GetName()), + helpers.String("baseNN", baseNNName)) + } else { + // For normal network neighborhoods, clean up all related data + nn.slugToNetworkNeighborhood.Delete(nnName) + nn.slugToState.Delete(nnName) + nn.allNetworkNeighborhoods.Remove(nnName) + + logger.L().Debug("deleted network neighborhood from cache", + helpers.String("uniqueSlug", nnName)) + } + + // Clean up any orphaned user-managed network neighborhoods + nn.cleanupOrphanedUserManagedNNs() +} + +func (nn *NetworkNeighborhoodCacheImpl) getNetworkNeighborhood(namespace, name string) (*v1beta1.NetworkNeighborhood, error) { + return nn.storageClient.NetworkNeighborhoods(namespace).Get(context.Background(), name, metav1.GetOptions{}) +} + +func getSlug(p *corev1.Pod) (string, error) { + // need to set APIVersion and Kind before unstructured conversion, preparing for instanceID extraction + p.APIVersion = "v1" + p.Kind = "Pod" + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&p) + if err != nil { + return "", fmt.Errorf("failed to convert runtime object to unstructured: %w", err) + } + pod := workloadinterface.NewWorkloadObj(unstructuredObj) if pod == nil { return "", fmt.Errorf("failed to get workload object") } @@ -314,5 +536,27 @@ func 
getSlug(p *unstructured.Unstructured) (string, error) { return "", fmt.Errorf("failed to get slug") } return slug, nil +} + +// Add cleanup method for orphaned user-managed network neighborhoods +func (nn *NetworkNeighborhoodCacheImpl) cleanupOrphanedUserManagedNNs() { + nn.userManagedNetworkNeighborhood.Range(func(key string, value *v1beta1.NetworkNeighborhood) bool { + if nn.slugToNetworkNeighborhood.Has(key) { + // If base network neighborhood exists but merge didn't happen for some reason, + // attempt merge again and cleanup + if baseNN := nn.slugToNetworkNeighborhood.Get(key); baseNN != nil { + mergedNN := nn.performMerge(baseNN, value) + nn.slugToNetworkNeighborhood.Set(key, mergedNN) + nn.userManagedNetworkNeighborhood.Delete(key) + logger.L().Debug("cleaned up orphaned user-managed network neighborhood", helpers.String("name", key)) + } + } + return true + }) +} +func isUserManagedNN(nn *v1beta1.NetworkNeighborhood) bool { + return nn.Annotations != nil && + nn.Annotations["kubescape.io/managed-by"] == "User" && + strings.HasPrefix(nn.GetName(), "ug-") } diff --git a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go index c78bda2a..49f415be 100644 --- a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go +++ b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache_test.go @@ -5,24 +5,20 @@ import ( "fmt" "slices" "testing" + "time" + mapset "github.com/deckarep/golang-set/v2" "github.com/kubescape/node-agent/mocks" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/watcher" - - mapset "github.com/deckarep/golang-set/v2" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/kubescape/storage/pkg/generated/clientset/versioned/fake" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - dynamicfake "k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/kubernetes/scheme" - - "github.com/kubescape/k8s-interface/k8sinterface" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func init() { @@ -33,49 +29,49 @@ func init() { func Test_AddHandlers(t *testing.T) { tests := []struct { - f func(ap *NetworkNeighborhoodCacheImpl, ctx context.Context, obj *unstructured.Unstructured) - obj *unstructured.Unstructured + f func(ap *NetworkNeighborhoodCacheImpl, ctx context.Context, obj runtime.Object) + obj runtime.Object name string slug string length int }{ { name: "add network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), f: (*NetworkNeighborhoodCacheImpl).AddHandler, slug: "default/replicaset-nginx-77b4fdf86c", length: 1, }, { name: "add pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), f: (*NetworkNeighborhoodCacheImpl).AddHandler, slug: "default/replicaset-collection-94c495554", length: 6, }, { name: "modify network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), f: (*NetworkNeighborhoodCacheImpl).ModifyHandler, length: 1, }, { name: "modify pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, 
mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), f: (*NetworkNeighborhoodCacheImpl).ModifyHandler, slug: "default/replicaset-collection-94c495554", length: 6, }, { name: "delete network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), f: (*NetworkNeighborhoodCacheImpl).DeleteHandler, slug: "default/replicaset-nginx-77b4fdf86c", length: 0, }, { name: "delete pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), f: (*NetworkNeighborhoodCacheImpl).DeleteHandler, slug: "default/replicaset-collection-94c495554", length: 0, @@ -83,14 +79,14 @@ func Test_AddHandlers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.obj.SetNamespace("default") - k8sClient := k8sinterface.NewKubernetesApiMock() - nn := NewNetworkNeighborhoodCache("", k8sClient) + tt.obj.(metav1.Object).SetNamespace("default") + storageClient := fake.NewSimpleClientset().SpdxV1beta1() + nn := NewNetworkNeighborhoodCache("", storageClient, 0) nn.slugToContainers.Set(tt.slug, mapset.NewSet[string]()) tt.f(nn, context.Background(), tt.obj) - switch mocks.TestKinds(tt.obj.GetKind()) { + switch mocks.TestKinds(tt.obj.GetObjectKind().GroupVersionKind().Kind) { case mocks.TestKindNN: assert.Equal(t, tt.length, nn.allNetworkNeighborhoods.Cardinality()) case mocks.TestKindPod: @@ -104,17 +100,17 @@ func Test_addNetworkNeighborhood(t *testing.T) { // add single network neighborhood tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string annotations map[string]string - preCreatedPods []*unstructured.Unstructured // pre created pods - preCreatedAP []*unstructured.Unstructured // pre created network neighborhoods + preCreatedPods []runtime.Object // pre created pods + preCreatedAP []runtime.Object // pre created application profiles shouldAdd bool shouldAddToPod bool }{ { name: "add single network neighborhood nginx", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), annotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -123,7 +119,7 @@ func Test_addNetworkNeighborhood(t *testing.T) { }, { name: "add network neighborhood with partial annotation", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), annotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "partial", @@ -132,7 +128,7 @@ func Test_addNetworkNeighborhood(t *testing.T) { }, { name: "ignore single network neighborhood with incomplete annotation", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), annotations: map[string]string{ "kubescape.io/status": "ready", "kubescape.io/completion": "complete", @@ -141,8 +137,8 @@ func Test_addNetworkNeighborhood(t *testing.T) { }, { name: "add network neighborhood to pod", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), - preCreatedPods: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection)}, + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), + preCreatedPods: []runtime.Object{mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection)}, annotations: 
map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -152,8 +148,8 @@ func Test_addNetworkNeighborhood(t *testing.T) { }, { name: "add network neighborhood without pod", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), - preCreatedPods: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx)}, + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), + preCreatedPods: []runtime.Object{mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx)}, annotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -165,29 +161,26 @@ func Test_addNetworkNeighborhood(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { if len(tt.annotations) != 0 { - tt.obj.SetAnnotations(tt.annotations) + tt.obj.(metav1.Object).SetAnnotations(tt.annotations) } namespace := fmt.Sprintf("default-%d", i) - k8sClient := k8sinterface.NewKubernetesApiMock() var runtimeObjs []runtime.Object - tt.obj.SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) for i := range tt.preCreatedPods { - tt.preCreatedPods[i].SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.preCreatedPods[i])) + tt.preCreatedPods[i].(metav1.Object).SetNamespace(namespace) } for i := range tt.preCreatedAP { - tt.preCreatedAP[i].SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.preCreatedAP[i])) + tt.preCreatedAP[i].(metav1.Object).SetNamespace(namespace) + runtimeObjs = append(runtimeObjs, tt.preCreatedAP[i]) } - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.obj)) + tt.obj.(metav1.Object).SetNamespace(namespace) + runtimeObjs = append(runtimeObjs, tt.obj) - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, runtimeObjs...) 
+ storageClient := fake.NewSimpleClientset(runtimeObjs...).SpdxV1beta1() - nn := NewNetworkNeighborhoodCache("", k8sClient) + nn := NewNetworkNeighborhoodCache("", storageClient, 0) for i := range tt.preCreatedPods { nn.addPod(tt.preCreatedPods[i]) @@ -197,9 +190,10 @@ func Test_addNetworkNeighborhood(t *testing.T) { } nn.addNetworkNeighborhood(context.Background(), tt.obj) + time.Sleep(1 * time.Second) // add is async // test if the network neighborhood is added to the cache - apName := objectcache.UnstructuredUniqueName(tt.obj) + apName := objectcache.MetaUniqueName(tt.obj.(metav1.Object)) if tt.shouldAdd { assert.Equal(t, 1, nn.allNetworkNeighborhoods.Cardinality()) } else { @@ -210,7 +204,7 @@ func Test_addNetworkNeighborhood(t *testing.T) { assert.True(t, nn.slugToContainers.Has(apName)) assert.True(t, nn.slugToNetworkNeighborhood.Has(apName)) for i := range tt.preCreatedPods { - p, _ := objectcache.UnstructuredToPod(tt.preCreatedPods[i]) + p := tt.preCreatedPods[i].(*corev1.Pod) for _, c := range objectcache.ListContainersIDs(p) { assert.NotNil(t, nn.GetNetworkNeighborhood(c)) } @@ -219,7 +213,7 @@ func Test_addNetworkNeighborhood(t *testing.T) { assert.False(t, nn.slugToContainers.Has(apName)) assert.False(t, nn.slugToNetworkNeighborhood.Has(apName)) for i := range tt.preCreatedPods { - p, _ := objectcache.UnstructuredToPod(tt.preCreatedPods[i]) + p := tt.preCreatedPods[i].(*corev1.Pod) for _, c := range objectcache.ListContainersIDs(p) { assert.Nil(t, nn.GetNetworkNeighborhood(c)) } @@ -231,7 +225,7 @@ func Test_addNetworkNeighborhood(t *testing.T) { func Test_deleteNetworkNeighborhood(t *testing.T) { tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string slug string slugs []string @@ -239,21 +233,21 @@ func Test_deleteNetworkNeighborhood(t *testing.T) { }{ { name: "delete network neighborhood nginx", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), slug: "/replicaset-nginx-77b4fdf86c", slugs: []string{"/replicaset-nginx-77b4fdf86c"}, shouldDelete: true, }, { name: "delete network neighborhood from many", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), slug: "/replicaset-nginx-77b4fdf86c", slugs: []string{"/replicaset-nginx-11111", "/replicaset-nginx-77b4fdf86c", "/replicaset-nginx-22222"}, shouldDelete: true, }, { name: "ignore delete network neighborhood nginx", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), slug: "/replicaset-nginx-77b4fdf86c", slugs: []string{"/replicaset-nginx-77b4fdf86c"}, shouldDelete: false, @@ -261,7 +255,7 @@ func Test_deleteNetworkNeighborhood(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - nn := NewNetworkNeighborhoodCache("", nil) + nn := NewNetworkNeighborhoodCache("", nil, 0) nn.allNetworkNeighborhoods.Append(tt.slugs...) 
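
The tests now seed the generated typed fake clientset instead of a fake dynamic client. A hedged standalone sketch of that pattern, using only calls that appear in this patch (`fake.NewSimpleClientset`, `SpdxV1beta1`, `NetworkNeighborhoods(...).Get`); the object name and namespace are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
	"github.com/kubescape/storage/pkg/generated/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Seed the fake clientset with a typed object; no unstructured conversion needed.
	seed := &v1beta1.NetworkNeighborhood{
		ObjectMeta: metav1.ObjectMeta{Name: "replicaset-nginx-77b4fdf86c", Namespace: "default"},
	}
	client := fake.NewSimpleClientset(seed).SpdxV1beta1()

	// Read it back through the same interface getNetworkNeighborhood uses.
	nn, err := client.NetworkNeighborhoods("default").Get(context.Background(), "replicaset-nginx-77b4fdf86c", metav1.GetOptions{})
	fmt.Println(nn.GetName(), err)
}
```
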
for _, i := range tt.slugs { @@ -287,7 +281,7 @@ func Test_deleteNetworkNeighborhood(t *testing.T) { func Test_deletePod(t *testing.T) { tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string containers []string slug string @@ -296,19 +290,19 @@ func Test_deletePod(t *testing.T) { }{ { name: "delete pod", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"b0416f7a782e62badf28e03fc9b82305cd02e9749dc24435d8592fab66349c78"}, shouldDelete: true, }, { name: "pod not deleted", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"blabla"}, shouldDelete: false, }, { name: "delete pod with slug", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"b0416f7a782e62badf28e03fc9b82305cd02e9749dc24435d8592fab66349c78"}, slug: "/replicaset-nginx-77b4fdf86c", otherSlugs: []string{"1111111", "222222"}, @@ -316,7 +310,7 @@ func Test_deletePod(t *testing.T) { }, { name: "delete pod with slug", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), containers: []string{"b0416f7a782e62badf28e03fc9b82305cd02e9749dc24435d8592fab66349c78"}, slug: "/replicaset-nginx-77b4fdf86c", shouldDelete: true, @@ -324,7 +318,7 @@ func Test_deletePod(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - nn := NewNetworkNeighborhoodCache("", nil) + nn := NewNetworkNeighborhoodCache("", nil, 0) for _, i := range tt.otherSlugs { nn.slugToContainers.Set(i, mapset.NewSet[string]()) nn.slugToNetworkNeighborhood.Set(i, &v1beta1.NetworkNeighborhood{}) @@ -432,7 +426,7 @@ func Test_GetNetworkNeighborhood(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - nn := NewNetworkNeighborhoodCache("", k8sinterface.NewKubernetesApiMock()) + nn := NewNetworkNeighborhoodCache("", fake.NewSimpleClientset().SpdxV1beta1(), 0) for _, c := range tt.pods { nn.containerToSlug.Set(c.containerID, c.slug) @@ -457,8 +451,8 @@ func Test_addNetworkNeighborhood_existing(t *testing.T) { } // add single network neighborhood tests := []struct { - obj1 *unstructured.Unstructured - obj2 *unstructured.Unstructured + obj1 runtime.Object + obj2 runtime.Object annotations1 map[string]string annotations2 map[string]string name string @@ -467,8 +461,8 @@ func Test_addNetworkNeighborhood_existing(t *testing.T) { }{ { name: "network neighborhood already exists", - obj1: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), - obj2: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj1: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), + obj2: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), pods: []podToSlug{ { podName: "nginx-77b4fdf86c", @@ -479,8 +473,8 @@ func Test_addNetworkNeighborhood_existing(t *testing.T) { }, { name: "remove network neighborhood", - obj1: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), - obj2: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj1: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), + obj2: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), pods: []podToSlug{ { podName: "nginx-77b4fdf86c", @@ -499,20 +493,19 @@ func Test_addNetworkNeighborhood_existing(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if 
len(tt.annotations1) != 0 { - tt.obj1.SetAnnotations(tt.annotations1) + tt.obj1.(metav1.Object).SetAnnotations(tt.annotations1) } if len(tt.annotations2) != 0 { - tt.obj2.SetAnnotations(tt.annotations2) + tt.obj2.(metav1.Object).SetAnnotations(tt.annotations2) } - k8sClient := k8sinterface.NewKubernetesApiMock() var runtimeObjs []runtime.Object - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.obj1)) + runtimeObjs = append(runtimeObjs, tt.obj1) - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, runtimeObjs...) + storageClient := fake.NewSimpleClientset(runtimeObjs...).SpdxV1beta1() - nn := NewNetworkNeighborhoodCache("", k8sClient) + nn := NewNetworkNeighborhoodCache("", storageClient, 0) // add pods for i := range tt.pods { @@ -521,6 +514,7 @@ func Test_addNetworkNeighborhood_existing(t *testing.T) { } nn.addNetworkNeighborhood(context.Background(), tt.obj1) + time.Sleep(1 * time.Second) // add is async nn.addNetworkNeighborhood(context.Background(), tt.obj2) // test if the network neighborhood is added to the cache @@ -533,45 +527,19 @@ func Test_addNetworkNeighborhood_existing(t *testing.T) { } } -func Test_unstructuredToNetworkNeighborhood(t *testing.T) { - - tests := []struct { - obj *unstructured.Unstructured - name string - }{ - { - name: "nginx network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), - }, - { - name: "collection network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p, err := unstructuredToNetworkNeighborhood(tt.obj) - assert.NoError(t, err) - assert.Equal(t, tt.obj.GetName(), p.GetName()) - assert.Equal(t, tt.obj.GetLabels(), p.GetLabels()) - assert.Equal(t, tt.obj.GetAnnotations(), p.GetAnnotations()) - }) - } -} - func Test_getNetworkNeighborhood(t *testing.T) { type args struct { name string } tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object args args wantErr bool }{ { name: "nginx network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), args: args{ name: "replicaset-nginx-77b4fdf86c", }, @@ -579,7 +547,7 @@ func Test_getNetworkNeighborhood(t *testing.T) { }, { name: "collection network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), args: args{ name: "replicaset-collection-94c495554", }, @@ -587,7 +555,7 @@ func Test_getNetworkNeighborhood(t *testing.T) { }, { name: "collection network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), args: args{ name: "replicaset-nginx-77b4fdf86c", }, @@ -596,11 +564,10 @@ func Test_getNetworkNeighborhood(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - k8sClient := k8sinterface.NewKubernetesApiMock() - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, mocks.UnstructuredToRuntime(tt.obj)) + storageClient := fake.NewSimpleClientset(tt.obj).SpdxV1beta1() nn := &NetworkNeighborhoodCacheImpl{ - k8sClient: k8sClient, + storageClient: storageClient, } a, err := nn.getNetworkNeighborhood("", tt.args.name) @@ -609,14 +576,14 @@ func Test_getNetworkNeighborhood(t *testing.T) { return } assert.NoError(t, err) - assert.Equal(t, tt.obj.GetName(), a.GetName()) - 
assert.Equal(t, tt.obj.GetLabels(), a.GetLabels()) + assert.Equal(t, tt.obj.(metav1.Object).GetName(), a.GetName()) + assert.Equal(t, tt.obj.(metav1.Object).GetLabels(), a.GetLabels()) }) } } func Test_WatchResources(t *testing.T) { - nn := NewNetworkNeighborhoodCache("test-node", nil) + nn := NewNetworkNeighborhoodCache("test-node", nil, 0) expectedPodWatchResource := watcher.NewWatchResource(schema.GroupVersionResource{ Group: "", @@ -638,38 +605,21 @@ func Test_WatchResources(t *testing.T) { func TestGetSlug(t *testing.T) { tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object expected string expectErr bool }{ { name: "Test with valid object", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), expected: "replicaset-collection-94c495554", expectErr: false, }, - { - name: "Test with invalid object", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "Unknown", - "metadata": map[string]interface{}{ - "name": "unknown-1", - }, - }, - }, - expected: "", - expectErr: true, - }, { name: "Test with object without instanceIDs", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "unknown-1", - }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unknown-1", }, }, expected: "", @@ -679,8 +629,8 @@ func TestGetSlug(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.obj.SetNamespace("default") - result, err := getSlug(tt.obj) + tt.obj.(metav1.Object).SetNamespace("default") + result, err := getSlug(tt.obj.(*corev1.Pod)) if tt.expectErr { assert.Error(t, err) } else { @@ -695,18 +645,18 @@ func Test_addPod(t *testing.T) { // add single network neighborhood tests := []struct { - obj *unstructured.Unstructured + obj runtime.Object name string addedContainers []string ignoredContainers []string preCreatedNNAnnotations map[string]string - preCreatedNN *unstructured.Unstructured // pre created network neighborhoods + preCreatedNN runtime.Object // pre created network neighborhoods shouldAddToNN bool }{ { name: "add pod with partial network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), - preCreatedNN: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), + preCreatedNN: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), preCreatedNNAnnotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "partial", @@ -723,8 +673,8 @@ func Test_addPod(t *testing.T) { }, { name: "add pod with network neighborhood", - obj: mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection), - preCreatedNN: mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), + obj: mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection), + preCreatedNN: mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), preCreatedNNAnnotations: map[string]string{ "kubescape.io/status": "completed", "kubescape.io/completion": "complete", @@ -743,30 +693,28 @@ func Test_addPod(t *testing.T) { for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { if len(tt.preCreatedNNAnnotations) != 0 { - tt.preCreatedNN.SetAnnotations(tt.preCreatedNNAnnotations) + tt.preCreatedNN.(metav1.Object).SetAnnotations(tt.preCreatedNNAnnotations) } namespace := fmt.Sprintf("default-%d", i) - k8sClient := 
k8sinterface.NewKubernetesApiMock() var runtimeObjs []runtime.Object - tt.obj.SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) - tt.preCreatedNN.SetNamespace(namespace) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.preCreatedNN)) - runtimeObjs = append(runtimeObjs, mocks.UnstructuredToRuntime(tt.obj)) + tt.preCreatedNN.(metav1.Object).SetNamespace(namespace) + runtimeObjs = append(runtimeObjs, tt.preCreatedNN) - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, runtimeObjs...) + storageClient := fake.NewSimpleClientset(runtimeObjs...).SpdxV1beta1() - nn := NewNetworkNeighborhoodCache("", k8sClient) + nn := NewNetworkNeighborhoodCache("", storageClient, 0) nn.addNetworkNeighborhood(context.Background(), tt.preCreatedNN) + time.Sleep(1 * time.Second) // add is async + tt.obj.(metav1.Object).SetNamespace(namespace) nn.addPod(tt.obj) // test if the network neighborhood is added to the cache assert.Equal(t, 1, nn.allNetworkNeighborhoods.Cardinality()) - assert.True(t, nn.slugToContainers.Has(objectcache.UnstructuredUniqueName(tt.preCreatedNN))) + assert.True(t, nn.slugToContainers.Has(objectcache.MetaUniqueName(tt.preCreatedNN.(metav1.Object)))) c := nn.containerToSlug.Keys() slices.Sort(c) @@ -786,3 +734,243 @@ func Test_addPod(t *testing.T) { }) } } + +func Test_performMerge(t *testing.T) { + tests := []struct { + name string + baseNN *v1beta1.NetworkNeighborhood + userNN *v1beta1.NetworkNeighborhood + expectedResult *v1beta1.NetworkNeighborhood + validateResults func(*testing.T, *v1beta1.NetworkNeighborhood) + }{ + { + name: "merge basic network neighbors", + baseNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Ingress: []v1beta1.NetworkNeighbor{ + { + Identifier: "ingress1", + Type: "http", + DNSNames: []string{"example.com"}, + Ports: []v1beta1.NetworkPort{ + {Name: "http", Protocol: "TCP", Port: ptr(int32(80))}, + }, + }, + }, + }, + }, + }, + }, + userNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Ingress: []v1beta1.NetworkNeighbor{ + { + Identifier: "ingress2", + Type: "https", + DNSNames: []string{"secure.example.com"}, + Ports: []v1beta1.NetworkPort{ + {Name: "https", Protocol: "TCP", Port: ptr(int32(443))}, + }, + }, + }, + }, + }, + }, + }, + validateResults: func(t *testing.T, result *v1beta1.NetworkNeighborhood) { + assert.Len(t, result.Spec.Containers, 1) + assert.Len(t, result.Spec.Containers[0].Ingress, 2) + + // Verify both ingress rules are present + ingressIdentifiers := []string{ + result.Spec.Containers[0].Ingress[0].Identifier, + result.Spec.Containers[0].Ingress[1].Identifier, + } + assert.Contains(t, ingressIdentifiers, "ingress1") + assert.Contains(t, ingressIdentifiers, "ingress2") + }, + }, + { + name: "merge label selectors", + baseNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "base", + }, + }, + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "egress1", + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "role": "db", + }, + }, + }, + }, + }, + }, + }, + }, + userNN: &v1beta1.NetworkNeighborhood{ + Spec: 
v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "env": "prod", + }, + }, + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "egress1", + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "version": "v1", + }, + }, + }, + }, + }, + }, + }, + }, + validateResults: func(t *testing.T, result *v1beta1.NetworkNeighborhood) { + // Verify merged label selectors + assert.Equal(t, "base", result.Spec.LabelSelector.MatchLabels["app"]) + assert.Equal(t, "prod", result.Spec.LabelSelector.MatchLabels["env"]) + + // Verify merged pod selector in egress rule + container := result.Spec.Containers[0] + podSelector := container.Egress[0].PodSelector + assert.Equal(t, "db", podSelector.MatchLabels["role"]) + assert.Equal(t, "v1", podSelector.MatchLabels["version"]) + }, + }, + { + name: "merge network ports", + baseNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "egress1", + Ports: []v1beta1.NetworkPort{ + {Name: "http", Protocol: "TCP", Port: ptr(int32(80))}, + }, + }, + }, + }, + }, + }, + }, + userNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "egress1", + Ports: []v1beta1.NetworkPort{ + {Name: "http", Protocol: "TCP", Port: ptr(int32(8080))}, // Override existing port + {Name: "https", Protocol: "TCP", Port: ptr(int32(443))}, // Add new port + }, + }, + }, + }, + }, + }, + }, + validateResults: func(t *testing.T, result *v1beta1.NetworkNeighborhood) { + container := result.Spec.Containers[0] + ports := container.Egress[0].Ports + + // Verify ports are properly merged + assert.Len(t, ports, 2) + + // Find HTTP port - should be updated to 8080 + for _, port := range ports { + if port.Name == "http" { + assert.Equal(t, int32(8080), *port.Port) + } + if port.Name == "https" { + assert.Equal(t, int32(443), *port.Port) + } + } + }, + }, + { + name: "merge DNS names", + baseNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "egress1", + DNSNames: []string{"example.com", "api.example.com"}, + }, + }, + }, + }, + }, + }, + userNN: &v1beta1.NetworkNeighborhood{ + Spec: v1beta1.NetworkNeighborhoodSpec{ + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "container1", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "egress1", + DNSNames: []string{"api.example.com", "admin.example.com"}, + }, + }, + }, + }, + }, + }, + validateResults: func(t *testing.T, result *v1beta1.NetworkNeighborhood) { + container := result.Spec.Containers[0] + dnsNames := container.Egress[0].DNSNames + + // Verify DNS names are properly merged and deduplicated + assert.Len(t, dnsNames, 3) + assert.Contains(t, dnsNames, "example.com") + assert.Contains(t, dnsNames, "api.example.com") + assert.Contains(t, dnsNames, "admin.example.com") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nn := NewNetworkNeighborhoodCache("test-node", nil, 0) + result := nn.performMerge(tt.baseNN, tt.userNN) + + if tt.validateResults != nil { + tt.validateResults(t, result) 
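+			// performMerge deep-copies the base object before merging, so the
+			// validateResults call above inspected a fresh object and the baseNN
+			// fixture is not mutated between test cases.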
+ } + }) + } +} + +// Helper function to create pointer to int32 +func ptr(i int32) *int32 { + return &i +} diff --git a/pkg/objectcache/objectcache_interface.go b/pkg/objectcache/objectcache_interface.go index 0ac6c7f3..8621b0b8 100644 --- a/pkg/objectcache/objectcache_interface.go +++ b/pkg/objectcache/objectcache_interface.go @@ -4,6 +4,7 @@ type ObjectCache interface { K8sObjectCache() K8sObjectCache ApplicationProfileCache() ApplicationProfileCache NetworkNeighborhoodCache() NetworkNeighborhoodCache + DnsCache() DnsCache } var _ ObjectCache = (*ObjectCacheMock)(nil) @@ -24,3 +25,7 @@ func (om *ObjectCacheMock) ApplicationProfileCache() ApplicationProfileCache { func (om *ObjectCacheMock) NetworkNeighborhoodCache() NetworkNeighborhoodCache { return &NetworkNeighborhoodCacheMock{} } + +func (om *ObjectCacheMock) DnsCache() DnsCache { + return &DnsCacheMock{} +} diff --git a/pkg/objectcache/v1/objectcache.go b/pkg/objectcache/v1/objectcache.go index 6c478bae..9986077e 100644 --- a/pkg/objectcache/v1/objectcache.go +++ b/pkg/objectcache/v1/objectcache.go @@ -10,13 +10,15 @@ type ObjectCacheImpl struct { k objectcache.K8sObjectCache ap objectcache.ApplicationProfileCache np objectcache.NetworkNeighborhoodCache + dc objectcache.DnsCache } -func NewObjectCache(k objectcache.K8sObjectCache, ap objectcache.ApplicationProfileCache, np objectcache.NetworkNeighborhoodCache) *ObjectCacheImpl { +func NewObjectCache(k objectcache.K8sObjectCache, ap objectcache.ApplicationProfileCache, np objectcache.NetworkNeighborhoodCache, dc objectcache.DnsCache) *ObjectCacheImpl { return &ObjectCacheImpl{ k: k, ap: ap, np: np, + dc: dc, } } @@ -30,3 +32,7 @@ func (o *ObjectCacheImpl) ApplicationProfileCache() objectcache.ApplicationProfi func (o *ObjectCacheImpl) NetworkNeighborhoodCache() objectcache.NetworkNeighborhoodCache { return o.np } + +func (o *ObjectCacheImpl) DnsCache() objectcache.DnsCache { + return o.dc +} diff --git a/pkg/objectcache/v1/objectcache_test.go b/pkg/objectcache/v1/objectcache_test.go index 7790c10b..207722ea 100644 --- a/pkg/objectcache/v1/objectcache_test.go +++ b/pkg/objectcache/v1/objectcache_test.go @@ -10,18 +10,18 @@ import ( func TestK8sObjectCache(t *testing.T) { k := &objectcache.K8sObjectCacheMock{} - k8sObjectCache := NewObjectCache(k, nil, nil) + k8sObjectCache := NewObjectCache(k, nil, nil, nil) assert.NotNil(t, k8sObjectCache.K8sObjectCache()) } func TestApplicationProfileCache(t *testing.T) { ap := &objectcache.ApplicationProfileCacheMock{} - k8sObjectCache := NewObjectCache(nil, ap, nil) + k8sObjectCache := NewObjectCache(nil, ap, nil, nil) assert.NotNil(t, k8sObjectCache.ApplicationProfileCache()) } func TestNetworkNeighborhoodCache(t *testing.T) { nn := &objectcache.NetworkNeighborhoodCacheMock{} - k8sObjectCache := NewObjectCache(nil, nil, nn) + k8sObjectCache := NewObjectCache(nil, nil, nn, nil) assert.NotNil(t, k8sObjectCache.NetworkNeighborhoodCache()) } diff --git a/pkg/processmanager/process_manager_interface.go b/pkg/processmanager/process_manager_interface.go new file mode 100644 index 00000000..0f36f367 --- /dev/null +++ b/pkg/processmanager/process_manager_interface.go @@ -0,0 +1,20 @@ +package processmanager + +import ( + apitypes "github.com/armosec/armoapi-go/armotypes" + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/node-agent/pkg/utils" +) + +// ProcessManagerClient is the interface for the process manager client. +// It provides methods to get process tree for a container or a PID. 
+// The manager is responsible for maintaining the process tree for all containers. +type ProcessManagerClient interface { + GetProcessTreeForPID(containerID string, pid int) (apitypes.Process, error) + // PopulateInitialProcesses is called to populate the initial process tree (parsed from /proc) for all containers. + PopulateInitialProcesses() error + + // ReportEvent will be called to report new exec events to the process manager. + ReportEvent(eventType utils.EventType, event utils.K8sEvent) + ContainerCallback(notif containercollection.PubSubEvent) +} diff --git a/pkg/processmanager/process_manager_mock.go b/pkg/processmanager/process_manager_mock.go new file mode 100644 index 00000000..68fcdd14 --- /dev/null +++ b/pkg/processmanager/process_manager_mock.go @@ -0,0 +1,32 @@ +package processmanager + +import ( + apitypes "github.com/armosec/armoapi-go/armotypes" + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/node-agent/pkg/utils" +) + +type ProcessManagerMock struct { +} + +var _ ProcessManagerClient = (*ProcessManagerMock)(nil) + +func CreateProcessManagerMock() *ProcessManagerMock { + return &ProcessManagerMock{} +} + +func (p *ProcessManagerMock) GetProcessTreeForPID(containerID string, pid int) (apitypes.Process, error) { + return apitypes.Process{}, nil +} + +func (p *ProcessManagerMock) PopulateInitialProcesses() error { + return nil +} + +func (p *ProcessManagerMock) ReportEvent(eventType utils.EventType, event utils.K8sEvent) { + // no-op +} + +func (p *ProcessManagerMock) ContainerCallback(notif containercollection.PubSubEvent) { + // no-op +} diff --git a/pkg/processmanager/v1/process_manager.go b/pkg/processmanager/v1/process_manager.go new file mode 100644 index 00000000..74a95e5c --- /dev/null +++ b/pkg/processmanager/v1/process_manager.go @@ -0,0 +1,419 @@ +package processmanager + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/goradd/maps" + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/prometheus/procfs" + + apitypes "github.com/armosec/armoapi-go/armotypes" + tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/node-agent/pkg/utils" +) + +const ( + cleanupInterval = 1 * time.Minute + maxTreeDepth = 50 +) + +type ProcessManager struct { + containerIdToShimPid maps.SafeMap[string, uint32] + processTree maps.SafeMap[uint32, apitypes.Process] + // For testing purposes we allow to override the function that gets process info from /proc. + getProcessFromProc func(pid int) (apitypes.Process, error) +} + +func CreateProcessManager(ctx context.Context) *ProcessManager { + pm := &ProcessManager{ + getProcessFromProc: getProcessFromProc, + } + go pm.startCleanupRoutine(ctx) + return pm +} + +// PopulateInitialProcesses scans the /proc filesystem to build the initial process tree +// for all registered container shim processes. It establishes parent-child relationships +// between processes and adds them to the process tree if they are descendants of a shim. 
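+// A typical startup order (an illustrative assumption, not enforced here) is
+// to register running containers first and then scan /proc once:
+//
+//	pm := CreateProcessManager(ctx)
+//	// wire pm.ContainerCallback into the container collection, then:
+//	_ = pm.PopulateInitialProcesses()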
+func (p *ProcessManager) PopulateInitialProcesses() error { + if len(p.containerIdToShimPid.Keys()) == 0 { + return nil + } + + fs, err := procfs.NewFS("/proc") + if err != nil { + return fmt.Errorf("failed to open procfs: %w", err) + } + + procs, err := fs.AllProcs() + if err != nil { + return fmt.Errorf("failed to read all procs: %w", err) + } + + tempProcesses := make(map[uint32]apitypes.Process, len(procs)) + shimPIDs := make(map[uint32]struct{}) + + p.containerIdToShimPid.Range(func(_ string, shimPID uint32) bool { + shimPIDs[shimPID] = struct{}{} + return true + }) + + // First collect all processes + for _, proc := range procs { + if process, err := p.getProcessFromProc(proc.PID); err == nil { + tempProcesses[process.PID] = process + } + } + + // Then build relationships and add to tree + for pid, process := range tempProcesses { + if p.isDescendantOfShim(pid, process.PPID, shimPIDs, tempProcesses) { + if parent, exists := tempProcesses[process.PPID]; exists { + parent.Children = append(parent.Children, process) + tempProcesses[process.PPID] = parent + } + p.processTree.Set(pid, process) + } + } + + return nil +} + +// isDescendantOfShim checks if a process with the given PID is a descendant of any +// registered shim process. It traverses the process tree upwards until it either finds +// a shim process or reaches the maximum tree depth to prevent infinite loops. +func (p *ProcessManager) isDescendantOfShim(pid uint32, ppid uint32, shimPIDs map[uint32]struct{}, processes map[uint32]apitypes.Process) bool { + visited := make(map[uint32]bool) + currentPID := pid + for depth := 0; depth < maxTreeDepth; depth++ { + if currentPID == 0 || visited[currentPID] { + return false + } + visited[currentPID] = true + + if _, isShim := shimPIDs[ppid]; isShim { + return true + } + + process, exists := processes[ppid] + if !exists { + return false + } + currentPID = ppid + ppid = process.PPID + } + return false +} + +// ContainerCallback handles container lifecycle events (creation and removal). +// For new containers, it identifies the container's shim process and adds it to the tracking system. +// For removed containers, it cleans up the associated processes from the process tree. +func (p *ProcessManager) ContainerCallback(notif containercollection.PubSubEvent) { + containerID := notif.Container.Runtime.BasicRuntimeMetadata.ContainerID + + switch notif.Type { + case containercollection.EventTypeAddContainer: + containerPID := uint32(notif.Container.Pid) + if process, err := p.getProcessFromProc(int(containerPID)); err == nil { + shimPID := process.PPID + p.containerIdToShimPid.Set(containerID, shimPID) + p.addProcess(process) + } else { + logger.L().Warning("Failed to get container process info", + helpers.String("containerID", containerID), + helpers.Error(err)) + } + + case containercollection.EventTypeRemoveContainer: + if shimPID, exists := p.containerIdToShimPid.Load(containerID); exists { + p.removeProcessesUnderShim(shimPID) + p.containerIdToShimPid.Delete(containerID) + } + } +} + +// removeProcessesUnderShim removes all processes that are descendants of the specified +// shim process PID from the process tree. This is typically called when a container +// is being removed. 
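+// The upward walk keeps a visited set, so it is safe against PPID cycles, and
+// it removes every process whose ancestor chain reaches the shim: for a chain
+// shim(10) -> 50 -> 100, removing the container drops both 50 and 100.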
+func (p *ProcessManager) removeProcessesUnderShim(shimPID uint32) { + var pidsToRemove []uint32 + + p.processTree.Range(func(pid uint32, process apitypes.Process) bool { + currentPID := pid + visited := make(map[uint32]bool) + + for currentPID != 0 && !visited[currentPID] { + visited[currentPID] = true + if proc, exists := p.processTree.Load(currentPID); exists { + if proc.PPID == shimPID { + pidsToRemove = append(pidsToRemove, pid) + break + } + currentPID = proc.PPID + } else { + break + } + } + return true + }) + + // Remove in reverse order to handle parent-child relationships + for i := len(pidsToRemove) - 1; i >= 0; i-- { + p.removeProcess(pidsToRemove[i]) + } +} + +// addProcess adds or updates a process in the process tree and maintains the +// parent-child relationships between processes. If the process already exists +// with a different parent, it updates the relationships accordingly. +func (p *ProcessManager) addProcess(process apitypes.Process) { + // First, check if the process already exists and has a different parent + if existingProc, exists := p.processTree.Load(process.PID); exists && existingProc.PPID != process.PPID { + // Remove from old parent's children list + if oldParent, exists := p.processTree.Load(existingProc.PPID); exists { + newChildren := make([]apitypes.Process, 0, len(oldParent.Children)) + for _, child := range oldParent.Children { + if child.PID != process.PID { + newChildren = append(newChildren, child) + } + } + oldParent.Children = newChildren + p.processTree.Set(oldParent.PID, oldParent) + } + } + + // Update the process in the tree + p.processTree.Set(process.PID, process) + + // Update new parent's children list + if parent, exists := p.processTree.Load(process.PPID); exists { + newChildren := make([]apitypes.Process, 0, len(parent.Children)+1) + hasProcess := false + for _, child := range parent.Children { + if child.PID == process.PID { + hasProcess = true + newChildren = append(newChildren, process) + } else { + newChildren = append(newChildren, child) + } + } + if !hasProcess { + newChildren = append(newChildren, process) + } + parent.Children = newChildren + p.processTree.Set(parent.PID, parent) + } +} + +// removeProcess removes a process from the process tree and updates the parent-child +// relationships. Children of the removed process are reassigned to their grandparent +// to maintain the process hierarchy. +func (p *ProcessManager) removeProcess(pid uint32) { + if process, exists := p.processTree.Load(pid); exists { + if parent, exists := p.processTree.Load(process.PPID); exists { + newChildren := make([]apitypes.Process, 0, len(parent.Children)) + for _, child := range parent.Children { + if child.PID != pid { + newChildren = append(newChildren, child) + } + } + parent.Children = newChildren + p.processTree.Set(parent.PID, parent) + } + + for _, child := range process.Children { + if childProcess, exists := p.processTree.Load(child.PID); exists { + childProcess.PPID = process.PPID + p.addProcess(childProcess) + } + } + + p.processTree.Delete(pid) + } +} + +// GetProcessTreeForPID retrieves the process tree for a specific PID within a container. +// It returns the process and all its ancestors up to the container's shim process. +// If the process is not in the tree, it attempts to fetch it from /proc. 
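+// The result is a single-branch tree: each ancestor below the shim wraps the
+// previous result as its only child, so for shim -> bash -> sh -> curl, a
+// lookup of curl's PID returns bash{sh{curl}}.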
+func (p *ProcessManager) GetProcessTreeForPID(containerID string, pid int) (apitypes.Process, error) { + if !p.containerIdToShimPid.Has(containerID) { + return apitypes.Process{}, fmt.Errorf("container ID %s not found", containerID) + } + + targetPID := uint32(pid) + if !p.processTree.Has(targetPID) { + process, err := p.getProcessFromProc(pid) + if err != nil { + return apitypes.Process{}, fmt.Errorf("process %d not found: %v", pid, err) + } + p.addProcess(process) + } + + result := p.processTree.Get(targetPID) + currentPID := result.PPID + seen := make(map[uint32]bool) + + for currentPID != p.containerIdToShimPid.Get(containerID) && currentPID != 0 { + if seen[currentPID] { + break + } + seen[currentPID] = true + + if p.processTree.Has(currentPID) { + parent := p.processTree.Get(currentPID) + parentCopy := parent + parentCopy.Children = []apitypes.Process{result} + result = parentCopy + currentPID = parent.PPID + } else { + break + } + } + + return result, nil +} + +// ReportEvent handles process execution events from the system. +// It specifically processes execve events to track new process creations +// and updates the process tree accordingly. +func (p *ProcessManager) ReportEvent(eventType utils.EventType, event utils.K8sEvent) { + if eventType != utils.ExecveEventType { + return + } + + execEvent, ok := event.(*tracerexectype.Event) + if !ok { + return + } + + process := apitypes.Process{ + PID: uint32(execEvent.Pid), + PPID: uint32(execEvent.Ppid), + Comm: execEvent.Comm, + Uid: &execEvent.Uid, + Gid: &execEvent.Gid, + Hardlink: execEvent.ExePath, + UpperLayer: &execEvent.UpperLayer, + Path: execEvent.ExePath, + Cwd: execEvent.Cwd, + Pcomm: execEvent.Pcomm, + Cmdline: strings.Join(execEvent.Args, " "), + Children: []apitypes.Process{}, + } + + p.addProcess(process) +} + +// startCleanupRoutine starts a goroutine that periodically runs the cleanup +// function to remove dead processes from the process tree. It continues until +// the context is cancelled. +// TODO: Register eBPF tracer to get process exit events and remove dead processes immediately. +func (p *ProcessManager) startCleanupRoutine(ctx context.Context) { + ticker := time.NewTicker(cleanupInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + p.cleanup() + case <-ctx.Done(): + return + } + } +} + +// cleanup removes dead processes from the process tree by checking if each +// process in the tree is still alive in the system. +func (p *ProcessManager) cleanup() { + deadPids := make(map[uint32]bool) + p.processTree.Range(func(pid uint32, _ apitypes.Process) bool { + if !isProcessAlive(int(pid)) { + deadPids[pid] = true + } + return true + }) + + for pid := range deadPids { + logger.L().Debug("Removing dead process", helpers.Int("pid", int(pid))) + p.removeProcess(pid) + } +} + +// getProcessFromProc retrieves process information from the /proc filesystem +// for a given PID. It collects various process attributes such as command line, +// working directory, and user/group IDs. 
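+// The Uid/Gid reported are the effective IDs (second field of the Uid/Gid
+// lines in /proc/<pid>/status), and the command line falls back to the stat
+// Comm value when /proc/<pid>/cmdline is empty, as it is for kernel threads.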
+func getProcessFromProc(pid int) (apitypes.Process, error) {
+	proc, err := procfs.NewProc(pid)
+	if err != nil {
+		return apitypes.Process{}, fmt.Errorf("failed to get process info: %v", err)
+	}
+
+	stat, err := utils.GetProcessStat(pid)
+	if err != nil {
+		return apitypes.Process{}, fmt.Errorf("failed to get process stat: %v", err)
+	}
+
+	var uid, gid uint32
+	if status, err := proc.NewStatus(); err == nil {
+		if len(status.UIDs) > 1 {
+			uid = uint32(status.UIDs[1])
+		}
+		if len(status.GIDs) > 1 {
+			gid = uint32(status.GIDs[1])
+		}
+	}
+
+	cmdline, _ := proc.CmdLine()
+	if len(cmdline) == 0 {
+		cmdline = []string{stat.Comm}
+	}
+
+	cwd, _ := proc.Cwd()
+	path, _ := proc.Executable()
+	pcomm := func() string {
+		if stat.PPID <= 0 {
+			return ""
+		}
+
+		parentProc, err := procfs.NewProc(stat.PPID)
+		if err != nil {
+			return ""
+		}
+
+		parentStat, err := parentProc.Stat()
+		if err != nil {
+			return ""
+		}
+
+		return parentStat.Comm
+	}()
+
+	return apitypes.Process{
+		PID:      uint32(pid),
+		PPID:     uint32(stat.PPID),
+		Comm:     stat.Comm,
+		Pcomm:    pcomm,
+		Uid:      &uid,
+		Gid:      &gid,
+		Cmdline:  strings.Join(cmdline, " "),
+		Cwd:      cwd,
+		Path:     path,
+		Children: []apitypes.Process{},
+	}, nil
+}
+
+// isProcessAlive checks if a process with the given PID is still running
+// by attempting to read its information from the /proc filesystem.
+func isProcessAlive(pid int) bool {
+	proc, err := procfs.NewProc(pid)
+	if err != nil {
+		return false
+	}
+	_, err = proc.Stat()
+	return err == nil
+}
diff --git a/pkg/processmanager/v1/process_manager_test.go b/pkg/processmanager/v1/process_manager_test.go
new file mode 100644
index 00000000..6405763f
--- /dev/null
+++ b/pkg/processmanager/v1/process_manager_test.go
@@ -0,0 +1,1046 @@
+package processmanager
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+
+	apitypes "github.com/armosec/armoapi-go/armotypes"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection"
+	tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types"
+	"github.com/inspektor-gadget/inspektor-gadget/pkg/types"
+	"github.com/kubescape/node-agent/pkg/utils"
+)
+
+// mockProcessAdder registers a fake /proc entry for the given PID in the
+// test's mock process table.
+type mockProcessAdder func(pid int, ppid uint32, comm string)
+
+// setupTestProcessManager creates a ProcessManager whose /proc lookups are
+// served from an in-memory map, so tests can run without real processes.
+func setupTestProcessManager(t *testing.T) (*ProcessManager, mockProcessAdder) {
+	ctx, cancel := context.WithCancel(context.Background())
+	pm := CreateProcessManager(ctx)
+
+	// Create process mock map
+	mockProcesses := make(map[int]apitypes.Process)
+
+	// Store original function
+	originalGetProcessFromProc := pm.getProcessFromProc
+
+	// Replace with mock version
+	pm.getProcessFromProc = func(pid int) (apitypes.Process, error) {
+		if proc, exists := mockProcesses[pid]; exists {
+			return proc, nil
+		}
+		return apitypes.Process{}, fmt.Errorf("mock process not found: %d", pid)
+	}
+
+	// Set up cleanup
+	t.Cleanup(func() {
+		cancel()
+		pm.getProcessFromProc = originalGetProcessFromProc
+	})
+
+	// Return the process manager and the mock process adder function
+	return pm, func(pid int, ppid uint32, comm string) {
+		uid := uint32(1000)
+		gid := uint32(1000)
+		mockProcesses[pid] = apitypes.Process{
+			PID:     uint32(pid),
+			PPID:    ppid,
+			Comm:    comm,
+			Cmdline: comm,
+			Uid:     &uid,
+			Gid:     &gid,
+		}
+	}
+}
+
+func TestProcessManagerBasics(t *testing.T) {
+	pm, addMockProcess := setupTestProcessManager(t)
+
+	containerID := "test-container-1"
+	shimPID := uint32(999)
+	containerPID := uint32(1000)
+
+	// Add mock container process with shim as parent
+	addMockProcess(int(containerPID), shimPID, "container-main")
+
+	// Register container
+	pm.ContainerCallback(containercollection.PubSubEvent{
+		Type: containercollection.EventTypeAddContainer,
+		Container: &containercollection.Container{
+			Runtime: containercollection.RuntimeMetadata{
+				BasicRuntimeMetadata: types.BasicRuntimeMetadata{
+					ContainerID: containerID,
+				},
+			},
+			Pid: containerPID,
+		},
+	})
+
+	// Verify shim was recorded
+	assert.True(t, pm.containerIdToShimPid.Has(containerID))
+	assert.Equal(t, shimPID, pm.containerIdToShimPid.Get(containerID))
+
+	// Verify container process was added
+	containerProc, exists := pm.processTree.Load(containerPID)
+	assert.True(t, exists)
+	assert.Equal(t, shimPID, containerProc.PPID)
+}
+
+func TestProcessTracking(t *testing.T) {
+	pm, addMockProcess := setupTestProcessManager(t)
+
+	containerID := "test-container-1"
+	shimPID := uint32(999)
+	containerPID := uint32(1000)
+
+	addMockProcess(int(containerPID), shimPID, "container-main")
+
+	pm.ContainerCallback(containercollection.PubSubEvent{
+		Type: containercollection.EventTypeAddContainer,
+		Container: &containercollection.Container{
+			Runtime: containercollection.RuntimeMetadata{
+				BasicRuntimeMetadata: types.BasicRuntimeMetadata{
+					ContainerID: containerID,
+				},
+			},
+			Pid: containerPID,
+		},
+	})
+
+	testCases := []struct {
+		name   string
+		event  tracerexectype.Event
+		verify func(t *testing.T, pm *ProcessManager)
+	}{
+		{
+			name: "Container child process",
+			event: tracerexectype.Event{
+				Pid:  1001,
+				Ppid: containerPID,
+				Comm: "nginx",
+				Args: []string{"nginx", "-g", "daemon off;"},
+			},
+			verify: func(t *testing.T, pm *ProcessManager) {
+				proc, exists := pm.processTree.Load(1001)
+				require.True(t, exists)
+				assert.Equal(t, containerPID, proc.PPID)
+				assert.Equal(t, "nginx", proc.Comm)
+			},
+		},
+		{
+			name: "Exec process (direct child of shim)",
+			event: tracerexectype.Event{
+				Pid:  1002,
+				Ppid: shimPID,
+				Comm: "bash",
+				Args: []string{"bash"},
+			},
+			verify: func(t *testing.T, pm *ProcessManager) {
+				proc, exists := pm.processTree.Load(1002)
+				require.True(t, exists)
+				assert.Equal(t, shimPID, proc.PPID)
+				assert.Equal(t, "bash", proc.Comm)
+			},
+		},
+		{
+			name: "Nested process",
+			event: tracerexectype.Event{
+				Pid:  1003,
+				Ppid: 1001,
+				Comm: "nginx-worker",
+				Args: []string{"nginx", "worker process"},
+			},
+			verify: func(t *testing.T, pm *ProcessManager) {
+				proc, exists := pm.processTree.Load(1003)
+				require.True(t, exists)
+				assert.Equal(t, uint32(1001), proc.PPID)
+
+				parent, exists := pm.processTree.Load(1001)
+				require.True(t, exists)
+				hasChild := false
+				for _, child := range parent.Children {
+					if child.PID == 1003 {
+						hasChild = true
+						break
+					}
+				}
+				assert.True(t, hasChild)
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			pm.ReportEvent(utils.ExecveEventType, &tc.event)
+			tc.verify(t, pm)
+		})
+	}
+}
+
+func TestProcessRemoval(t *testing.T) {
+	pm, addMockProcess := setupTestProcessManager(t)
+
+	containerID := "test-container-1"
+	shimPID := uint32(999)
+	containerPID := uint32(1000)
+
+	addMockProcess(int(containerPID), shimPID, "container-main")
+
+	pm.ContainerCallback(containercollection.PubSubEvent{
+		Type: containercollection.EventTypeAddContainer,
+		Container: &containercollection.Container{
+			Runtime: containercollection.RuntimeMetadata{
+				BasicRuntimeMetadata: types.BasicRuntimeMetadata{
+					ContainerID: containerID,
+				},
+			},
+			Pid: containerPID,
+		},
+	})
+
+	// Create a process tree
+	processes := []struct {
+		pid  uint32
+		ppid uint32
+		comm string
+	}{
+		{1001, containerPID, "parent"},
+		{1002, 1001, "child1"},
+		{1003, 1002, "grandchild1"},
+		{1004, 1002, "grandchild2"},
+	}
+
+	// Add processes
+	for _, proc := range processes {
+		event := &tracerexectype.Event{
+			Pid:  proc.pid,
+			Ppid: proc.ppid,
+			Comm: proc.comm,
+		}
+		pm.ReportEvent(utils.ExecveEventType, event)
+	}
+
+	// Verify initial structure
+	for _, proc := range processes {
+		assert.True(t, pm.processTree.Has(proc.pid))
+	}
+
+	// Remove middle process and verify tree reorganization
+	pm.removeProcess(1002)
+
+	// Verify process was removed
+	assert.False(t, pm.processTree.Has(1002))
+
+	// Verify children were reassigned to parent
+	parent, exists := pm.processTree.Load(1001)
+	require.True(t, exists)
+
+	// Should now have both grandchildren
+	childPIDs := make(map[uint32]bool)
+	for _, child := range parent.Children {
+		childPIDs[child.PID] = true
+	}
+	assert.True(t, childPIDs[1003])
+	assert.True(t, childPIDs[1004])
+
+	// Verify grandchildren's PPID was updated
+	for _, pid := range []uint32{1003, 1004} {
+		proc, exists := pm.processTree.Load(pid)
+		require.True(t, exists)
+		assert.Equal(t, uint32(1001), proc.PPID)
+	}
+}
+
+func TestContainerRemoval(t *testing.T) {
+	pm, addMockProcess := setupTestProcessManager(t)
+
+	containerID := "test-container-1"
+	shimPID := uint32(999)
+	containerPID := uint32(1000)
+
+	addMockProcess(int(containerPID), shimPID, "container-main")
+
+	pm.ContainerCallback(containercollection.PubSubEvent{
+		Type: containercollection.EventTypeAddContainer,
+		Container: &containercollection.Container{
+			Runtime: containercollection.RuntimeMetadata{
+				BasicRuntimeMetadata: types.BasicRuntimeMetadata{
+					ContainerID: containerID,
+				},
+			},
+			Pid: containerPID,
+		},
+	})
+
+	// Create various processes under the container
+	processes := []struct {
+		pid  uint32
+		ppid uint32
+		comm string
+	}{
+		{containerPID, shimPID, "container-main"},
+		{1001, containerPID, "app"},
+		{1002, 1001, "worker"},
+		{1003, shimPID, "exec"}, // direct child of shim
+	}
+
+	for _, proc := range processes {
+		event := &tracerexectype.Event{
+			Pid:  proc.pid,
+			Ppid: proc.ppid,
+			Comm: proc.comm,
+		}
+		pm.ReportEvent(utils.ExecveEventType, event)
+	}
+
+	// Remove container
+	pm.ContainerCallback(containercollection.PubSubEvent{
+		Type: containercollection.EventTypeRemoveContainer,
+		Container: &containercollection.Container{
+			Runtime: containercollection.RuntimeMetadata{
+				BasicRuntimeMetadata: types.BasicRuntimeMetadata{
+					ContainerID: containerID,
+				},
+			},
+			Pid: containerPID,
+		},
+	})
+
+	// Verify all processes were removed
+	for _, proc := range processes {
+		assert.False(t, pm.processTree.Has(proc.pid))
+	}
+
+	// Verify container was removed from mapping
+	assert.False(t, pm.containerIdToShimPid.Has(containerID))
+}
+
+func TestMultipleContainers(t *testing.T) {
+	pm, addMockProcess := setupTestProcessManager(t)
+
+	containers := []struct {
+		id           string
+		shimPID      uint32
+		containerPID uint32
+	}{
+		{"container-1", 999, 1000},
+		{"container-2", 1998, 2000},
+	}
+
+	// Add containers
+	for _, c := range containers {
+		addMockProcess(int(c.containerPID), c.shimPID, fmt.Sprintf("container-%s", c.id))
+
+		pm.ContainerCallback(containercollection.PubSubEvent{
+			Type: containercollection.EventTypeAddContainer,
+			Container: &containercollection.Container{
+				Runtime: containercollection.RuntimeMetadata{
+
BasicRuntimeMetadata: types.BasicRuntimeMetadata{ + ContainerID: c.id, + }, + }, + Pid: c.containerPID, + }, + }) + + // Add some processes to each container + event1 := &tracerexectype.Event{ + Pid: c.containerPID + 1, + Ppid: c.containerPID, + Comm: "process-1", + } + event2 := &tracerexectype.Event{ + Pid: c.containerPID + 2, + Ppid: c.shimPID, + Comm: "exec-process", + } + + pm.ReportEvent(utils.ExecveEventType, event1) + pm.ReportEvent(utils.ExecveEventType, event2) + } + + // Verify each container's processes + for _, c := range containers { + // Check container process + proc, exists := pm.processTree.Load(c.containerPID) + require.True(t, exists) + assert.Equal(t, c.shimPID, proc.PPID) + + // Check child process + childProc, exists := pm.processTree.Load(c.containerPID + 1) + require.True(t, exists) + assert.Equal(t, c.containerPID, childProc.PPID) + + // Check exec process + execProc, exists := pm.processTree.Load(c.containerPID + 2) + require.True(t, exists) + assert.Equal(t, c.shimPID, execProc.PPID) + } + + // Remove first container + pm.ContainerCallback(containercollection.PubSubEvent{ + Type: containercollection.EventTypeRemoveContainer, + Container: &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{ + BasicRuntimeMetadata: types.BasicRuntimeMetadata{ + ContainerID: containers[0].id, + }, + }, + Pid: containers[0].containerPID, + }, + }) + + // Verify first container's processes are gone + assert.False(t, pm.processTree.Has(containers[0].containerPID)) + assert.False(t, pm.processTree.Has(containers[0].containerPID+1)) + assert.False(t, pm.processTree.Has(containers[0].containerPID+2)) + + // Verify second container's processes remain + assert.True(t, pm.processTree.Has(containers[1].containerPID)) + assert.True(t, pm.processTree.Has(containers[1].containerPID+1)) + assert.True(t, pm.processTree.Has(containers[1].containerPID+2)) +} + +func TestErrorCases(t *testing.T) { + pm, addMockProcess := setupTestProcessManager(t) + + t.Run("get non-existent process tree", func(t *testing.T) { + _, err := pm.GetProcessTreeForPID("non-existent", 1000) + assert.Error(t, err) + }) + + t.Run("process with non-existent parent", func(t *testing.T) { + containerID := "test-container" + shimPID := uint32(999) + containerPID := uint32(1000) + + addMockProcess(int(containerPID), shimPID, "container-main") + + pm.ContainerCallback(containercollection.PubSubEvent{ + Type: containercollection.EventTypeAddContainer, + Container: &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{ + BasicRuntimeMetadata: types.BasicRuntimeMetadata{ + ContainerID: containerID, + }, + }, + Pid: containerPID, + }, + }) + + // Add process with non-existent parent + event := &tracerexectype.Event{ + Pid: 2000, + Ppid: 1500, // Non-existent PPID + Comm: "orphan", + } + pm.ReportEvent(utils.ExecveEventType, event) + + // Process should still be added + assert.True(t, pm.processTree.Has(2000)) + }) +} + +func TestRaceConditions(t *testing.T) { + pm, addMockProcess := setupTestProcessManager(t) + + containerID := "test-container" + shimPID := uint32(999) + containerPID := uint32(1000) + + // Setup container + addMockProcess(int(containerPID), shimPID, "container-main") + pm.ContainerCallback(containercollection.PubSubEvent{ + Type: containercollection.EventTypeAddContainer, + Container: &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{ + BasicRuntimeMetadata: types.BasicRuntimeMetadata{ + ContainerID: containerID, + }, + }, + Pid: 
containerPID, + }, + }) + + processCount := 100 + var mu sync.Mutex + processStates := make(map[uint32]struct { + added bool + removed bool + }) + + // Pre-populate process states + for i := 0; i < processCount; i++ { + pid := uint32(2000 + i) + processStates[pid] = struct { + added bool + removed bool + }{false, false} + } + + // Channel to signal between goroutines + removeDone := make(chan bool) + addDone := make(chan bool) + + // Goroutine to remove processes (run first) + go func() { + for i := 0; i < processCount; i++ { + if i%2 == 0 { + pid := uint32(2000 + i) + mu.Lock() + if state, exists := processStates[pid]; exists { + state.removed = true + processStates[pid] = state + } + mu.Unlock() + pm.removeProcess(pid) + } + } + removeDone <- true + }() + + // Wait for removals to complete before starting additions + <-removeDone + + // Goroutine to add processes + go func() { + for i := 0; i < processCount; i++ { + pid := uint32(2000 + i) + // Only add if not marked for removal + mu.Lock() + state := processStates[pid] + if !state.removed { + event := &tracerexectype.Event{ + Pid: pid, + Ppid: shimPID, + Comm: fmt.Sprintf("process-%d", i), + } + state.added = true + processStates[pid] = state + mu.Unlock() + pm.ReportEvent(utils.ExecveEventType, event) + } else { + mu.Unlock() + } + } + addDone <- true + }() + + // Wait for additions to complete + <-addDone + + // Verify final state + remainingCount := 0 + pm.processTree.Range(func(pid uint32, process apitypes.Process) bool { + if pid >= 2000 && pid < 2000+uint32(processCount) { + mu.Lock() + state := processStates[pid] + mu.Unlock() + + if state.removed { + t.Errorf("Process %d exists but was marked for removal", pid) + } + if !state.added { + t.Errorf("Process %d exists but was not marked as added", pid) + } + remainingCount++ + } + return true + }) + + // Verify all processes marked as removed are actually gone + mu.Lock() + for pid, state := range processStates { + if state.removed { + if pm.processTree.Has(pid) { + t.Errorf("Process %d was marked for removal but still exists", pid) + } + } else if state.added { + if !pm.processTree.Has(pid) { + t.Errorf("Process %d was marked as added but doesn't exist", pid) + } + } + } + mu.Unlock() + + // We expect exactly half of the processes to remain (odd-numbered ones) + expectedCount := processCount / 2 + assert.Equal(t, expectedCount, remainingCount, + "Expected exactly %d processes, got %d", expectedCount, remainingCount) + + // Verify all remaining processes have correct parent + pm.processTree.Range(func(pid uint32, process apitypes.Process) bool { + if pid >= 2000 && pid < 2000+uint32(processCount) { + assert.Equal(t, shimPID, process.PPID, + "Process %d should have shim as parent", pid) + } + return true + }) +} + +func TestDuplicateProcessHandling(t *testing.T) { + pm, addMockProcess := setupTestProcessManager(t) + + containerID := "test-container" + shimPID := uint32(999) + containerPID := uint32(1000) + + // Setup container + addMockProcess(int(containerPID), shimPID, "container-main") + pm.ContainerCallback(containercollection.PubSubEvent{ + Type: containercollection.EventTypeAddContainer, + Container: &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{ + BasicRuntimeMetadata: types.BasicRuntimeMetadata{ + ContainerID: containerID, + }, + }, + Pid: containerPID, + }, + }) + + t.Run("update process with same parent", func(t *testing.T) { + // First add a parent process + parentEvent := &tracerexectype.Event{ + Pid: 1001, + Ppid: containerPID, + Comm: 
"parent-process", + Args: []string{"parent-process", "--initial"}, + } + pm.ReportEvent(utils.ExecveEventType, parentEvent) + + // Add child process + childEvent := &tracerexectype.Event{ + Pid: 1002, + Ppid: 1001, + Comm: "child-process", + Args: []string{"child-process", "--initial"}, + } + pm.ReportEvent(utils.ExecveEventType, childEvent) + + // Verify initial state + parent, exists := pm.processTree.Load(1001) + require.True(t, exists) + assert.Equal(t, "parent-process", parent.Comm) + assert.Equal(t, "parent-process --initial", parent.Cmdline) + assert.Len(t, parent.Children, 1) + assert.Equal(t, uint32(1002), parent.Children[0].PID) + + // Add same child process again with different arguments + updatedChildEvent := &tracerexectype.Event{ + Pid: 1002, + Ppid: 1001, + Comm: "child-process", + Args: []string{"child-process", "--updated"}, + } + pm.ReportEvent(utils.ExecveEventType, updatedChildEvent) + + // Verify the process was updated + updatedChild, exists := pm.processTree.Load(1002) + require.True(t, exists) + assert.Equal(t, "child-process --updated", updatedChild.Cmdline) + + // Verify parent's children list was updated + updatedParent, exists := pm.processTree.Load(1001) + require.True(t, exists) + assert.Len(t, updatedParent.Children, 1) + assert.Equal(t, "child-process --updated", updatedParent.Children[0].Cmdline) + }) + + t.Run("update process with different parent", func(t *testing.T) { + // Move process to different parent + differentParentEvent := &tracerexectype.Event{ + Pid: 1002, + Ppid: containerPID, + Comm: "child-process", + Args: []string{"child-process", "--new-parent"}, + } + pm.ReportEvent(utils.ExecveEventType, differentParentEvent) + + // Verify process was updated with new parent + movedChild, exists := pm.processTree.Load(1002) + require.True(t, exists) + assert.Equal(t, containerPID, movedChild.PPID) + assert.Equal(t, "child-process --new-parent", movedChild.Cmdline) + + // Verify old parent no longer has the child + oldParent, exists := pm.processTree.Load(1001) + require.True(t, exists) + assert.Empty(t, oldParent.Children, "Old parent should have no children") + + // Verify new parent has the child + containerProcess, exists := pm.processTree.Load(containerPID) + require.True(t, exists) + hasChild := false + for _, child := range containerProcess.Children { + if child.PID == 1002 { + hasChild = true + assert.Equal(t, "child-process --new-parent", child.Cmdline) + } + } + assert.True(t, hasChild, "New parent should have the child") + }) +} + +func TestProcessReparenting(t *testing.T) { + pm, addMockProcess := setupTestProcessManager(t) + + containerID := "test-container" + shimPID := uint32(999) + containerPID := uint32(1000) + + // Setup container + addMockProcess(int(containerPID), shimPID, "container-main") + pm.ContainerCallback(containercollection.PubSubEvent{ + Type: containercollection.EventTypeAddContainer, + Container: &containercollection.Container{ + Runtime: containercollection.RuntimeMetadata{ + BasicRuntimeMetadata: types.BasicRuntimeMetadata{ + ContainerID: containerID, + }, + }, + Pid: containerPID, + }, + }) + + t.Run("reparent to nearest living ancestor", func(t *testing.T) { + // Create a chain of processes: + // shim -> grandparent -> parent -> child + + // Create grandparent process + grandparentPID := uint32(2000) + grandparentEvent := &tracerexectype.Event{ + Pid: grandparentPID, + Ppid: shimPID, + Comm: "grandparent", + Args: []string{"grandparent"}, + } + pm.ReportEvent(utils.ExecveEventType, grandparentEvent) + + // Create 
parent process + parentPID := uint32(2001) + parentEvent := &tracerexectype.Event{ + Pid: parentPID, + Ppid: grandparentPID, + Comm: "parent", + Args: []string{"parent"}, + } + pm.ReportEvent(utils.ExecveEventType, parentEvent) + + // Create child process + childPID := uint32(2002) + childEvent := &tracerexectype.Event{ + Pid: childPID, + Ppid: parentPID, + Comm: "child", + Args: []string{"child"}, + } + pm.ReportEvent(utils.ExecveEventType, childEvent) + + // Verify initial hierarchy + child, exists := pm.processTree.Load(childPID) + require.True(t, exists) + assert.Equal(t, parentPID, child.PPID) + + parent, exists := pm.processTree.Load(parentPID) + require.True(t, exists) + assert.Equal(t, grandparentPID, parent.PPID) + + // When parent dies, child should be reparented to grandparent + pm.removeProcess(parentPID) + + // Verify child was reparented to grandparent + child, exists = pm.processTree.Load(childPID) + require.True(t, exists) + assert.Equal(t, grandparentPID, child.PPID, "Child should be reparented to grandparent") + + // Verify grandparent has the child in its children list + grandparent, exists := pm.processTree.Load(grandparentPID) + require.True(t, exists) + hasChild := false + for _, c := range grandparent.Children { + if c.PID == childPID { + hasChild = true + break + } + } + assert.True(t, hasChild, "Grandparent should have the reparented child") + + // Now if grandparent dies too, child should be reparented to shim + pm.removeProcess(grandparentPID) + + child, exists = pm.processTree.Load(childPID) + require.True(t, exists) + assert.Equal(t, shimPID, child.PPID, "Child should be reparented to shim when grandparent dies") + }) + + t.Run("reparent multiple children", func(t *testing.T) { + // Create a parent with multiple children + parentPID := uint32(3000) + parentEvent := &tracerexectype.Event{ + Pid: parentPID, + Ppid: shimPID, + Comm: "parent", + Args: []string{"parent"}, + } + pm.ReportEvent(utils.ExecveEventType, parentEvent) + + // Create several children + childPIDs := []uint32{3001, 3002, 3003} + for _, pid := range childPIDs { + childEvent := &tracerexectype.Event{ + Pid: pid, + Ppid: parentPID, + Comm: fmt.Sprintf("child-%d", pid), + Args: []string{"child"}, + } + pm.ReportEvent(utils.ExecveEventType, childEvent) + } + + // Create a subprocess under one of the children + grandchildPID := uint32(3004) + grandchildEvent := &tracerexectype.Event{ + Pid: grandchildPID, + Ppid: childPIDs[0], + Comm: "grandchild", + Args: []string{"grandchild"}, + } + pm.ReportEvent(utils.ExecveEventType, grandchildEvent) + + // When parent dies, all direct children should be reparented to shim + pm.removeProcess(parentPID) + + // Verify all children were reparented to shim + for _, childPID := range childPIDs { + child, exists := pm.processTree.Load(childPID) + require.True(t, exists) + assert.Equal(t, shimPID, child.PPID, "Child should be reparented to shim") + } + + // When first child dies, its grandchild should be reparented to shim too + pm.removeProcess(childPIDs[0]) + + grandchild, exists := pm.processTree.Load(grandchildPID) + require.True(t, exists) + assert.Equal(t, shimPID, grandchild.PPID, "Grandchild should be reparented to shim") + }) +} + +func TestRemoveProcessesUnderShim(t *testing.T) { + tests := []struct { + name string + initialTree map[uint32]apitypes.Process + shimPID uint32 + expectedTree map[uint32]apitypes.Process + description string + }{ + { + name: "simple_process_tree", + initialTree: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: 
"shim", Children: []apitypes.Process{}}, // shim process + 200: {PID: 200, PPID: 100, Comm: "parent", Children: []apitypes.Process{}}, // direct child of shim + 201: {PID: 201, PPID: 200, Comm: "child1", Children: []apitypes.Process{}}, // child of parent + 202: {PID: 202, PPID: 200, Comm: "child2", Children: []apitypes.Process{}}, // another child of parent + }, + shimPID: 100, + expectedTree: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim", Children: []apitypes.Process{}}, // only shim remains + }, + description: "Should remove all processes under shim including children of children", + }, + { + name: "empty_tree", + initialTree: map[uint32]apitypes.Process{}, + shimPID: 100, + expectedTree: map[uint32]apitypes.Process{}, + description: "Should handle empty process tree gracefully", + }, + { + name: "orphaned_processes", + initialTree: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim", Children: []apitypes.Process{}}, // shim process + 200: {PID: 200, PPID: 100, Comm: "parent", Children: []apitypes.Process{}}, // direct child of shim + 201: {PID: 201, PPID: 999, Comm: "orphan", Children: []apitypes.Process{}}, // orphaned process (parent doesn't exist) + }, + shimPID: 100, + expectedTree: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim", Children: []apitypes.Process{}}, // shim remains + 201: {PID: 201, PPID: 999, Comm: "orphan", Children: []apitypes.Process{}}, // orphan unaffected + }, + description: "Should handle orphaned processes correctly", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + // Create process manager with test data + pm := &ProcessManager{} + + // Populate initial process tree + for pid, process := range tc.initialTree { + pm.processTree.Set(pid, process) + } + + // Call the function under test + pm.removeProcessesUnderShim(tc.shimPID) + + // Verify results + assert.Equal(t, len(tc.expectedTree), len(pm.processTree.Keys()), + "Process tree size mismatch after removal") + + // Check each expected process + for pid, expectedProcess := range tc.expectedTree { + actualProcess, exists := pm.processTree.Load(pid) + assert.True(t, exists, "Expected process %d not found in tree", pid) + assert.Equal(t, expectedProcess, actualProcess, + "Process %d details don't match expected values", pid) + } + + // Verify no unexpected processes remain + pm.processTree.Range(func(pid uint32, process apitypes.Process) bool { + _, shouldExist := tc.expectedTree[pid] + assert.True(t, shouldExist, + "Unexpected process %d found in tree", pid) + return true + }) + }) + } +} + +func TestIsDescendantOfShim(t *testing.T) { + tests := []struct { + name string + processes map[uint32]apitypes.Process + shimPIDs map[uint32]struct{} + pid uint32 + ppid uint32 + expected bool + description string + }{ + { + name: "direct_child_of_shim", + processes: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim"}, + 200: {PID: 200, PPID: 100, Comm: "child"}, + }, + shimPIDs: map[uint32]struct{}{ + 100: {}, + }, + pid: 200, + ppid: 100, + expected: true, + description: "Process is a direct child of shim", + }, + { + name: "indirect_descendant", + processes: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim"}, + 200: {PID: 200, PPID: 100, Comm: "parent"}, + 300: {PID: 300, PPID: 200, Comm: "child"}, + }, + shimPIDs: map[uint32]struct{}{ + 100: {}, + }, + pid: 300, + ppid: 200, + expected: true, + description: "Process is an indirect descendant of shim", + }, + { + name: "not_a_descendant", + 
processes: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim"}, + 200: {PID: 200, PPID: 2, Comm: "unrelated"}, + }, + shimPIDs: map[uint32]struct{}{ + 100: {}, + }, + pid: 200, + ppid: 2, + expected: false, + description: "Process is not a descendant of any shim", + }, + { + name: "circular_reference", + processes: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim"}, + 200: {PID: 200, PPID: 300, Comm: "circular1"}, + 300: {PID: 300, PPID: 200, Comm: "circular2"}, + }, + shimPIDs: map[uint32]struct{}{ + 100: {}, + }, + pid: 200, + ppid: 300, + expected: false, + description: "Process is part of a circular reference", + }, + { + name: "process_chain_exceeds_max_depth", + processes: func() map[uint32]apitypes.Process { + // Create a chain where the target process is maxTreeDepth + 1 steps away from any shim + procs := map[uint32]apitypes.Process{ + 1: {PID: 1, PPID: 0, Comm: "init"}, // init process + 2: {PID: 2, PPID: 1, Comm: "shim"}, // shim process + } + // Create a chain starting far from the shim + currentPPID := uint32(100) // Start with a different base to avoid conflicts + targetPID := uint32(100 + maxTreeDepth + 1) + + // Build the chain backwards from target to base + for pid := targetPID; pid > currentPPID; pid-- { + procs[pid] = apitypes.Process{ + PID: pid, + PPID: pid - 1, + Comm: fmt.Sprintf("process-%d", pid), + } + } + // Add the base process that's not connected to shim + procs[currentPPID] = apitypes.Process{ + PID: currentPPID, + PPID: currentPPID - 1, + Comm: fmt.Sprintf("process-%d", currentPPID), + } + return procs + }(), + shimPIDs: map[uint32]struct{}{ + 2: {}, // Shim PID + }, + pid: uint32(100 + maxTreeDepth + 1), // Target process at the end of chain + ppid: uint32(100 + maxTreeDepth), // Its immediate parent + expected: false, + description: "Process chain exceeds maximum allowed depth", + }, + { + name: "multiple_shims", + processes: map[uint32]apitypes.Process{ + 100: {PID: 100, PPID: 1, Comm: "shim1"}, + 101: {PID: 101, PPID: 1, Comm: "shim2"}, + 200: {PID: 200, PPID: 100, Comm: "child1"}, + 201: {PID: 201, PPID: 101, Comm: "child2"}, + }, + shimPIDs: map[uint32]struct{}{ + 100: {}, + 101: {}, + }, + pid: 200, + ppid: 100, + expected: true, + description: "Multiple shims in the system", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + pm := &ProcessManager{} + result := pm.isDescendantOfShim(tc.pid, tc.ppid, tc.shimPIDs, tc.processes) + assert.Equal(t, tc.expected, result, tc.description) + }) + } +} diff --git a/pkg/relevancymanager/v1/relevancy_manager.go b/pkg/relevancymanager/v1/relevancy_manager.go index ce350098..a7d708f8 100644 --- a/pkg/relevancymanager/v1/relevancy_manager.go +++ b/pkg/relevancymanager/v1/relevancy_manager.go @@ -253,7 +253,7 @@ func (rm *RelevancyManager) monitorContainer(ctx context.Context, container *con // adjust ticker after first tick if !watchedContainer.InitialDelayExpired { watchedContainer.InitialDelayExpired = true - watchedContainer.UpdateDataTicker.Reset(rm.cfg.UpdateDataPeriod) + watchedContainer.UpdateDataTicker.Reset(utils.AddJitter(rm.cfg.UpdateDataPeriod, rm.cfg.MaxJitterPercentage)) } // handle collection of relevant data rm.handleRelevancy(ctx, watchedContainer, container.Runtime.ContainerID) @@ -284,7 +284,7 @@ func (rm *RelevancyManager) startRelevancyProcess(ctx context.Context, container watchedContainer := &utils.WatchedContainerData{ ContainerID: container.Runtime.ContainerID, - UpdateDataTicker: time.NewTicker(rm.cfg.InitialDelay), + 
UpdateDataTicker: time.NewTicker(utils.AddJitter(rm.cfg.InitialDelay, rm.cfg.MaxJitterPercentage)), SyncChannel: make(chan error, 10), K8sContainerID: k8sContainerID, RelevantRelationshipsArtifactsByIdentifier: make(map[string]bool), diff --git a/pkg/rulebindingmanager/cache/cache.go b/pkg/rulebindingmanager/cache/cache.go index 5bd7b841..4efe72fd 100644 --- a/pkg/rulebindingmanager/cache/cache.go +++ b/pkg/rulebindingmanager/cache/cache.go @@ -10,7 +10,6 @@ import ( "github.com/kubescape/go-logger/helpers" "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/rulebindingmanager" - "github.com/kubescape/node-agent/pkg/rulebindingmanager/types" typesv1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/types/v1" "github.com/kubescape/node-agent/pkg/ruleengine" ruleenginev1 "github.com/kubescape/node-agent/pkg/ruleengine/v1" @@ -20,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" ) var _ rulebindingmanager.RuleBindingCache = (*RBCache)(nil) @@ -84,19 +84,13 @@ func (c *RBCache) AddNotifier(n *chan rulebindingmanager.RuleBindingNotify) { // ------------------ watcher.Watcher methods ----------------------- -func (c *RBCache) AddHandler(ctx context.Context, obj *unstructured.Unstructured) { +func (c *RBCache) AddHandler(ctx context.Context, obj runtime.Object) { var rbs []rulebindingmanager.RuleBindingNotify - switch obj.GetKind() { - case "Pod": - pod, err := unstructuredToPod(obj) - if err != nil { - logger.L().Error("failed to convert unstructured to pod", helpers.Error(err)) - return - } + if pod, ok := obj.(*corev1.Pod); ok { rbs = c.addPod(ctx, pod) - case types.RuntimeRuleBindingAlertKind: - ruleBinding, err := unstructuredToRuleBinding(obj) + } else if un, ok := obj.(*unstructured.Unstructured); ok { + ruleBinding, err := unstructuredToRuleBinding(un) if err != nil { logger.L().Error("failed to convert unstructured to rule binding", helpers.Error(err)) return @@ -110,19 +104,14 @@ func (c *RBCache) AddHandler(ctx context.Context, obj *unstructured.Unstructured } } } -func (c *RBCache) ModifyHandler(ctx context.Context, obj *unstructured.Unstructured) { + +func (c *RBCache) ModifyHandler(ctx context.Context, obj runtime.Object) { var rbs []rulebindingmanager.RuleBindingNotify - switch obj.GetKind() { - case "Pod": - pod, err := unstructuredToPod(obj) - if err != nil { - logger.L().Error("failed to convert unstructured to pod", helpers.Error(err)) - return - } + if pod, ok := obj.(*corev1.Pod); ok { rbs = c.addPod(ctx, pod) - case types.RuntimeRuleBindingAlertKind: - ruleBinding, err := unstructuredToRuleBinding(obj) + } else if un, ok := obj.(*unstructured.Unstructured); ok { + ruleBinding, err := unstructuredToRuleBinding(un) if err != nil { logger.L().Error("failed to convert unstructured to rule binding", helpers.Error(err)) return @@ -136,13 +125,14 @@ func (c *RBCache) ModifyHandler(ctx context.Context, obj *unstructured.Unstructu } } } -func (c *RBCache) DeleteHandler(_ context.Context, obj *unstructured.Unstructured) { + +func (c *RBCache) DeleteHandler(_ context.Context, obj runtime.Object) { var rbs []rulebindingmanager.RuleBindingNotify - switch obj.GetKind() { - case "Pod": - c.deletePod(uniqueName(obj)) - case types.RuntimeRuleBindingAlertKind: - rbs = c.deleteRuleBinding(uniqueName(obj)) + + if pod, ok := obj.(*corev1.Pod); ok { + c.deletePod(uniqueName(pod)) + } else if un, ok := 
obj.(*unstructured.Unstructured); ok { + rbs = c.deleteRuleBinding(uniqueName(un)) } // notify @@ -389,6 +379,11 @@ func (c *RBCache) createRule(r *typesv1.RuntimeAlertRuleBindingRule) []ruleengin return []ruleengine.RuleEvaluator{} } +// Expose the rule creator to be able to create rules from third party. +func (c *RBCache) GetRuleCreator() ruleengine.RuleCreator { + return c.ruleCreator +} + func diff(a, b []rulebindingmanager.RuleBindingNotify) []rulebindingmanager.RuleBindingNotify { m := make(map[string]rulebindingmanager.RuleBindingNotify) diff := make([]rulebindingmanager.RuleBindingNotify, 0) diff --git a/pkg/rulebindingmanager/cache/cache_test.go b/pkg/rulebindingmanager/cache/cache_test.go index 6423256b..21adc61d 100644 --- a/pkg/rulebindingmanager/cache/cache_test.go +++ b/pkg/rulebindingmanager/cache/cache_test.go @@ -291,18 +291,15 @@ func TestDeleteHandler(t *testing.T) { } tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object expected expected }{ { name: "Test with Pod kind", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "pod-1", - "namespace": "default", - }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: "default", }, }, expected: expected{ @@ -334,10 +331,8 @@ func TestDeleteHandler(t *testing.T) { allPods: mapset.NewSet[string](tt.expected.pod), } c.DeleteHandler(context.Background(), tt.obj) - if tt.obj.GetKind() == "Pod" { + if _, ok := tt.obj.(*corev1.Pod); ok { assert.False(t, c.allPods.Contains(tt.expected.pod)) - } else if tt.obj.GetKind() == "RuntimeRuleAlertBinding" { - assert.True(t, c.allPods.Contains(tt.expected.pod)) } else { assert.True(t, c.allPods.Contains(tt.expected.pod)) } @@ -352,20 +347,17 @@ func TestModifyHandler(t *testing.T) { } tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object expected expected addedPod bool addedRB bool }{ { name: "Test with Pod kind", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "pod-1", - "namespace": "default", - }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: "default", }, }, addedPod: true, @@ -464,20 +456,17 @@ func TestAddHandler(t *testing.T) { } tests := []struct { name string - obj *unstructured.Unstructured + obj runtime.Object expected expected addedPod bool addedRB bool }{ { name: "Test with Pod kind", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "pod-1", - "namespace": "default", - }, + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: "default", }, }, addedPod: true, diff --git a/pkg/rulebindingmanager/cache/helpers.go b/pkg/rulebindingmanager/cache/helpers.go index 88226722..d90c0063 100644 --- a/pkg/rulebindingmanager/cache/helpers.go +++ b/pkg/rulebindingmanager/cache/helpers.go @@ -9,7 +9,6 @@ import ( k8sruntime "k8s.io/apimachinery/pkg/runtime" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -34,15 +33,6 @@ func unstructuredToRuleBinding(obj *unstructured.Unstructured) (*typesv1.Runtime return rb, nil } -func unstructuredToPod(obj *unstructured.Unstructured) (*corev1.Pod, error) { - pod := &corev1.Pod{} - if err := 
k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pod); err != nil { - return nil, err - } - return pod, nil - -} - func resourcesToWatch(nodeName string) []watcher.WatchResource { var w []watcher.WatchResource diff --git a/pkg/rulebindingmanager/cache/helpers_test.go b/pkg/rulebindingmanager/cache/helpers_test.go index 96634471..6aa3974a 100644 --- a/pkg/rulebindingmanager/cache/helpers_test.go +++ b/pkg/rulebindingmanager/cache/helpers_test.go @@ -44,64 +44,6 @@ func TestResourcesToWatch(t *testing.T) { } } -func TestUnstructuredToPod(t *testing.T) { - tests := []struct { - obj *unstructured.Unstructured - name string - wantErr bool - }{ - { - name: "Test with valid pod", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "pod-1", - "namespace": "default", - }, - "spec": map[string]interface{}{ - "containers": []interface{}{ - map[string]interface{}{ - "name": "container-1", - "image": "image-1", - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "Test with invalid pod", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "pod-1", - "namespace": "default", - }, - "spec": map[string]interface{}{ - "containers": "invalid", - }, - }, - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := unstructuredToPod(tt.obj) - if (err != nil) != tt.wantErr { - t.Errorf("unstructuredToPod() error = %v, wantErr %v", err, tt.wantErr) - return - } - }) - } -} - func TestUnstructuredToRuleBinding(t *testing.T) { tests := []struct { obj *unstructured.Unstructured diff --git a/pkg/ruleengine/ruleengine_interface.go b/pkg/ruleengine/ruleengine_interface.go index 3756c183..d512670f 100644 --- a/pkg/ruleengine/ruleengine_interface.go +++ b/pkg/ruleengine/ruleengine_interface.go @@ -17,11 +17,40 @@ const ( RulePrioritySystemIssue = 1000 ) +type RuleDescriptor struct { + // Rule ID + ID string + // Rule Name + Name string + // Rule Description + Description string + // Priority + Priority int + // Tags + Tags []string + // Rule requirements + Requirements RuleSpec + // Create a rule function + RuleCreationFunc func() RuleEvaluator +} + +func (r *RuleDescriptor) HasTags(tags []string) bool { + for _, tag := range tags { + for _, ruleTag := range r.Tags { + if tag == ruleTag { + return true + } + } + } + return false +} + // RuleCreator is an interface for creating rules by tags, IDs, and names type RuleCreator interface { CreateRulesByTags(tags []string) []RuleEvaluator CreateRuleByID(id string) RuleEvaluator CreateRuleByName(name string) RuleEvaluator + RegisterRule(rule RuleDescriptor) } type RuleEvaluator interface { @@ -32,7 +61,7 @@ type RuleEvaluator interface { Name() string // Rule processing - ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) RuleFailure + ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) RuleFailure // Rule requirements Requirements() RuleSpec diff --git a/pkg/ruleengine/ruleengine_mock.go b/pkg/ruleengine/ruleengine_mock.go index 0f4894e1..40e46b89 100644 --- a/pkg/ruleengine/ruleengine_mock.go +++ b/pkg/ruleengine/ruleengine_mock.go @@ -25,6 +25,9 @@ func (r *RuleCreatorMock) CreateRuleByName(name string) RuleEvaluator { return &RuleMock{RuleName: name} } +func (r *RuleCreatorMock) RegisterRule(rule 
RuleDescriptor) { +} + var _ RuleEvaluator = (*RuleMock)(nil) type RuleMock struct { @@ -45,7 +48,7 @@ func (rule *RuleMock) ID() string { func (rule *RuleMock) DeleteRule() { } -func (rule *RuleMock) ProcessEvent(_ utils.EventType, _ interface{}, _ objectcache.ObjectCache) RuleFailure { +func (rule *RuleMock) ProcessEvent(_ utils.EventType, _ utils.K8sEvent, _ objectcache.ObjectCache) RuleFailure { return nil } diff --git a/pkg/ruleengine/v1/README.md b/pkg/ruleengine/v1/README.md deleted file mode 100644 index 4c8eed94..00000000 --- a/pkg/ruleengine/v1/README.md +++ /dev/null @@ -1,16 +0,0 @@ -| ID | Rule | Description | Tags | Priority | Application profile | Parameters | -|-------|-----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------|----------|---------------------|-----------------------------------------------| -| R0001 | Unexpected process launched | Detecting exec calls that are not whitelisted by application profile | [exec whitelisted] | 10 | true | false | -| R0002 | Unexpected file access | Detecting file access that are not whitelisted by application profile. File access is defined by the combination of path and flags | [open whitelisted] | 5 | true | [ignoreMounts: bool ignorePrefixes: string[]] | -| R0003 | Unexpected system call | Detecting unexpected system calls that are not whitelisted by application profile. Every unexpected system call will be alerted only once. | [syscall whitelisted] | 5 | true | false | -| R0004 | Unexpected capability used | Detecting unexpected capabilities that are not whitelisted by application profile. Every unexpected capability is identified in context of a syscall and will be alerted only once per container. | [capabilities whitelisted] | 8 | true | false | -| R0005 | Unexpected domain request | Detecting unexpected domain requests that are not whitelisted by application profile. | [dns whitelisted] | 5 | true | false | -| R0006 | Unexpected service account token access | Detecting unexpected service account token access that are not whitelisted by application profile. | [token malicious whitelisted] | 8 | true | false | -| R0007 | Kubernetes Client Executed | Detecting exececution of kubernetes client | [exec malicious whitelisted] | 10 | false | false | -| R1000 | Exec from malicious source | Detecting exec calls that are from malicious source like: /dev/shm, /run, /var/run, /proc/self | [exec signature] | 10 | false | false | -| R1001 | Exec Binary Not In Base Image | Detecting exec calls of binaries that are not included in the base image | [exec malicious binary base image] | 10 | false | false | -| R1002 | Kernel Module Load | Detecting Kernel Module Load. | [syscall kernel module load] | 10 | false | false | -| R1003 | Malicious SSH Connection | Detecting ssh connection to disallowed port | [ssh connection port malicious] | 8 | false | false | -| R1004 | Exec from mount | Detecting exec calls from mounted paths. | [exec mount] | 5 | false | false | -| R1006 | Unshare System Call usage | Detecting Unshare System Call usage. | [syscall escape unshare] | 8 | false | false | -| R1007 | Crypto Miners | Detecting Crypto Miners. 
| [network crypto miners malicious dns] | 8 | false | false | diff --git a/pkg/ruleengine/v1/factory.go b/pkg/ruleengine/v1/factory.go index 2e4f0495..fcc993ab 100644 --- a/pkg/ruleengine/v1/factory.go +++ b/pkg/ruleengine/v1/factory.go @@ -5,12 +5,12 @@ import "github.com/kubescape/node-agent/pkg/ruleengine" var _ ruleengine.RuleCreator = (*RuleCreatorImpl)(nil) type RuleCreatorImpl struct { - ruleDescriptions []RuleDescriptor + ruleDescriptions []ruleengine.RuleDescriptor } func NewRuleCreator() *RuleCreatorImpl { return &RuleCreatorImpl{ - ruleDescriptions: []RuleDescriptor{ + ruleDescriptions: []ruleengine.RuleDescriptor{ R0001UnexpectedProcessLaunchedRuleDescriptor, R0002UnexpectedFileAccessRuleDescriptor, R0003UnexpectedSystemCallRuleDescriptor, @@ -21,6 +21,7 @@ func NewRuleCreator() *RuleCreatorImpl { R0008ReadEnvironmentVariablesProcFSRuleDescriptor, R0009EbpfProgramLoadRuleDescriptor, R0010UnexpectedSensitiveFileAccessRuleDescriptor, + R0011UnexpectedEgressNetworkTrafficRuleDescriptor, R1000ExecFromMaliciousSourceDescriptor, R1001ExecBinaryNotInBaseImageRuleDescriptor, R1002LoadKernelModuleRuleDescriptor, @@ -35,6 +36,7 @@ func NewRuleCreator() *RuleCreatorImpl { R1011LdPreloadHookRuleDescriptor, R1012HardlinkCreatedOverSensitiveFileRuleDescriptor, R1013CryptoMiningFilesAccessRuleDescriptor, + R1015MaliciousPtraceUsageRuleDescriptor, }, } } @@ -67,6 +69,10 @@ func (r *RuleCreatorImpl) CreateRuleByName(name string) ruleengine.RuleEvaluator return nil } -func (r *RuleCreatorImpl) GetAllRuleDescriptors() []RuleDescriptor { +func (r *RuleCreatorImpl) GetAllRuleDescriptors() []ruleengine.RuleDescriptor { return r.ruleDescriptions } + +func (r *RuleCreatorImpl) RegisterRule(rule ruleengine.RuleDescriptor) { + r.ruleDescriptions = append(r.ruleDescriptions, rule) +} diff --git a/pkg/ruleengine/v1/helpers.go b/pkg/ruleengine/v1/helpers.go index 6113137b..3a8b0024 100644 --- a/pkg/ruleengine/v1/helpers.go +++ b/pkg/ruleengine/v1/helpers.go @@ -21,7 +21,6 @@ var SensitiveFiles = []string{ "/etc/ssh/sshd_config", "/etc/ssh/ssh_config", "/etc/pam.d", - "/etc/group", } var ( diff --git a/pkg/ruleengine/v1/mock.go b/pkg/ruleengine/v1/mock.go index 1a79d692..c19bb824 100644 --- a/pkg/ruleengine/v1/mock.go +++ b/pkg/ruleengine/v1/mock.go @@ -11,12 +11,14 @@ import ( var _ objectcache.ApplicationProfileCache = (*RuleObjectCacheMock)(nil) var _ objectcache.K8sObjectCache = (*RuleObjectCacheMock)(nil) var _ objectcache.NetworkNeighborhoodCache = (*RuleObjectCacheMock)(nil) +var _ objectcache.DnsCache = (*RuleObjectCacheMock)(nil) type RuleObjectCacheMock struct { profile *v1beta1.ApplicationProfile podSpec *corev1.PodSpec podStatus *corev1.PodStatus nn *v1beta1.NetworkNeighborhood + dnsCache map[string]string } func (r *RuleObjectCacheMock) GetApplicationProfile(string) *v1beta1.ApplicationProfile { @@ -68,3 +70,19 @@ func (r *RuleObjectCacheMock) GetNetworkNeighborhood(string) *v1beta1.NetworkNei func (r *RuleObjectCacheMock) SetNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood) { r.nn = nn } + +func (r *RuleObjectCacheMock) DnsCache() objectcache.DnsCache { + return r +} + +func (r *RuleObjectCacheMock) SetDnsCache(dnsCache map[string]string) { + r.dnsCache = dnsCache +} + +func (r *RuleObjectCacheMock) ResolveIpToDomain(ip string) string { + if _, ok := r.dnsCache[ip]; ok { + return r.dnsCache[ip] + } + + return "" +} diff --git a/pkg/ruleengine/v1/r0001_unexpected_process_launched.go b/pkg/ruleengine/v1/r0001_unexpected_process_launched.go index 5ea6543a..021fb2bc 100644 --- 
a/pkg/ruleengine/v1/r0001_unexpected_process_launched.go +++ b/pkg/ruleengine/v1/r0001_unexpected_process_launched.go @@ -21,7 +21,7 @@ const ( R0001Name = "Unexpected process launched" ) -var R0001UnexpectedProcessLaunchedRuleDescriptor = RuleDescriptor{ +var R0001UnexpectedProcessLaunchedRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0001ID, Name: R0001Name, Description: "Detecting exec calls that are not whitelisted by application profile", @@ -75,7 +75,7 @@ func (rule *R0001UnexpectedProcessLaunched) generatePatchCommand(event *tracerex event.GetContainer(), getExecPathFromEvent(event), argList) } -func (rule *R0001UnexpectedProcessLaunched) ProcessEvent(eventType utils.EventType, event interface{}, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0001UnexpectedProcessLaunched) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.ExecveEventType { return nil @@ -117,6 +117,8 @@ func (rule *R0001UnexpectedProcessLaunched) ProcessEvent(eventType utils.EventTy InfectedPID: execEvent.Pid, Arguments: map[string]interface{}{ "retval": execEvent.Retval, + "exec": execPath, + "args": execEvent.Args, }, FixSuggestions: fmt.Sprintf("If this is a valid behavior, please add the exec call \"%s\" to the whitelist in the application profile for the Pod \"%s\". You can use the following command: %s", execPath, execEvent.GetPod(), rule.generatePatchCommand(execEvent, ap)), Severity: R0001UnexpectedProcessLaunchedRuleDescriptor.Priority, @@ -142,7 +144,8 @@ func (rule *R0001UnexpectedProcessLaunched) ProcessEvent(eventType utils.EventTy RuleDescription: fmt.Sprintf("Unexpected process launched: %s in: %s", execPath, execEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r0002_unexpected_file_access.go b/pkg/ruleengine/v1/r0002_unexpected_file_access.go index 17321270..ca7fd204 100644 --- a/pkg/ruleengine/v1/r0002_unexpected_file_access.go +++ b/pkg/ruleengine/v1/r0002_unexpected_file_access.go @@ -6,6 +6,7 @@ import ( "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" "github.com/kubescape/node-agent/pkg/objectcache" @@ -22,7 +23,7 @@ const ( R0002Name = "Unexpected file access" ) -var R0002UnexpectedFileAccessRuleDescriptor = RuleDescriptor{ +var R0002UnexpectedFileAccessRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0002ID, Name: R0002Name, Description: "Detecting file access that are not whitelisted by application profile. 
File access is defined by the combination of path and flags", @@ -93,7 +94,7 @@ func (rule *R0002UnexpectedFileAccess) generatePatchCommand(event *traceropentyp return fmt.Sprintf(baseTemplate, ap.GetName(), ap.GetNamespace(), event.GetContainer(), event.FullPath, flagList) } -func (rule *R0002UnexpectedFileAccess) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0002UnexpectedFileAccess) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.OpenEventType { return nil } @@ -133,7 +134,7 @@ func (rule *R0002UnexpectedFileAccess) ProcessEvent(eventType utils.EventType, e } for _, open := range appProfileOpenList.Opens { - if open.Path == openEvent.FullPath { + if dynamicpathdetector.CompareDynamic(open.Path, openEvent.FullPath) { found := 0 for _, eventOpenFlag := range openEvent.Flags { // Check that event open flag is in the open.Flags diff --git a/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go b/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go index 64180436..8ecfac38 100644 --- a/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go +++ b/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go @@ -70,6 +70,58 @@ func TestR0002UnexpectedFileAccess(t *testing.T) { t.Errorf("Expected ruleResult to be nil since file is whitelisted") } + e.FullPath = "/var/log/app123.log" + profile = &v1beta1.ApplicationProfile{ + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "test", + Opens: []v1beta1.OpenCalls{ + { + Path: "/var/log/\u22ef", + Flags: []string{"O_RDONLY"}, + }, + }, + }, + }, + }, + } + objCache.SetApplicationProfile(profile) + r.SetParameters(map[string]interface{}{"ignoreMounts": false, "ignorePrefixes": []interface{}{}}) + ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since file matches dynamic path in profile") + } + + // Test with dynamic path but different flags + e.Flags = []string{"O_WRONLY"} + ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since flag is not whitelisted for dynamic path") + } + + // Test with dynamic path but non-matching file + e.FullPath = "/var/log/different_directory/app123.log" + e.Flags = []string{"O_RDONLY"} + ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since file does not match dynamic path structure") + } + + // Test with multiple dynamic segments + e.FullPath = "/var/log/user123/app456.log" + profile.Spec.Containers[0].Opens = []v1beta1.OpenCalls{ + { + Path: "/var/log/\u22ef/\u22ef", + Flags: []string{"O_RDONLY"}, + }, + } + objCache.SetApplicationProfile(profile) + ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since file matches multiple dynamic segments in profile") + } + // Test with whitelisted file, but different flags e.Flags = []string{"O_WRONLY"} ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) diff --git a/pkg/ruleengine/v1/r0003_unexpected_system_call.go b/pkg/ruleengine/v1/r0003_unexpected_system_call.go index 22c2bce9..952ea758 100644 --- a/pkg/ruleengine/v1/r0003_unexpected_system_call.go +++ b/pkg/ruleengine/v1/r0003_unexpected_system_call.go @@ -18,7 
+18,7 @@ const ( R0003Name = "Unexpected system call" ) -var R0003UnexpectedSystemCallRuleDescriptor = RuleDescriptor{ +var R0003UnexpectedSystemCallRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0003ID, Name: R0003Name, Description: "Detecting unexpected system calls that are not whitelisted by application profile.", @@ -58,7 +58,7 @@ func (rule *R0003UnexpectedSystemCall) ID() string { func (rule *R0003UnexpectedSystemCall) DeleteRule() { } -func (rule *R0003UnexpectedSystemCall) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0003UnexpectedSystemCall) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.SyscallEventType { return nil } @@ -92,7 +92,10 @@ func (rule *R0003UnexpectedSystemCall) ProcessEvent(eventType utils.EventType, e ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "syscall": syscallEvent.SyscallName, + }, InfectedPID: syscallEvent.Pid, FixSuggestions: fmt.Sprintf("If this is a valid behavior, please add the system call \"%s\" to the whitelist in the application profile for the Pod \"%s\".", syscallEvent.SyscallName, syscallEvent.GetPod()), Severity: R0003UnexpectedSystemCallRuleDescriptor.Priority, diff --git a/pkg/ruleengine/v1/r0004_unexpected_capability_used.go b/pkg/ruleengine/v1/r0004_unexpected_capability_used.go index dd77fca0..c4d8e906 100644 --- a/pkg/ruleengine/v1/r0004_unexpected_capability_used.go +++ b/pkg/ruleengine/v1/r0004_unexpected_capability_used.go @@ -3,6 +3,7 @@ package ruleengine import ( "fmt" + "github.com/goradd/maps" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/utils" @@ -17,7 +18,7 @@ const ( R0004Name = "Unexpected capability used" ) -var R0004UnexpectedCapabilityUsedRuleDescriptor = RuleDescriptor{ +var R0004UnexpectedCapabilityUsedRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0004ID, Name: R0004Name, Description: "Detecting unexpected capabilities that are not whitelisted by application profile. 
Every unexpected capability is identified in context of a syscall and will be alerted only once per container.", @@ -34,6 +35,7 @@ var _ ruleengine.RuleEvaluator = (*R0004UnexpectedCapabilityUsed)(nil) type R0004UnexpectedCapabilityUsed struct { BaseRule + alertedCapabilities maps.SafeMap[string, bool] } func CreateRuleR0004UnexpectedCapabilityUsed() *R0004UnexpectedCapabilityUsed { @@ -56,7 +58,7 @@ func (rule *R0004UnexpectedCapabilityUsed) generatePatchCommand(event *tracercap event.GetContainer(), event.Syscall, event.CapName) } -func (rule *R0004UnexpectedCapabilityUsed) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0004UnexpectedCapabilityUsed) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.CapabilitiesEventType { return nil } @@ -76,6 +78,10 @@ func (rule *R0004UnexpectedCapabilityUsed) ProcessEvent(eventType utils.EventTyp return nil } + if rule.alertedCapabilities.Has(capEvent.CapName) { + return nil + } + for _, capability := range appProfileCapabilitiesList.Capabilities { if capEvent.CapName == capability { return nil @@ -84,7 +90,11 @@ func (rule *R0004UnexpectedCapabilityUsed) ProcessEvent(eventType utils.EventTyp ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "syscall": capEvent.Syscall, + "capability": capEvent.CapName, + }, InfectedPID: capEvent.Pid, FixSuggestions: fmt.Sprintf("If this is a valid behavior, please add the capability use \"%s\" to the whitelist in the application profile for the Pod \"%s\". You can use the following command: %s", capEvent.CapName, capEvent.GetPod(), rule.generatePatchCommand(capEvent, ap)), Severity: R0004UnexpectedCapabilityUsedRuleDescriptor.Priority, @@ -108,6 +118,8 @@ func (rule *R0004UnexpectedCapabilityUsed) ProcessEvent(eventType utils.EventTyp RuleID: rule.ID(), } + rule.alertedCapabilities.Set(capEvent.CapName, true) + return &ruleFailure } diff --git a/pkg/ruleengine/v1/r0005_unexpected_domain_request.go b/pkg/ruleengine/v1/r0005_unexpected_domain_request.go index 10098f91..7ea82b38 100644 --- a/pkg/ruleengine/v1/r0005_unexpected_domain_request.go +++ b/pkg/ruleengine/v1/r0005_unexpected_domain_request.go @@ -5,6 +5,7 @@ import ( "slices" "strings" + "github.com/goradd/maps" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/utils" @@ -19,7 +20,7 @@ const ( R0005Name = "Unexpected domain request" ) -var R0005UnexpectedDomainRequestRuleDescriptor = RuleDescriptor{ +var R0005UnexpectedDomainRequestRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0005ID, Name: R0005Name, Description: "Detecting unexpected domain requests that are not whitelisted by application profile.", @@ -36,6 +37,7 @@ var _ ruleengine.RuleEvaluator = (*R0005UnexpectedDomainRequest)(nil) type R0005UnexpectedDomainRequest struct { BaseRule + alertedDomains maps.SafeMap[string, bool] } func CreateRuleR0005UnexpectedDomainRequest() *R0005UnexpectedDomainRequest { @@ -45,6 +47,7 @@ func CreateRuleR0005UnexpectedDomainRequest() *R0005UnexpectedDomainRequest { func (rule *R0005UnexpectedDomainRequest) Name() string { return R0005Name } + func (rule *R0005UnexpectedDomainRequest) ID() string { return R0005ID } @@ -58,7 +61,7 @@ func (rule *R0005UnexpectedDomainRequest) 
generatePatchCommand(event *tracerdnst event.GetContainer(), event.DNSName) } -func (rule *R0005UnexpectedDomainRequest) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0005UnexpectedDomainRequest) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.DnsEventType { return nil } @@ -68,6 +71,10 @@ func (rule *R0005UnexpectedDomainRequest) ProcessEvent(eventType utils.EventType return nil } + if rule.alertedDomains.Has(domainEvent.DNSName) { + return nil + } + // TODO: fix this, currently we are ignoring in-cluster communication if strings.HasSuffix(domainEvent.DNSName, "svc.cluster.local.") { return nil @@ -95,7 +102,10 @@ func (rule *R0005UnexpectedDomainRequest) ProcessEvent(eventType utils.EventType AlertName: rule.Name(), InfectedPID: domainEvent.Pid, Arguments: map[string]interface{}{ - "domain": domainEvent.DNSName, + "domain": domainEvent.DNSName, + "addresses": domainEvent.Addresses, + "protocol": domainEvent.Protocol, + "port": domainEvent.DstPort, }, FixSuggestions: fmt.Sprintf("If this is a valid behavior, please add the domain %s to the whitelist in the application profile for the Pod %s. You can use the following command: %s", domainEvent.DNSName, @@ -105,10 +115,14 @@ func (rule *R0005UnexpectedDomainRequest) ProcessEvent(eventType utils.EventType }, RuntimeProcessDetails: apitypes.ProcessTree{ ProcessTree: apitypes.Process{ - Comm: domainEvent.Comm, - Gid: &domainEvent.Gid, - PID: domainEvent.Pid, - Uid: &domainEvent.Uid, + Comm: domainEvent.Comm, + Gid: &domainEvent.Gid, + PID: domainEvent.Pid, + Uid: &domainEvent.Uid, + Pcomm: domainEvent.Pcomm, + Path: domainEvent.Exepath, + Cwd: domainEvent.Cwd, + PPID: domainEvent.Ppid, }, ContainerID: domainEvent.Runtime.ContainerID, }, @@ -117,11 +131,14 @@ func (rule *R0005UnexpectedDomainRequest) ProcessEvent(eventType utils.EventType RuleDescription: fmt.Sprintf("Unexpected domain communication: %s from: %s", domainEvent.DNSName, domainEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: domainEvent.GetPod(), + PodName: domainEvent.GetPod(), + PodLabels: domainEvent.K8s.PodLabels, }, RuleID: rule.ID(), } + rule.alertedDomains.Set(domainEvent.DNSName, true) + return &ruleFailure } diff --git a/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go b/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go index 2d2f43f8..89844109 100644 --- a/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go +++ b/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go @@ -30,6 +30,7 @@ func TestR0005UnexpectedDomainRequest(t *testing.T) { }, }, DNSName: "test.com", + Qr: tracerdnstype.DNSPktTypeQuery, } // Test with nil appProfileAccess @@ -60,5 +61,4 @@ func TestR0005UnexpectedDomainRequest(t *testing.T) { if ruleResult != nil { t.Errorf("Expected ruleResult to be nil since domain is whitelisted") } - } diff --git a/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go b/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go index b499bd51..7a1c7f0a 100644 --- a/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go +++ b/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go @@ -27,7 +27,7 @@ var serviceAccountTokenPathsPrefix = []string{ "/var/run/secrets/eks.amazonaws.com/serviceaccount", } -var R0006UnexpectedServiceAccountTokenAccessRuleDescriptor = RuleDescriptor{ +var 
R0006UnexpectedServiceAccountTokenAccessRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0006ID, Name: R0006Name, Description: "Detecting unexpected access to service account token.", @@ -76,7 +76,7 @@ func (rule *R0006UnexpectedServiceAccountTokenAccess) generatePatchCommand(event event.GetContainer(), event.FullPath, flagList) } -func (rule *R0006UnexpectedServiceAccountTokenAccess) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0006UnexpectedServiceAccountTokenAccess) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.OpenEventType { return nil } @@ -119,7 +119,11 @@ func (rule *R0006UnexpectedServiceAccountTokenAccess) ProcessEvent(eventType uti ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "path": openEvent.FullPath, + "flags": openEvent.Flags, + }, InfectedPID: openEvent.Pid, FixSuggestions: fmt.Sprintf("If this is a valid behavior, please add the open call \"%s\" to the whitelist in the application profile for the Pod \"%s\". You can use the following command: %s", openEvent.FullPath, openEvent.GetPod(), rule.generatePatchCommand(openEvent, ap)), Severity: R0006UnexpectedServiceAccountTokenAccessRuleDescriptor.Priority, @@ -138,7 +142,8 @@ func (rule *R0006UnexpectedServiceAccountTokenAccess) ProcessEvent(eventType uti RuleDescription: fmt.Sprintf("Unexpected access to service account token: %s with flags: %s in: %s", openEvent.FullPath, strings.Join(openEvent.Flags, ","), openEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), + PodName: openEvent.GetPod(), + PodLabels: openEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go b/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go index 26e9ca92..bfecb5f8 100644 --- a/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go +++ b/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go @@ -44,7 +44,7 @@ var kubernetesClients = []string{ "containerd-shim-runc", } -var R0007KubernetesClientExecutedDescriptor = RuleDescriptor{ +var R0007KubernetesClientExecutedDescriptor = ruleengine.RuleDescriptor{ ID: R0007ID, Name: R0007Name, Description: "Detecting execution of kubernetes client", @@ -97,7 +97,12 @@ func (rule *R0007KubernetesClientExecuted) handleNetworkEvent(event *tracernetwo ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "dstIP": event.DstEndpoint.Addr, + "port": event.Port, + "proto": event.Proto, + }, InfectedPID: event.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R0007KubernetesClientExecutedDescriptor.Priority, @@ -116,7 +121,8 @@ func (rule *R0007KubernetesClientExecuted) handleNetworkEvent(event *tracernetwo RuleDescription: fmt.Sprintf("Kubernetes client executed: %s", event.Comm), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: event.GetPod(), + PodName: event.GetPod(), + PodLabels: event.K8s.PodLabels, }, RuleID: rule.ID(), } @@ -147,7 +153,8 @@ func (rule *R0007KubernetesClientExecuted) handleExecEvent(event *tracerexectype AlertName: rule.Name(), InfectedPID: event.Pid,
Arguments: map[string]interface{}{ - "hardlink": event.ExePath, + "exec": execPath, + "args": event.Args, }, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R0007KubernetesClientExecutedDescriptor.Priority, @@ -173,7 +180,8 @@ func (rule *R0007KubernetesClientExecuted) handleExecEvent(event *tracerexectype RuleDescription: fmt.Sprintf("Kubernetes client %s was executed in: %s", execPath, event.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: event.GetPod(), + PodName: event.GetPod(), + PodLabels: event.K8s.PodLabels, }, RuleID: rule.ID(), } @@ -184,7 +192,7 @@ func (rule *R0007KubernetesClientExecuted) handleExecEvent(event *tracerexectype return nil } -func (rule *R0007KubernetesClientExecuted) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0007KubernetesClientExecuted) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.ExecveEventType && eventType != utils.NetworkEventType { return nil } diff --git a/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go b/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go index e60fc483..35f7b736 100644 --- a/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go +++ b/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go @@ -17,7 +17,7 @@ const ( R0008Name = "Read Environment Variables from procfs" ) -var R0008ReadEnvironmentVariablesProcFSRuleDescriptor = RuleDescriptor{ +var R0008ReadEnvironmentVariablesProcFSRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0008ID, Name: R0008Name, Description: "Detecting reading environment variables from procfs.", @@ -52,7 +52,7 @@ func (rule *R0008ReadEnvironmentVariablesProcFS) ID() string { func (rule *R0008ReadEnvironmentVariablesProcFS) DeleteRule() { } -func (rule *R0008ReadEnvironmentVariablesProcFS) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0008ReadEnvironmentVariablesProcFS) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.OpenEventType { return nil } @@ -85,7 +85,11 @@ func (rule *R0008ReadEnvironmentVariablesProcFS) ProcessEvent(eventType utils.Ev ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "path": openEvent.FullPath, + "flags": openEvent.Flags, + }, InfectedPID: openEvent.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R0008ReadEnvironmentVariablesProcFSRuleDescriptor.Priority, @@ -104,7 +108,8 @@ func (rule *R0008ReadEnvironmentVariablesProcFS) ProcessEvent(eventType utils.Ev RuleDescription: fmt.Sprintf("Reading environment variables from procfs: %s", openEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), + PodName: openEvent.GetPod(), + PodLabels: openEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r0009_ebpf_program_load.go b/pkg/ruleengine/v1/r0009_ebpf_program_load.go index 922ffa2b..9933e93d 100644 --- a/pkg/ruleengine/v1/r0009_ebpf_program_load.go +++ b/pkg/ruleengine/v1/r0009_ebpf_program_load.go @@ -18,7 +18,7 @@ const ( 
R0009Name = "eBPF Program Load" ) -var R0009EbpfProgramLoadRuleDescriptor = RuleDescriptor{ +var R0009EbpfProgramLoadRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0009ID, Name: R0009Name, Description: "Detecting eBPF program load.", @@ -55,7 +55,7 @@ func (rule *R0009EbpfProgramLoad) ID() string { func (rule *R0009EbpfProgramLoad) DeleteRule() { } -func (rule *R0009EbpfProgramLoad) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0009EbpfProgramLoad) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if rule.alreadyNotified { return nil } @@ -88,7 +88,10 @@ func (rule *R0009EbpfProgramLoad) ProcessEvent(eventType utils.EventType, event rule.alreadyNotified = true ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "syscall": syscallEvent.SyscallName, + }, InfectedPID: syscallEvent.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule", Severity: R0009EbpfProgramLoadRuleDescriptor.Priority, @@ -105,7 +108,8 @@ func (rule *R0009EbpfProgramLoad) ProcessEvent(eventType utils.EventType, event RuleDescription: fmt.Sprintf("bpf system call executed in %s", syscallEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), + PodName: syscallEvent.GetPod(), + PodLabels: syscallEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go b/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go index a3454ef2..1f512cc9 100644 --- a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go +++ b/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go @@ -2,11 +2,13 @@ package ruleengine import ( "fmt" + "path/filepath" "strings" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" apitypes "github.com/armosec/armoapi-go/armotypes" traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" @@ -19,7 +21,7 @@ const ( R0010Name = "Unexpected Sensitive File Access" ) -var R0010UnexpectedSensitiveFileAccessRuleDescriptor = RuleDescriptor{ +var R0010UnexpectedSensitiveFileAccessRuleDescriptor = ruleengine.RuleDescriptor{ ID: R0010ID, Name: R0010Name, Description: "Detecting access to sensitive files.", @@ -76,7 +78,7 @@ func (rule *R0010UnexpectedSensitiveFileAccess) ID() string { func (rule *R0010UnexpectedSensitiveFileAccess) DeleteRule() { } -func (rule *R0010UnexpectedSensitiveFileAccess) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R0010UnexpectedSensitiveFileAccess) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.OpenEventType { return nil } @@ -96,27 +98,23 @@ func (rule *R0010UnexpectedSensitiveFileAccess) ProcessEvent(eventType utils.Eve return nil } - isSensitive := false - for _, path := range rule.additionalPaths { - if strings.HasPrefix(openEvent.FullPath, path) { - isSensitive = true - break - } - } - - if !isSensitive { + if !isSensitivePath(openEvent.FullPath, 
rule.additionalPaths) { return nil } for _, open := range appProfileOpenList.Opens { - if open.Path == openEvent.FullPath { + if dynamicpathdetector.CompareDynamic(open.Path, openEvent.FullPath) { return nil } } ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "path": openEvent.FullPath, + "flags": openEvent.Flags, + }, InfectedPID: openEvent.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R0010UnexpectedSensitiveFileAccessRuleDescriptor.Priority, @@ -135,7 +133,8 @@ func (rule *R0010UnexpectedSensitiveFileAccess) ProcessEvent(eventType utils.Eve RuleDescription: fmt.Sprintf("Unexpected sensitive file access: %s in: %s", openEvent.FullPath, openEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), + PodName: openEvent.GetPod(), + PodLabels: openEvent.K8s.PodLabels, }, RuleID: rule.ID(), } @@ -148,3 +147,30 @@ func (rule *R0010UnexpectedSensitiveFileAccess) Requirements() ruleengine.RuleSp EventTypes: R0010UnexpectedSensitiveFileAccessRuleDescriptor.Requirements.RequiredEventTypes(), } } + +// isSensitivePath checks if a given path matches or is within any sensitive paths +func isSensitivePath(fullPath string, paths []string) bool { + // Clean the path to handle "..", "//", etc. + fullPath = filepath.Clean(fullPath) + + for _, sensitivePath := range paths { + sensitivePath = filepath.Clean(sensitivePath) + + // Check if the path exactly matches + if fullPath == sensitivePath { + return true + } + + // Check if the path is a directory that contains sensitive files + if strings.HasPrefix(sensitivePath, fullPath+"/") { + return true + } + + // Check if the path is within a sensitive directory + if strings.HasPrefix(fullPath, sensitivePath+"/") { + return true + } + } + + return false +} diff --git a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go b/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go index 2665ae96..e45d7f2b 100644 --- a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go +++ b/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go @@ -3,25 +3,14 @@ package ruleengine import ( "testing" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/utils" - - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" ) -func TestR0010UnexpectedSensitiveFileAccess(t *testing.T) { - // Create a new rule - r := CreateRuleR0010UnexpectedSensitiveFileAccess() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a file access event - e := &traceropentype.Event{ +func createTestEvent(path string, flags []string) *traceropentype.Event { + return &traceropentype.Event{ Event: eventtypes.Event{ CommonData: eventtypes.CommonData{ K8s: eventtypes.K8sMetadata{ @@ -31,74 +20,127 @@ func TestR0010UnexpectedSensitiveFileAccess(t *testing.T) { }, }, }, - Path: "/test", - FullPath: "/test", - Flags: []string{"O_RDONLY"}, + Path: path, + FullPath: path, + Flags: flags, } +} - // Test with nil appProfileAccess - ruleResult := 
r.ProcessEvent(utils.OpenEventType, e, &objectcache.ObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to not be nil since no appProfile") +func createTestProfile(containerName string, paths []string, flags []string) *v1beta1.ApplicationProfile { + opens := make([]v1beta1.OpenCalls, len(paths)) + for i, path := range paths { + opens[i] = v1beta1.OpenCalls{ + Path: path, + Flags: flags, + } } - // Test with whitelisted file - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Opens: []v1beta1.OpenCalls{ - { - Path: "/test", - Flags: []string{"O_RDONLY"}, - }, - }, - }, + return &v1beta1.ApplicationProfile{ + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: containerName, + Opens: opens, }, }, - } - objCache.SetApplicationProfile(profile) - } - ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is whitelisted and not sensitive") + }, } +} - // Test with non whitelisted file, but not sensitive - e.FullPath = "/var/test1" - ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is not whitelisted and not sensitive") +func TestR0010UnexpectedSensitiveFileAccess(t *testing.T) { + tests := []struct { + name string + event *traceropentype.Event + profile *v1beta1.ApplicationProfile + additionalPaths []interface{} + expectAlert bool + description string + }{ + { + name: "No application profile", + event: createTestEvent("/test", []string{"O_RDONLY"}), + profile: nil, + expectAlert: false, + description: "Should not alert when no application profile is present", + }, + { + name: "Whitelisted non-sensitive file", + event: createTestEvent("/test", []string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), + expectAlert: false, + description: "Should not alert for whitelisted non-sensitive file", + }, + { + name: "Non-whitelisted non-sensitive file", + event: createTestEvent("/var/test1", []string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), + expectAlert: false, + description: "Should not alert for non-whitelisted non-sensitive file", + }, + { + name: "Whitelisted sensitive file", + event: createTestEvent("/etc/shadow", []string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/etc/shadow"}, []string{"O_RDONLY"}), + expectAlert: false, + description: "Should not alert for whitelisted sensitive file", + }, + { + name: "Non-whitelisted sensitive file", + event: createTestEvent("/etc/shadow", []string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), + expectAlert: true, + description: "Should alert for non-whitelisted sensitive file", + }, + { + name: "Additional sensitive path", + event: createTestEvent("/etc/custom-sensitive", []string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), + additionalPaths: []interface{}{"/etc/custom-sensitive"}, + expectAlert: true, + description: "Should alert for non-whitelisted file in additional sensitive paths", + }, + { + name: "Wildcard path match", + event: createTestEvent("/etc/blabla", 
[]string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/etc/\u22ef"}, []string{"O_RDONLY"}), + expectAlert: false, + description: "Should not alert when path matches wildcard pattern", + }, + { + name: "Path traversal attempt", + event: createTestEvent("/etc/shadow/../passwd", []string{"O_RDONLY"}), + profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), + expectAlert: true, + description: "Should alert for path traversal attempts", + }, } - // Test with sensitive file that is whitelisted - e.FullPath = "/etc/shadow" - profile.Spec.Containers[0].Opens[0].Path = "/etc/shadow" - ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is whitelisted and sensitive") - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rule := CreateRuleR0010UnexpectedSensitiveFileAccess() + if rule == nil { + t.Fatal("Expected rule to not be nil") + } - // Test with sensitive file, but not whitelisted - e.FullPath = "/etc/shadow" - profile.Spec.Containers[0].Opens[0].Path = "/test" - ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since file is not whitelisted and sensitive") - } + objCache := &RuleObjectCacheMock{} + if tt.profile != nil { + objCache.SetApplicationProfile(tt.profile) + } - // Test with sensitive file that originates from additionalPaths parameter - e.FullPath = "/etc/blabla" - profile.Spec.Containers[0].Opens[0].Path = "/test" - additionalPaths := []interface{}{"/etc/blabla"} - r.SetParameters(map[string]interface{}{"additionalPaths": additionalPaths}) - ruleResult = r.ProcessEvent(utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since file is not whitelisted and sensitive") - } + if tt.additionalPaths != nil { + rule.SetParameters(map[string]interface{}{ + "additionalPaths": tt.additionalPaths, + }) + } + + result := rule.ProcessEvent(utils.OpenEventType, tt.event, objCache) + if tt.expectAlert && result == nil { + t.Errorf("%s: expected alert but got none", tt.description) + } + if !tt.expectAlert && result != nil { + t.Errorf("%s: expected no alert but got one", tt.description) + } + }) + } } diff --git a/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic.go b/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic.go new file mode 100644 index 00000000..c72387d1 --- /dev/null +++ b/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic.go @@ -0,0 +1,192 @@ +package ruleengine + +import ( + "bytes" + "fmt" + "net" + "slices" + "strings" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/utils" + + tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" +) + +const ( + R0011ID = "R0011" + R0011Name = "Unexpected Egress Network Traffic" +) + +var R0011UnexpectedEgressNetworkTrafficRuleDescriptor = ruleengine.RuleDescriptor{ + ID: R0011ID, + Name: R0011Name, + Description: "Detecting unexpected egress network traffic that is not whitelisted by application profile.", + Tags: []string{"dns", "whitelisted", "network"}, + Priority: RulePriorityMed, + Requirements: &RuleRequirements{ + EventTypes: []utils.EventType{utils.NetworkEventType}, + }, + RuleCreationFunc: func() ruleengine.RuleEvaluator 
{ + return CreateRuleR0011UnexpectedEgressNetworkTraffic() + }, +} +var _ ruleengine.RuleEvaluator = (*R0011UnexpectedEgressNetworkTraffic)(nil) + +type R0011UnexpectedEgressNetworkTraffic struct { + BaseRule + alertedAddresses maps.SafeMap[string, bool] +} + +func CreateRuleR0011UnexpectedEgressNetworkTraffic() *R0011UnexpectedEgressNetworkTraffic { + return &R0011UnexpectedEgressNetworkTraffic{} +} + +func (rule *R0011UnexpectedEgressNetworkTraffic) Name() string { + return R0011Name +} +func (rule *R0011UnexpectedEgressNetworkTraffic) ID() string { + return R0011ID +} + +func (rule *R0011UnexpectedEgressNetworkTraffic) DeleteRule() { +} + +func (rule *R0011UnexpectedEgressNetworkTraffic) handleNetworkEvent(networkEvent *tracernetworktype.Event, objCache objectcache.ObjectCache) ruleengine.RuleFailure { + // Check if we already alerted on this endpoint. + endpoint := fmt.Sprintf("%s:%d:%s", networkEvent.DstEndpoint.Addr, networkEvent.Port, networkEvent.Proto) + if ok := rule.alertedAddresses.Has(endpoint); ok { + return nil + } + + // Check if the network event is outgoing and the destination is not a private IP. + if networkEvent.PktType == "OUTGOING" && !isPrivateIP(networkEvent.DstEndpoint.Addr) { + nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood(networkEvent.Runtime.ContainerID) + if nn == nil { + return nil + } + + nnContainer, err := getContainerFromNetworkNeighborhood(nn, networkEvent.GetContainer()) + if err != nil { + return nil + } + + // If the destination resolves to a known domain, do not alert on the raw address. + domain := objCache.DnsCache().ResolveIpToDomain(networkEvent.DstEndpoint.Addr) + + if domain != "" { + return nil + } + + // Check if the address is in the egress list and isn't in-cluster. + for _, egress := range nnContainer.Egress { + if egress.IPAddress == networkEvent.DstEndpoint.Addr { + return nil + } + + // Check if we have seen this DNS name before and it is an in-cluster address or in the egress list. + if domain != "" && (strings.HasSuffix(domain, "svc.cluster.local.") || slices.Contains(egress.DNSNames, domain)) { + return nil + } + } + + // Alert on the address.
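+ // Illustrative note (hypothetical values): the endpoint key built above has the
+ // form "<addr>:<port>:<proto>", e.g. "203.0.113.7:443:TCP", so this rule raises
+ // at most one alert per destination address/port/protocol tuple.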
+ rule.alertedAddresses.Set(endpoint, true) + return &GenericRuleFailure{ + BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ + AlertName: rule.Name(), + InfectedPID: networkEvent.Pid, + Arguments: map[string]interface{}{ + "ip": networkEvent.DstEndpoint.Addr, + "port": networkEvent.Port, + "proto": networkEvent.Proto, + }, + FixSuggestions: fmt.Sprintf("If this is a valid behavior, please add the IP %s to the whitelist in the application profile for the Pod %s.", + networkEvent.DstEndpoint.Addr, + networkEvent.GetPod(), + ), + Severity: R0011UnexpectedEgressNetworkTrafficRuleDescriptor.Priority, + }, + RuntimeProcessDetails: apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: networkEvent.Comm, + Gid: &networkEvent.Gid, + PID: networkEvent.Pid, + Uid: &networkEvent.Uid, + }, + ContainerID: networkEvent.Runtime.ContainerID, + }, + TriggerEvent: networkEvent.Event, + RuleAlert: apitypes.RuleAlert{ + RuleDescription: fmt.Sprintf("Unexpected egress network communication to: %s:%d using %s from: %s", networkEvent.DstEndpoint.Addr, networkEvent.Port, networkEvent.Proto, networkEvent.GetContainer()), + }, + RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ + PodName: networkEvent.GetPod(), + PodLabels: networkEvent.K8s.PodLabels, + }, + RuleID: rule.ID(), + } + } + + return nil +} + +func (rule *R0011UnexpectedEgressNetworkTraffic) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { + if eventType != utils.NetworkEventType { + return nil + } + + networkEvent, ok := event.(*tracernetworktype.Event) + if !ok { + return nil + } + return rule.handleNetworkEvent(networkEvent, objCache) + +} + +func (rule *R0011UnexpectedEgressNetworkTraffic) Requirements() ruleengine.RuleSpec { + return &RuleRequirements{ + EventTypes: R0011UnexpectedEgressNetworkTrafficRuleDescriptor.Requirements.RequiredEventTypes(), + } +} + +func isPrivateIP(ip string) bool { + parsedIP := net.ParseIP(ip) + if parsedIP == nil { + return false + } + + // Check if IP is localhost + if parsedIP.IsLoopback() { + return true + } + + // Check if IP is metadata server + if parsedIP.Equal(net.ParseIP("169.254.169.254")) { + return true + } + + // Check if IP is in private IP ranges + privateIPRanges := []struct { + start net.IP + end net.IP + }{ + {net.ParseIP("10.0.0.0"), net.ParseIP("10.255.255.255")}, + {net.ParseIP("172.16.0.0"), net.ParseIP("172.31.255.255")}, + {net.ParseIP("192.168.0.0"), net.ParseIP("192.168.255.255")}, + // Class D (Multicast) + {net.ParseIP("224.0.0.0"), net.ParseIP("239.255.255.255")}, + // Class E (Experimental) + {net.ParseIP("240.0.0.0"), net.ParseIP("255.255.255.255")}, + } + + for _, r := range privateIPRanges { + if bytes.Compare(parsedIP, r.start) >= 0 && bytes.Compare(parsedIP, r.end) <= 0 { + return true + } + } + + return false +} diff --git a/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic_test.go b/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic_test.go new file mode 100644 index 00000000..949e8673 --- /dev/null +++ b/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic_test.go @@ -0,0 +1,139 @@ +package ruleengine + +import ( + "testing" + + "github.com/kubescape/node-agent/pkg/utils" + + tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +func TestR0011UnexpectedNetworkTraffic(t *testing.T) { + // Create
a new rule + r := CreateRuleR0011UnexpectedEgressNetworkTraffic() + // Assert r is not nil + if r == nil { + t.Errorf("Expected r to not be nil") + } + + // Create a network request event + e := &tracernetworktype.Event{ + Event: eventtypes.Event{ + CommonData: eventtypes.CommonData{ + K8s: eventtypes.K8sMetadata{ + BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + ContainerName: "test", + }, + }, + }, + }, + PktType: "OUTGOING", + DstEndpoint: eventtypes.L3Endpoint{ + Addr: "1.1.1.1", + }, + Port: 80, + } + + // Test with nil network neighborhood. + ruleResult := r.ProcessEvent(utils.NetworkEventType, e, &RuleObjectCacheMock{}) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since there is no network neighborhood") + } + + // Test with whitelisted address without dns cache. + objCache := RuleObjectCacheMock{} + nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") + if nn == nil { + nn = &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test", + + Egress: []v1beta1.NetworkNeighbor{ + { + DNS: "test.com", + DNSNames: []string{"test.com"}, + IPAddress: "1.1.1.1", + }, + }, + }) + + objCache.SetNetworkNeighborhood(nn) + } + + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since domain/address is whitelisted") + } + + // Test with non-whitelisted address without dns cache. + e.DstEndpoint.Addr = "2.2.2.2" + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since domain/address is not whitelisted") + } + + // Test with whitelisted address with dns cache. + objCache.SetDnsCache(map[string]string{"2.2.2.2": "test.com"}) + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since we are able to resolve the address") + } + + // Test with incoming packet. + e.PktType = "INCOMING" + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since packet is incoming") + } + + // Test with private address. + e.PktType = "OUTGOING" + e.DstEndpoint.Addr = "10.0.0.1" + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since address is private") + } + + // Test with non-whitelisted address with dns cache empty. + e.DstEndpoint.Addr = "4.4.4.4" + objCache.SetDnsCache(map[string]string{}) + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since we are not able to resolve the address") + } + + // Test with non-whitelisted address and an empty dns cache on port 443. + e.DstEndpoint.Addr = "5.5.5.5" + e.Port = 443 + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since it's not whitelisted") + } + + // Test the same non-whitelisted address on a different port. + e.DstEndpoint.Addr = "5.5.5.5" + e.Port = 80 + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since it's not whitelisted and it's a different port") + } + + // Repeat the same address, port, and protocol to verify we only alert once per endpoint.
+ e.DstEndpoint.Addr = "5.5.5.5" + e.Port = 80 + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult != nil { + t.Errorf("Expected ruleResult to be nil since we already alerted on this endpoint") + } + + // Test the same address and port with a different protocol. + e.DstEndpoint.Addr = "5.5.5.5" + e.Port = 80 + e.Proto = "UDP" + ruleResult = r.ProcessEvent(utils.NetworkEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to not be nil since it's a different protocol") + } +} diff --git a/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go b/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go index b33a3bcc..2be1abf2 100644 --- a/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go +++ b/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go @@ -18,7 +18,7 @@ const ( R1000Name = "Exec from malicious source" ) -var R1000ExecFromMaliciousSourceDescriptor = RuleDescriptor{ +var R1000ExecFromMaliciousSourceDescriptor = ruleengine.RuleDescriptor{ ID: R1000ID, Name: R1000Name, Description: "Detecting exec calls that are from malicious source like: /dev/shm, /proc/self", @@ -49,7 +49,7 @@ func (rule *R1000ExecFromMaliciousSource) ID() string { return R1000ID } -func (rule *R1000ExecFromMaliciousSource) ProcessEvent(eventType utils.EventType, event interface{}, _ objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1000ExecFromMaliciousSource) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, _ objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.ExecveEventType { return nil } @@ -102,7 +102,8 @@ func (rule *R1000ExecFromMaliciousSource) ProcessEvent(eventType utils.EventType RuleDescription: fmt.Sprintf("Execution from malicious source: %s in: %s", execPathDir, execEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go b/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go index 8c8acd54..f9236509 100644 --- a/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go +++ b/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go @@ -18,7 +18,7 @@ const ( R1001Name = "Exec Binary Not In Base Image" ) -var R1001ExecBinaryNotInBaseImageRuleDescriptor = RuleDescriptor{ +var R1001ExecBinaryNotInBaseImageRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1001ID, Name: R1001Name, Description: "Detecting exec calls of binaries that are not included in the base image", @@ -52,7 +52,7 @@ func (rule *R1001ExecBinaryNotInBaseImage) ID() string { func (rule *R1001ExecBinaryNotInBaseImage) DeleteRule() { } -func (rule *R1001ExecBinaryNotInBaseImage) ProcessEvent(eventType utils.EventType, event interface{}, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1001ExecBinaryNotInBaseImage) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.ExecveEventType { return nil } @@ -97,7 +97,8 @@ func (rule *R1001ExecBinaryNotInBaseImage) ProcessEvent(eventType utils.EventTyp RuleDescription: fmt.Sprintf("Process (%s) was executed in: %s and is not part of the image", execEvent.Comm, execEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, },
RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1002_load_kernel_module.go b/pkg/ruleengine/v1/r1002_load_kernel_module.go index 8d859316..b74dfda4 100644 --- a/pkg/ruleengine/v1/r1002_load_kernel_module.go +++ b/pkg/ruleengine/v1/r1002_load_kernel_module.go @@ -17,7 +17,7 @@ const ( R1002Name = "Kernel Module Load" ) -var R1002LoadKernelModuleRuleDescriptor = RuleDescriptor{ +var R1002LoadKernelModuleRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1002ID, Name: R1002Name, Description: "Detecting Kernel Module Load.", @@ -52,7 +52,7 @@ func (rule *R1002LoadKernelModule) ID() string { func (rule *R1002LoadKernelModule) DeleteRule() { } -func (rule *R1002LoadKernelModule) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1002LoadKernelModule) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if rule.alerted { return nil } @@ -89,7 +89,8 @@ func (rule *R1002LoadKernelModule) ProcessEvent(eventType utils.EventType, event RuleDescription: fmt.Sprintf("Kernel module load syscall (%s) was called in: %s", syscallEvent.SyscallName, syscallEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), + PodName: syscallEvent.GetPod(), + PodLabels: syscallEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go b/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go index ae62a3ce..8ea39b0a 100644 --- a/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go +++ b/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go @@ -2,9 +2,13 @@ package ruleengine import ( "fmt" + "os" "slices" + "strconv" + "strings" "github.com/goradd/maps" + "github.com/kubescape/go-logger/helpers" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/utils" @@ -21,7 +25,7 @@ const ( R1003Name = "Malicious SSH Connection" ) -var R1003MaliciousSSHConnectionRuleDescriptor = RuleDescriptor{ +var R1003MaliciousSSHConnectionRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1003ID, Name: R1003Name, Description: "Detecting ssh connection to disallowed port", @@ -39,13 +43,54 @@ var _ ruleengine.RuleEvaluator = (*R1003MaliciousSSHConnection)(nil) type R1003MaliciousSSHConnection struct { BaseRule - allowedPorts []uint16 - requests maps.SafeMap[string, string] // Mapping of src IP to dst IP + allowedPorts []uint16 + ephemeralPortRange [2]uint16 + requests maps.SafeMap[string, string] // Mapping of src IP to dst IP +} + +// ReadPortRange reads the two port numbers from /proc/sys/net/ipv4/ip_local_port_range +func ReadPortRange() ([2]uint16, error) { + // Default port range + var startPort, endPort uint16 = 32768, 60999 + + // Read the contents of the file + data, err := os.ReadFile("/proc/sys/net/ipv4/ip_local_port_range") + if err != nil { + return [2]uint16{startPort, endPort}, fmt.Errorf("failed to read port range file: %v", err) + } + + // Convert the data to a string and split by spaces + ports := strings.Fields(string(data)) + if len(ports) != 2 { + return [2]uint16{startPort, endPort}, fmt.Errorf("unexpected format in port range file") + } + + // Convert the port strings to integers + startPortInt, err := strconv.Atoi(ports[0]) + if err != nil { + return [2]uint16{startPort, endPort}, fmt.Errorf("failed to convert start port: %v", err) + } + + endPortInt, err := strconv.Atoi(ports[1]) + if err != 
nil { + return [2]uint16{startPort, endPort}, fmt.Errorf("failed to convert end port: %v", err) + } + + if startPortInt < 0 || startPortInt > 65535 || endPortInt < 0 || endPortInt > 65535 { + return [2]uint16{startPort, endPort}, fmt.Errorf("invalid port range") + } + + return [2]uint16{uint16(startPortInt), uint16(endPortInt)}, nil } func CreateRuleR1003MaliciousSSHConnection() *R1003MaliciousSSHConnection { + ephemeralPorts, err := ReadPortRange() + if err != nil { + logger.L().Error("Failed to read port range, setting to default range:", helpers.Error(err)) + } return &R1003MaliciousSSHConnection{ - allowedPorts: []uint16{22}, + allowedPorts: []uint16{22, 2022}, + ephemeralPortRange: ephemeralPorts, } } func (rule *R1003MaliciousSSHConnection) Name() string { @@ -82,13 +127,40 @@ func (rule *R1003MaliciousSSHConnection) SetParameters(params map[string]interfa func (rule *R1003MaliciousSSHConnection) DeleteRule() { } -func (rule *R1003MaliciousSSHConnection) ProcessEvent(eventType utils.EventType, event interface{}, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1003MaliciousSSHConnection) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.SSHEventType { return nil } sshEvent := event.(*tracersshtype.Event) + // Check only outgoing packets (source port is ephemeral) + if sshEvent.SrcPort < rule.ephemeralPortRange[0] || sshEvent.SrcPort > rule.ephemeralPortRange[1] { + return nil + } + + nn := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhood(sshEvent.Runtime.ContainerID) + if nn == nil { + return nil + } + + nnContainer, err := getContainerFromNetworkNeighborhood(nn, sshEvent.GetContainer()) + if err != nil { + return nil + } + + for _, egress := range nnContainer.Egress { + if egress.IPAddress == sshEvent.DstIP { + for _, port := range egress.Ports { + if port.Port != nil { + if uint16(*port.Port) == sshEvent.DstPort { + return nil + } + } + } + } + } + if !slices.Contains(rule.allowedPorts, sshEvent.DstPort) { // Check if the event is a response to a request we have already seen. 
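+ // Sketch of the request/response bookkeeping below, using hypothetical addresses:
+ // an outbound attempt 10.0.0.5:40000 -> 203.0.113.9:2222 stores
+ // requests["10.0.0.5"] = "203.0.113.9"; a later reply 203.0.113.9:2222 -> 10.0.0.5:40000
+ // has DstIP "10.0.0.5", so requests.Has(DstIP) is true and no second alert is raised.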
if rule.requests.Has(sshEvent.DstIP) { @@ -97,7 +169,13 @@ func (rule *R1003MaliciousSSHConnection) ProcessEvent(eventType utils.EventType, rule.requests.Set(sshEvent.SrcIP, sshEvent.DstIP) ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "srcIP": sshEvent.SrcIP, + "dstIP": sshEvent.DstIP, + "dstPort": sshEvent.DstPort, + "srcPort": sshEvent.SrcPort, + }, InfectedPID: sshEvent.Pid, FixSuggestions: "If this is a legitimate action, please add the port as a parameter to the binding of this rule", Severity: R1003MaliciousSSHConnectionRuleDescriptor.Priority, @@ -116,7 +194,8 @@ func (rule *R1003MaliciousSSHConnection) ProcessEvent(eventType utils.EventType, RuleDescription: fmt.Sprintf("SSH connection to disallowed port %s:%d", sshEvent.DstIP, sshEvent.DstPort), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: sshEvent.GetPod(), + PodName: sshEvent.GetPod(), + PodLabels: sshEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go b/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go index 628687e3..f61c0b45 100644 --- a/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go +++ b/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go @@ -5,6 +5,8 @@ import ( tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "k8s.io/utils/ptr" eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" ) @@ -32,17 +34,49 @@ func TestR1003DisallowedSSHConnectionPort_ProcessEvent(t *testing.T) { SrcIP: "1.1.1.1", DstIP: "2.2.2.2", DstPort: 22, - SrcPort: 1234, + SrcPort: 33333, } - failure := rule.ProcessEvent(utils.SSHEventType, sshEvent, &RuleObjectCacheMock{}) + // Set up a network neighborhood that whitelists egress to 1.1.1.1 on port 2023.
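+ // Note: the egress whitelist below is matched by destination IP; the event's
+ // DstIP is "2.2.2.2", so the entry for "1.1.1.1" (port 2023) does not apply to it,
+ // which is why the disallowed-port cases further down still expect a failure.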
+ objCache := RuleObjectCacheMock{} + nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") + if nn == nil { + nn = &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test", + + Egress: []v1beta1.NetworkNeighbor{ + { + DNS: "test.com", + DNSNames: []string{"test.com"}, + IPAddress: "1.1.1.1", + Ports: []v1beta1.NetworkPort{ + { + Port: ptr.To(int32(2023)), + }, + }, + }, + }, + }) + + objCache.SetNetworkNeighborhood(nn) + } + + failure := rule.ProcessEvent(utils.SSHEventType, sshEvent, &objCache) if failure != nil { t.Errorf("Expected nil since the SSH connection is to an allowed port, got %v", failure) } // Test disallowed port sshEvent.DstPort = 1234 - failure = rule.ProcessEvent(utils.SSHEventType, sshEvent, &RuleObjectCacheMock{}) + failure = rule.ProcessEvent(utils.SSHEventType, sshEvent, &objCache) + if failure == nil { + t.Errorf("Expected failure since the SSH connection is to a disallowed port, got nil") + } + + // Test disallowed port that is in the egress list + sshEvent.DstPort = 2023 + failure = rule.ProcessEvent(utils.SSHEventType, sshEvent, &objCache) if failure == nil { t.Errorf("Expected failure since the SSH connection is to a disallowed port, got nil") } diff --git a/pkg/ruleengine/v1/r1004_exec_from_mount.go b/pkg/ruleengine/v1/r1004_exec_from_mount.go index 6a8a93f7..e266a874 100644 --- a/pkg/ruleengine/v1/r1004_exec_from_mount.go +++ b/pkg/ruleengine/v1/r1004_exec_from_mount.go @@ -18,7 +18,7 @@ const ( R1004Name = "Exec from mount" ) -var R1004ExecFromMountRuleDescriptor = RuleDescriptor{ +var R1004ExecFromMountRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1004ID, Name: R1004Name, Description: "Detecting exec calls from mounted paths.", @@ -50,7 +50,7 @@ func (rule *R1004ExecFromMount) ID() string { func (rule *R1004ExecFromMount) DeleteRule() { } -func (rule *R1004ExecFromMount) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1004ExecFromMount) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.ExecveEventType { return nil } @@ -80,7 +80,8 @@ func (rule *R1004ExecFromMount) ProcessEvent(eventType utils.EventType, event in AlertName: rule.Name(), InfectedPID: execEvent.Pid, Arguments: map[string]interface{}{ - "hardlink": execEvent.ExePath, + "exec": execEvent.ExePath, + "args": execEvent.Args, }, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule", Severity: R1004ExecFromMountRuleDescriptor.Priority, @@ -106,7 +107,8 @@ func (rule *R1004ExecFromMount) ProcessEvent(eventType utils.EventType, event in RuleDescription: fmt.Sprintf("Process (%s) was executed from a mounted path (%s) in: %s", fullPath, mount, execEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, }, RuleID: R1004ID, } diff --git a/pkg/ruleengine/v1/r1005_fileless_execution.go b/pkg/ruleengine/v1/r1005_fileless_execution.go index e2438a57..d1645caa 100644 --- a/pkg/ruleengine/v1/r1005_fileless_execution.go +++ b/pkg/ruleengine/v1/r1005_fileless_execution.go @@ -18,7 +18,7 @@ const ( R1005Name = "Fileless Execution" ) -var R1005FilelessExecutionRuleDescriptor = RuleDescriptor{ +var R1005FilelessExecutionRuleDescriptor = 
ruleengine.RuleDescriptor{ ID: R1005ID, Name: R1005Name, Description: "Detecting Fileless Execution", @@ -54,7 +54,7 @@ func (rule *R1005FilelessExecution) ID() string { func (rule *R1005FilelessExecution) DeleteRule() { } -func (rule *R1005FilelessExecution) ProcessEvent(eventType utils.EventType, event interface{}, _ objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1005FilelessExecution) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, _ objectcache.ObjectCache) ruleengine.RuleFailure { if eventType == utils.ExecveEventType { return rule.handleExecveEvent(event.(*tracerexectype.Event)) } @@ -104,7 +104,8 @@ func (rule *R1005FilelessExecution) handleExecveEvent(execEvent *tracerexectype. RuleDescription: fmt.Sprintf("Fileless execution detected: exec call \"%s\" is from a malicious source \"%s\"", execPathDir, "/proc/self/fd"), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1006_unshare_system_call.go b/pkg/ruleengine/v1/r1006_unshare_system_call.go index 0b99413d..5440d673 100644 --- a/pkg/ruleengine/v1/r1006_unshare_system_call.go +++ b/pkg/ruleengine/v1/r1006_unshare_system_call.go @@ -17,7 +17,7 @@ const ( R1006Name = "Unshare System Call usage" ) -var R1006UnshareSyscallRuleDescriptor = RuleDescriptor{ +var R1006UnshareSyscallRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1006ID, Name: R1006Name, Description: "Detecting Unshare System Call usage, which can be used to escape container.", @@ -54,7 +54,7 @@ func (rule *R1006UnshareSyscall) ID() string { func (rule *R1006UnshareSyscall) DeleteRule() { } -func (rule *R1006UnshareSyscall) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1006UnshareSyscall) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if rule.alreadyNotified { return nil } @@ -91,7 +91,8 @@ func (rule *R1006UnshareSyscall) ProcessEvent(eventType utils.EventType, event i RuleDescription: fmt.Sprintf("unshare system call executed in %s", syscallEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), + PodName: syscallEvent.GetPod(), + PodLabels: syscallEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go b/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go index d9ae8e9d..4083afd9 100644 --- a/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go +++ b/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go @@ -17,7 +17,7 @@ const ( R1007Name = "XMR Crypto Mining Detection" ) -var R1007XMRCryptoMiningRuleDescriptor = RuleDescriptor{ +var R1007XMRCryptoMiningRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1007ID, Name: R1007Name, Description: "Detecting XMR Crypto Miners by randomx algorithm usage.", @@ -54,7 +54,7 @@ func (rule *R1007XMRCryptoMining) ID() string { func (rule *R1007XMRCryptoMining) DeleteRule() { } -func (rule *R1007XMRCryptoMining) ProcessEvent(eventType utils.EventType, event interface{}, _ objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1007XMRCryptoMining) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, _ objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.RandomXEventType { return nil } @@ -85,7 +85,8 @@ func (rule *R1007XMRCryptoMining) ProcessEvent(eventType 
utils.EventType, event RuleDescription: fmt.Sprintf("XMR Crypto Miner process: (%s) executed in: %s", randomXEvent.ExePath, randomXEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: randomXEvent.GetPod(), + PodName: randomXEvent.GetPod(), + PodLabels: randomXEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1008_crypto_mining_domains.go b/pkg/ruleengine/v1/r1008_crypto_mining_domains.go index 09642c93..0f2bc2da 100644 --- a/pkg/ruleengine/v1/r1008_crypto_mining_domains.go +++ b/pkg/ruleengine/v1/r1008_crypto_mining_domains.go @@ -5,6 +5,7 @@ import ( "log" "slices" + "github.com/goradd/maps" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/utils" diff --git a/pkg/ruleengine/v1/r1009_crypto_mining_port.go b/pkg/ruleengine/v1/r1009_crypto_mining_port.go index 4e837d58..072155b2 100644 --- a/pkg/ruleengine/v1/r1009_crypto_mining_port.go +++ b/pkg/ruleengine/v1/r1009_crypto_mining_port.go @@ -22,7 +22,7 @@ var CommonlyUsedCryptoMinersPorts = []uint16{ 45700, // Monero (XMR) - Stratum mining protocol (TCP). (stratum+tcp://xmr.pool.minergate.com) } -var R1009CryptoMiningRelatedPortRuleDescriptor = RuleDescriptor{ +var R1009CryptoMiningRelatedPortRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1009ID, Name: R1009Name, Description: "Detecting Crypto Miners by suspicious port usage.", @@ -59,16 +59,49 @@ func (rule *R1009CryptoMiningRelatedPort) ID() string { func (rule *R1009CryptoMiningRelatedPort) DeleteRule() { } -func (rule *R1009CryptoMiningRelatedPort) ProcessEvent(eventType utils.EventType, event interface{}, _ objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1009CryptoMiningRelatedPort) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objectcache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.NetworkEventType { return nil } + networkEvent, ok := event.(*tracernetworktype.Event) + if !ok { + return nil + } + + nn := objectcache.NetworkNeighborhoodCache().GetNetworkNeighborhood(networkEvent.Runtime.ContainerID) + if nn == nil { + return nil + } + + nnContainer, err := getContainerFromNetworkNeighborhood(nn, networkEvent.GetContainer()) + if err != nil { + return nil + } + + // Check if the port is in the egress list. 
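+ // Note: only the port number is compared below; for example (hypothetical
+ // profile), an egress neighbor carrying Ports [{Port: 3333}] suppresses the
+ // alert for any outgoing connection on port 3333, regardless of destination address.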
+ for _, nn := range nnContainer.Egress { + for _, port := range nn.Ports { + if port.Port == nil { + continue + } + + if networkEvent.Port == uint16(*port.Port) { + return nil + } + } + } + if networkEvent, ok := event.(*tracernetworktype.Event); ok { if networkEvent.Proto == "TCP" && networkEvent.PktType == "OUTGOING" && slices.Contains(CommonlyUsedCryptoMinersPorts, networkEvent.Port) { ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "port": networkEvent.Port, + "proto": networkEvent.Proto, + "ip": networkEvent.DstEndpoint.Addr, + }, InfectedPID: networkEvent.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R1009CryptoMiningRelatedPortRuleDescriptor.Priority, @@ -87,7 +120,8 @@ func (rule *R1009CryptoMiningRelatedPort) ProcessEvent(eventType utils.EventType RuleDescription: fmt.Sprintf("Communication on a commonly used crypto mining port: %d in: %s", networkEvent.Port, networkEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: networkEvent.GetPod(), + PodName: networkEvent.GetPod(), + PodLabels: networkEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go b/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go index 821374f4..42bde1ac 100644 --- a/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go +++ b/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go @@ -4,9 +4,11 @@ import ( "testing" "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" ) func TestR1009CryptoMiningRelatedPort(t *testing.T) { @@ -28,9 +30,43 @@ func TestR1009CryptoMiningRelatedPort(t *testing.T) { t.Errorf("Expected nil, got %v", result) } + var port int32 = 3334 + + // Test with whitelisted port + objCache := RuleObjectCacheMock{} + nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") + if nn == nil { + nn = &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test", + + Egress: []v1beta1.NetworkNeighbor{ + { + DNS: "test.com", + Ports: []v1beta1.NetworkPort{ + { + Port: &port, + }, + }, + }, + }, + }) + + objCache.SetNetworkNeighborhood(nn) + } + // Test when event meets all conditions to return a ruleFailure eventType = utils.NetworkEventType event = &tracernetworktype.Event{ + Event: eventtypes.Event{ + CommonData: eventtypes.CommonData{ + K8s: eventtypes.K8sMetadata{ + BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + ContainerName: "test", + }, + }, + }, + }, Proto: "TCP", PktType: "OUTGOING", Port: CommonlyUsedCryptoMinersPorts[0], @@ -39,8 +75,25 @@ func TestR1009CryptoMiningRelatedPort(t *testing.T) { Pid: 1, Uid: 1, } - result = rule.ProcessEvent(eventType, event, &RuleObjectCacheMock{}) + result = rule.ProcessEvent(eventType, event, &objCache) if result == nil { t.Errorf("Expected ruleFailure, got nil") } + + // Test when event does not meet conditions to return a ruleFailure + port = 3333 + objCache.nn.Spec.Containers[0].Egress[0].Ports[0].Port = &port + result = rule.ProcessEvent(eventType, event, 
&objCache) + if result != nil { + t.Errorf("Expected nil, got %v", result) + } + + // Test with a zero port in the egress list (no longer whitelists the event port) + port = 0 + objCache.nn.Spec.Containers[0].Egress[0].Ports[0].Port = &port + result = rule.ProcessEvent(eventType, event, &objCache) + if result == nil { + t.Errorf("Expected ruleFailure, got nil") + } + } diff --git a/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go b/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go index 8a94d12c..8a4b97d3 100644 --- a/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go +++ b/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go @@ -21,7 +21,7 @@ const ( R1010Name = "Symlink Created Over Sensitive File" ) -var R1010SymlinkCreatedOverSensitiveFileRuleDescriptor = RuleDescriptor{ +var R1010SymlinkCreatedOverSensitiveFileRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1010ID, Name: R1010Name, Description: "Detecting symlink creation over sensitive files.", @@ -78,7 +78,7 @@ func (rule *R1010SymlinkCreatedOverSensitiveFile) ID() string { func (rule *R1010SymlinkCreatedOverSensitiveFile) DeleteRule() { } -func (rule *R1010SymlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1010SymlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.SymlinkEventType { return nil } @@ -98,7 +98,11 @@ func (rule *R1010SymlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils.E if strings.HasPrefix(symlinkEvent.OldPath, path) { return &GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "oldPath": symlinkEvent.OldPath, + "newPath": symlinkEvent.NewPath, + }, InfectedPID: symlinkEvent.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R1010SymlinkCreatedOverSensitiveFileRuleDescriptor.Priority, @@ -121,7 +125,8 @@ func (rule *R1010SymlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils.E RuleDescription: fmt.Sprintf("Symlink created over sensitive file: %s - %s in: %s", symlinkEvent.OldPath, symlinkEvent.NewPath, symlinkEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: symlinkEvent.GetPod(), + PodName: symlinkEvent.GetPod(), + PodLabels: symlinkEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1011_ld_preload_hook.go b/pkg/ruleengine/v1/r1011_ld_preload_hook.go index 1a529d04..30041aac 100644 --- a/pkg/ruleengine/v1/r1011_ld_preload_hook.go +++ b/pkg/ruleengine/v1/r1011_ld_preload_hook.go @@ -25,7 +25,7 @@ const ( var LD_PRELOAD_ENV_VARS = []string{"LD_PRELOAD", "LD_AUDIT", "LD_LIBRARY_PATH"} -var R1011LdPreloadHookRuleDescriptor = RuleDescriptor{ +var R1011LdPreloadHookRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1011ID, Name: R1011Name, Description: "Detecting ld_preload hook techniques.", @@ -111,6 +111,7 @@ func (rule *R1011LdPreloadHook) handleExecEvent(execEvent *tracerexectype.Event, ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: rule.Name(), + Arguments: map[string]interface{}{"envVar": ldHookVar}, InfectedPID: execEvent.Pid, FixSuggestions: fmt.Sprintf("Check the environment variable %s", ldHookVar), Severity: R1011LdPreloadHookRuleDescriptor.Priority, @@ 
-136,7 +137,8 @@ func (rule *R1011LdPreloadHook) handleExecEvent(execEvent *tracerexectype.Event, RuleDescription: fmt.Sprintf("Process (%s) was executed in: %s and is using the environment variable %s", execEvent.Comm, execEvent.GetContainer(), fmt.Sprintf("%s=%s", ldHookVar, envVars[ldHookVar])), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, }, RuleID: rule.ID(), } @@ -151,7 +153,11 @@ func (rule *R1011LdPreloadHook) handleOpenEvent(openEvent *traceropentype.Event) if openEvent.FullPath == LD_PRELOAD_FILE && (openEvent.FlagsRaw&(int32(os.O_WRONLY)|int32(os.O_RDWR))) != 0 { ruleFailure := GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "path": openEvent.FullPath, + "flags": openEvent.Flags, + }, InfectedPID: openEvent.Pid, FixSuggestions: "Check the file /etc/ld.so.preload", Severity: R1011LdPreloadHookRuleDescriptor.Priority, @@ -170,7 +176,8 @@ func (rule *R1011LdPreloadHook) handleOpenEvent(openEvent *traceropentype.Event) RuleDescription: fmt.Sprintf("Process (%s) was executed in: %s and is opening the file %s", openEvent.Comm, openEvent.GetContainer(), openEvent.Path), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), + PodName: openEvent.GetPod(), + PodLabels: openEvent.K8s.PodLabels, }, RuleID: rule.ID(), } @@ -181,7 +188,7 @@ func (rule *R1011LdPreloadHook) handleOpenEvent(openEvent *traceropentype.Event) return nil } -func (rule *R1011LdPreloadHook) ProcessEvent(eventType utils.EventType, event interface{}, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1011LdPreloadHook) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objectCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.ExecveEventType && eventType != utils.OpenEventType { return nil } diff --git a/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go b/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go index 17334224..2458929a 100644 --- a/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go +++ b/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go @@ -21,7 +21,7 @@ const ( R1012Name = "Hardlink Created Over Sensitive File" ) -var R1012HardlinkCreatedOverSensitiveFileRuleDescriptor = RuleDescriptor{ +var R1012HardlinkCreatedOverSensitiveFileRuleDescriptor = ruleengine.RuleDescriptor{ ID: R1012ID, Name: R1012Name, Description: "Detecting hardlink creation over sensitive files.", @@ -78,7 +78,7 @@ func (rule *R1012HardlinkCreatedOverSensitiveFile) ID() string { func (rule *R1012HardlinkCreatedOverSensitiveFile) DeleteRule() { } -func (rule *R1012HardlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils.EventType, event interface{}, objCache objectcache.ObjectCache) ruleengine.RuleFailure { +func (rule *R1012HardlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { if eventType != utils.HardlinkEventType { return nil } @@ -98,7 +98,11 @@ func (rule *R1012HardlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils. 
if strings.HasPrefix(hardlinkEvent.OldPath, path) { return &GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: rule.Name(), + AlertName: rule.Name(), + Arguments: map[string]interface{}{ + "oldPath": hardlinkEvent.OldPath, + "newPath": hardlinkEvent.NewPath, + }, InfectedPID: hardlinkEvent.Pid, FixSuggestions: "If this is a legitimate action, please consider removing this workload from the binding of this rule.", Severity: R1012HardlinkCreatedOverSensitiveFileRuleDescriptor.Priority, @@ -121,7 +125,8 @@ func (rule *R1012HardlinkCreatedOverSensitiveFile) ProcessEvent(eventType utils. RuleDescription: fmt.Sprintf("Hardlink created over sensitive file: %s - %s in: %s", hardlinkEvent.OldPath, hardlinkEvent.NewPath, hardlinkEvent.GetContainer()), }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: hardlinkEvent.GetPod(), + PodName: hardlinkEvent.GetPod(), + PodLabels: hardlinkEvent.K8s.PodLabels, }, RuleID: rule.ID(), } diff --git a/pkg/ruleengine/v1/r1015_malicious_ptrace_usage.go b/pkg/ruleengine/v1/r1015_malicious_ptrace_usage.go new file mode 100644 index 00000000..42c311ea --- /dev/null +++ b/pkg/ruleengine/v1/r1015_malicious_ptrace_usage.go @@ -0,0 +1,123 @@ +package ruleengine + +import ( + "fmt" + + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/utils" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + + tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" +) + +const ( + R1015ID = "R1015" + R1015Name = "Malicious Ptrace Usage" +) + +var R1015MaliciousPtraceUsageRuleDescriptor = ruleengine.RuleDescriptor{ + ID: R1015ID, + Name: R1015Name, + Description: "Detecting potentially malicious ptrace usage.", + Tags: []string{"process", "malicious"}, + Priority: RulePriorityHigh, + Requirements: &RuleRequirements{ + EventTypes: []utils.EventType{ + utils.PtraceEventType, + }, + }, + RuleCreationFunc: func() ruleengine.RuleEvaluator { + return CreateRuleR1015MaliciousPtraceUsage() + }, +} +var _ ruleengine.RuleEvaluator = (*R1015MaliciousPtraceUsage)(nil) + +type R1015MaliciousPtraceUsage struct { + BaseRule + allowedProcesses []string +} + +func CreateRuleR1015MaliciousPtraceUsage() *R1015MaliciousPtraceUsage { + return &R1015MaliciousPtraceUsage{ + allowedProcesses: []string{}, + } +} + +func (rule *R1015MaliciousPtraceUsage) SetParameters(parameters map[string]interface{}) { + rule.BaseRule.SetParameters(parameters) + + allowedProcessesInterface := rule.GetParameters()["allowedProcesses"] + if allowedProcessesInterface == nil { + return + } + + allowedProcesses, ok := interfaceToStringSlice(allowedProcessesInterface) + if ok { + for _, process := range allowedProcesses { + rule.allowedProcesses = append(rule.allowedProcesses, fmt.Sprintf("%v", process)) + } + } else { + logger.L().Warning("failed to convert allowedProcesses to []string", helpers.String("ruleID", rule.ID())) + } +} + +func (rule *R1015MaliciousPtraceUsage) Name() string { + return R1015Name +} + +func (rule *R1015MaliciousPtraceUsage) ID() string { + return R1015ID +} + +func (rule *R1015MaliciousPtraceUsage) DeleteRule() { +} + +func (rule *R1015MaliciousPtraceUsage) ProcessEvent(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { + if eventType != utils.PtraceEventType { + return nil + } + + ptraceEvent, ok := 
event.(*tracerptracetype.Event) + if !ok { + return nil + } + + return &GenericRuleFailure{ + BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ + AlertName: rule.Name(), + InfectedPID: ptraceEvent.Pid, + FixSuggestions: "Consider reviewing the application usage of ptrace.", + Severity: R1015MaliciousPtraceUsageRuleDescriptor.Priority, + }, + RuntimeProcessDetails: apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: ptraceEvent.Comm, + PPID: ptraceEvent.PPid, + PID: ptraceEvent.Pid, + Uid: &ptraceEvent.Uid, + Gid: &ptraceEvent.Gid, + Path: ptraceEvent.ExePath, + }, + ContainerID: ptraceEvent.Runtime.ContainerID, + }, + TriggerEvent: ptraceEvent.Event, + RuleAlert: apitypes.RuleAlert{ + RuleDescription: fmt.Sprintf("Malicious ptrace usage detected from: %s on PID: %d", ptraceEvent.Comm, ptraceEvent.Pid), + }, + RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ + PodName: ptraceEvent.GetPod(), + PodLabels: ptraceEvent.K8s.PodLabels, + }, + RuleID: rule.ID(), + } +} + +func (rule *R1015MaliciousPtraceUsage) Requirements() ruleengine.RuleSpec { + return &RuleRequirements{ + EventTypes: R1015MaliciousPtraceUsageRuleDescriptor.Requirements.RequiredEventTypes(), + } +} diff --git a/pkg/ruleengine/v1/r1015_malicious_ptrace_usage_test.go b/pkg/ruleengine/v1/r1015_malicious_ptrace_usage_test.go new file mode 100644 index 00000000..7bba3b7d --- /dev/null +++ b/pkg/ruleengine/v1/r1015_malicious_ptrace_usage_test.go @@ -0,0 +1,110 @@ +package ruleengine + +import ( + "testing" + + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + "github.com/kubescape/node-agent/pkg/utils" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + + tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" +) + +const ( + // Define the ptrace constants + PTRACE_SETREGS = 13 + PTRACE_POKETEXT = 4 + PTRACE_POKEDATA = 5 +) + +func TestR1015MaliciousPtraceUsage(t *testing.T) { + // Create a new rule + r := CreateRuleR1015MaliciousPtraceUsage() // Assert r is not nil + if r == nil { + t.Errorf("Expected r to not be nil") + } + + objCache := RuleObjectCacheMock{} + profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") + if profile == nil { + profile = &v1beta1.ApplicationProfile{ + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "test", + Opens: []v1beta1.OpenCalls{ + { + Path: "/test", + Flags: []string{"O_RDONLY"}, + }, + }, + Execs: []v1beta1.ExecCalls{ + { + Path: "/usr/sbin/groupadd", + Args: []string{"test"}, + }, + }, + }, + }, + }, + } + objCache.SetApplicationProfile(profile) + } + + // Create a ptrace event for a disallowed request (malicious request) + e := &tracerptracetype.Event{ + Event: eventtypes.Event{ + CommonData: eventtypes.CommonData{ + K8s: eventtypes.K8sMetadata{ + BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + ContainerName: "test", + }, + }, + }, + }, + Comm: "malicious_process", + Pid: 1234, + PPid: 5678, + Uid: 1000, + Gid: 1000, + ExePath: "/path/to/malicious_process", + Request: PTRACE_SETREGS, // Malicious ptrace request + } + + ruleResult := r.ProcessEvent(utils.PtraceEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to be Failure because of malicious ptrace request: %d", e.Request) + return + } + + // Check that the ruleResult contains the expected details + genericRuleFailure, ok := ruleResult.(*GenericRuleFailure) + if !ok { + t.Errorf("Expected ruleResult to be of type GenericRuleFailure") + return + } + + 
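// The assertions below verify that the alert carries the rule's name and the PID taken from the ptrace event. + 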
if genericRuleFailure.BaseRuntimeAlert.AlertName != r.Name() { + t.Errorf("Expected AlertName to be %s, got %s", r.Name(), genericRuleFailure.BaseRuntimeAlert.AlertName) + } + if genericRuleFailure.BaseRuntimeAlert.InfectedPID != e.Pid { + t.Errorf("Expected InfectedPID to be %d, got %d", e.Pid, genericRuleFailure.BaseRuntimeAlert.InfectedPID) + } + + // Test with a disallowed request but recognized process + e.Comm = "processA" // Allowed process + e.Request = PTRACE_POKETEXT // Malicious ptrace request + ruleResult = r.ProcessEvent(utils.PtraceEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to be Failure because of malicious ptrace request: %d, even though process is allowed", e.Request) + return + } + + // Test with an unrecognized process and malicious request + e.Comm = "unknown_process" + e.Request = PTRACE_POKEDATA // Malicious ptrace request + ruleResult = r.ProcessEvent(utils.PtraceEventType, e, &objCache) + if ruleResult == nil { + t.Errorf("Expected ruleResult to be Failure because of unknown process with malicious ptrace request: %d", e.Request) + } +} diff --git a/pkg/ruleengine/v1/rule.go b/pkg/ruleengine/v1/rule.go index 2a084263..8c198ab6 100644 --- a/pkg/ruleengine/v1/rule.go +++ b/pkg/ruleengine/v1/rule.go @@ -16,34 +16,6 @@ const ( RulePrioritySystemIssue = 1000 ) -type RuleDescriptor struct { - // Rule ID - ID string - // Rule Name - Name string - // Rule Description - Description string - // Priority - Priority int - // Tags - Tags []string - // Rule requirements - Requirements ruleengine.RuleSpec - // Create a rule function - RuleCreationFunc func() ruleengine.RuleEvaluator -} - -func (r *RuleDescriptor) HasTags(tags []string) bool { - for _, tag := range tags { - for _, ruleTag := range r.Tags { - if tag == ruleTag { - return true - } - } - } - return false -} - var _ ruleengine.RuleSpec = (*RuleRequirements)(nil) type RuleRequirements struct { diff --git a/pkg/rulemanager/rule_manager_interface.go b/pkg/rulemanager/rule_manager_interface.go index 52ae18c6..76f71de8 100644 --- a/pkg/rulemanager/rule_manager_interface.go +++ b/pkg/rulemanager/rule_manager_interface.go @@ -1,17 +1,9 @@ package rulemanager import ( - tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" - tracerrandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" - tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" - tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" + "github.com/kubescape/node-agent/pkg/utils" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" v1 "k8s.io/api/core/v1" ) @@ -19,15 +11,7 @@ import ( type RuleManagerClient interface { ContainerCallback(notif containercollection.PubSubEvent) RegisterPeekFunc(peek func(mntns uint64) ([]string, error)) - ReportCapability(event tracercapabilitiestype.Event) - ReportFileExec(event tracerexectype.Event) - ReportFileOpen(event traceropentype.Event) - ReportNetworkEvent(event 
tracernetworktype.Event) - ReportDNSEvent(event tracerdnstype.Event) - ReportRandomxEvent(event tracerrandomxtype.Event) - ReportSymlinkEvent(event tracersymlinktype.Event) - ReportHardlinkEvent(event tracerhardlinktype.Event) - ReportSSHEvent(event tracersshtype.Event) + ReportEvent(eventType utils.EventType, event utils.K8sEvent) HasApplicableRuleBindings(namespace, name string) bool HasFinalApplicationProfile(pod *v1.Pod) bool IsContainerMonitored(k8sContainerID string) bool diff --git a/pkg/rulemanager/rule_manager_mock.go b/pkg/rulemanager/rule_manager_mock.go index 08d0cbd1..3cf78d59 100644 --- a/pkg/rulemanager/rule_manager_mock.go +++ b/pkg/rulemanager/rule_manager_mock.go @@ -1,17 +1,9 @@ package rulemanager import ( - tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" - tracerrandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" - tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" - tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" + "github.com/kubescape/node-agent/pkg/utils" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" v1 "k8s.io/api/core/v1" ) @@ -32,39 +24,7 @@ func (r *RuleManagerMock) RegisterPeekFunc(_ func(mntns uint64) ([]string, error // noop } -func (r *RuleManagerMock) ReportCapability(_ tracercapabilitiestype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportFileExec(_ tracerexectype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportFileOpen(_ traceropentype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportNetworkEvent(_ tracernetworktype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportDNSEvent(_ tracerdnstype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportRandomxEvent(_ tracerrandomxtype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportSymlinkEvent(_ tracersymlinktype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportHardlinkEvent(_ tracerhardlinktype.Event) { - // noop -} - -func (r *RuleManagerMock) ReportSSHEvent(_ tracersshtype.Event) { +func (r *RuleManagerMock) ReportEvent(_ utils.EventType, _ utils.K8sEvent) { // noop } diff --git a/pkg/rulemanager/v1/rule_manager.go b/pkg/rulemanager/v1/rule_manager.go index 92c3b3f4..88e0800a 100644 --- a/pkg/rulemanager/v1/rule_manager.go +++ b/pkg/rulemanager/v1/rule_manager.go @@ -10,6 +10,7 @@ import ( "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/exporters" "github.com/kubescape/node-agent/pkg/k8sclient" + "github.com/kubescape/node-agent/pkg/processmanager" "github.com/kubescape/node-agent/pkg/ruleengine" "github.com/kubescape/node-agent/pkg/rulemanager" "github.com/kubescape/node-agent/pkg/utils" @@ -26,20 +27,11 @@ import ( "github.com/kubescape/node-agent/pkg/metricsmanager" "github.com/kubescape/node-agent/pkg/objectcache" - tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" - tracerrandomxtype 
"github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" - tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" - tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" mapset "github.com/deckarep/golang-set/v2" "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,6 +43,11 @@ import ( storageUtils "github.com/kubescape/storage/pkg/utils" ) +const ( + // Max file size to calculate hash is 50MB. + maxFileSize int64 = 50 * 1024 * 1024 +) + type RuleManager struct { cfg config.Config watchedContainerChannels maps.SafeMap[string, chan error] // key is k8sContainerID @@ -68,11 +65,12 @@ type RuleManager struct { clusterName string containerIdToShimPid maps.SafeMap[string, uint32] containerIdToPid maps.SafeMap[string, uint32] + processManager processmanager.ProcessManagerClient } var _ rulemanager.RuleManagerClient = (*RuleManager)(nil) -func CreateRuleManager(ctx context.Context, cfg config.Config, k8sClient k8sclient.K8sClientInterface, ruleBindingCache bindingcache.RuleBindingCache, objectCache objectcache.ObjectCache, exporter exporters.Exporter, metrics metricsmanager.MetricsManager, nodeName string, clusterName string) (*RuleManager, error) { +func CreateRuleManager(ctx context.Context, cfg config.Config, k8sClient k8sclient.K8sClientInterface, ruleBindingCache bindingcache.RuleBindingCache, objectCache objectcache.ObjectCache, exporter exporters.Exporter, metrics metricsmanager.MetricsManager, nodeName string, clusterName string, processManager processmanager.ProcessManagerClient) (*RuleManager, error) { return &RuleManager{ cfg: cfg, ctx: ctx, @@ -85,6 +83,7 @@ func CreateRuleManager(ctx context.Context, cfg config.Config, k8sClient k8sclie metrics: metrics, nodeName: nodeName, clusterName: clusterName, + processManager: processManager, }, nil } @@ -325,117 +324,18 @@ func (rm *RuleManager) RegisterPeekFunc(peek func(mntns uint64) ([]string, error rm.syscallPeekFunc = peek } -func (rm *RuleManager) ReportCapability(event tracercapabilitiestype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportCapability event") - return - } - - // list capability rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - - rm.processEvent(utils.CapabilitiesEventType, &event, rules) -} - -func (rm *RuleManager) ReportFileExec(event tracerexectype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportFileExec event") - return - } - - // list exec rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - rm.processEvent(utils.ExecveEventType, &event, rules) -} - 
-func (rm *RuleManager) ReportFileOpen(event traceropentype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportFileOpen event") - return - } - - // list open rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - - rm.processEvent(utils.OpenEventType, &event, rules) - -} - -func (rm *RuleManager) ReportNetworkEvent(event tracernetworktype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportNetworkEvent event") - return - } - - // list network rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - - rm.processEvent(utils.NetworkEventType, &event, rules) -} - -func (rm *RuleManager) ReportDNSEvent(event tracerdnstype.Event) { - // ignore events with empty container name - if event.K8s.ContainerName == "" { - return - } - - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportDNSEvent event") - return - } - - // list dns rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - - rm.processEvent(utils.DnsEventType, &event, rules) -} - -func (rm *RuleManager) ReportRandomxEvent(event tracerrandomxtype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from randomx event") - return - } - - // list randomx rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - - rm.processEvent(utils.RandomXEventType, &event, rules) -} - -func (rm *RuleManager) ReportSymlinkEvent(event tracersymlinktype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportSymlinkEvent event") - return - } - - // list symlink rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - rm.processEvent(utils.SymlinkEventType, &event, rules) -} - -func (rm *RuleManager) ReportHardlinkEvent(event tracerhardlinktype.Event) { - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportHardlinkEvent event") - return - } - - // list hardlink rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - rm.processEvent(utils.HardlinkEventType, &event, rules) -} - -func (rm *RuleManager) ReportSSHEvent(event tracersshtype.Event) { +func (rm *RuleManager) ReportEvent(eventType utils.EventType, event utils.K8sEvent) { if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Error("RuleManager - failed to get namespace and pod name from ReportSSHEvent event") + logger.L().Error("RuleManager - failed to get namespace and pod name from custom event") return } - // list ssh rules + // list custom rules rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - rm.processEvent(utils.SSHEventType, &event, rules) + rm.processEvent(eventType, event, rules) } -func (rm *RuleManager) processEvent(eventType utils.EventType, event interface{}, rules []ruleengine.RuleEvaluator) { +func (rm *RuleManager) processEvent(eventType utils.EventType, event utils.K8sEvent, rules []ruleengine.RuleEvaluator) { for _, rule := range rules { if rule == nil { continue @@ -456,8 +356,13 @@ 
func (rm *RuleManager) processEvent(eventType utils.EventType, event interface{} } } func (rm *RuleManager) enrichRuleFailure(ruleFailure ruleengine.RuleFailure) ruleengine.RuleFailure { - path, err := utils.GetPathFromPid(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID) - hostPath := "" + var err error + var path string + var hostPath string + if ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path == "" { + path, err = utils.GetPathFromPid(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID) + } + if err != nil { if ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path != "" { hostPath = filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", rm.containerIdToPid.Get(ruleFailure.GetTriggerEvent().Runtime.ContainerID), ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path)) @@ -470,91 +375,55 @@ func (rm *RuleManager) enrichRuleFailure(ruleFailure ruleengine.RuleFailure) rul baseRuntimeAlert := ruleFailure.GetBaseRuntimeAlert() baseRuntimeAlert.Timestamp = time.Unix(0, int64(ruleFailure.GetTriggerEvent().Timestamp)) - - if baseRuntimeAlert.MD5Hash == "" && hostPath != "" { - md5hash, err := utils.CalculateMD5FileHash(hostPath) + var size int64 = 0 + if hostPath != "" { + size, err = utils.GetFileSize(hostPath) if err != nil { - md5hash = "" + size = 0 } - baseRuntimeAlert.MD5Hash = md5hash } - if baseRuntimeAlert.SHA1Hash == "" && hostPath != "" { - sha1hash, err := utils.CalculateSHA1FileHash(hostPath) - if err != nil { - sha1hash = "" - } - - baseRuntimeAlert.SHA1Hash = sha1hash - } - - if baseRuntimeAlert.SHA256Hash == "" && hostPath != "" { - sha256hash, err := utils.CalculateSHA256FileHash(hostPath) - if err != nil { - sha256hash = "" - } - - baseRuntimeAlert.SHA256Hash = sha256hash + if baseRuntimeAlert.Size == "" && hostPath != "" && size != 0 { + baseRuntimeAlert.Size = humanize.Bytes(uint64(size)) } - if baseRuntimeAlert.Size == "" && hostPath != "" { - size, err := utils.GetFileSize(hostPath) - if err != nil { - baseRuntimeAlert.Size = "" - } else { - baseRuntimeAlert.Size = humanize.Bytes(uint64(size)) + if size != 0 && size < maxFileSize && hostPath != "" { + if baseRuntimeAlert.MD5Hash == "" || baseRuntimeAlert.SHA1Hash == "" { + sha1hash, md5hash, err := utils.CalculateFileHashes(hostPath) + if err == nil { + baseRuntimeAlert.MD5Hash = md5hash + baseRuntimeAlert.SHA1Hash = sha1hash + } } } ruleFailure.SetBaseRuntimeAlert(baseRuntimeAlert) runtimeProcessDetails := ruleFailure.GetRuntimeProcessDetails() - if runtimeProcessDetails.ProcessTree.Cmdline == "" { - commandLine, err := utils.GetCmdlineByPid(int(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID)) - if err != nil { - runtimeProcessDetails.ProcessTree.Cmdline = "" - } else { - runtimeProcessDetails.ProcessTree.Cmdline = *commandLine - } - } - - if runtimeProcessDetails.ProcessTree.PPID == 0 { - parent, err := utils.GetProcessStat(int(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID)) - if err != nil { - runtimeProcessDetails.ProcessTree.PPID = 0 - } else { - runtimeProcessDetails.ProcessTree.PPID = uint32(parent.PPID) - } - - if runtimeProcessDetails.ProcessTree.Pcomm == "" { - if err == nil { - runtimeProcessDetails.ProcessTree.Pcomm = parent.Comm - } else { - runtimeProcessDetails.ProcessTree.Pcomm = "" - } - } - } - - if runtimeProcessDetails.ProcessTree.PID == 0 { - runtimeProcessDetails.ProcessTree.PID = ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID - } - if runtimeProcessDetails.ProcessTree.Comm == "" { - comm, err := 
utils.GetCommFromPid(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID) + err = backoff.Retry(func() error { + tree, err := rm.processManager.GetProcessTreeForPID( + ruleFailure.GetRuntimeProcessDetails().ContainerID, + int(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID), + ) if err != nil { - comm = "" + return err } - runtimeProcessDetails.ProcessTree.Comm = comm - } - - if runtimeProcessDetails.ProcessTree.Path == "" && path != "" { - runtimeProcessDetails.ProcessTree.Path = path - } - - if rm.containerIdToShimPid.Has(ruleFailure.GetRuntimeProcessDetails().ContainerID) { - shimPid := rm.containerIdToShimPid.Get(ruleFailure.GetRuntimeProcessDetails().ContainerID) - tree, err := utils.CreateProcessTree(&runtimeProcessDetails.ProcessTree, shimPid) - if err == nil { + runtimeProcessDetails.ProcessTree = tree + return nil + }, backoff.NewExponentialBackOff( + backoff.WithInitialInterval(50*time.Millisecond), + backoff.WithMaxInterval(200*time.Millisecond), + backoff.WithMaxElapsedTime(500*time.Millisecond), + )) + + if err != nil && rm.containerIdToShimPid.Has(ruleFailure.GetRuntimeProcessDetails().ContainerID) { + logger.L().Debug("RuleManager - failed to get process tree, trying to get process tree from shim", + helpers.Error(err), + helpers.String("container ID", ruleFailure.GetRuntimeProcessDetails().ContainerID)) + + if tree, err := utils.CreateProcessTree(&runtimeProcessDetails.ProcessTree, + rm.containerIdToShimPid.Get(ruleFailure.GetRuntimeProcessDetails().ContainerID)); err == nil { runtimeProcessDetails.ProcessTree = *tree } } diff --git a/pkg/rulemanager/v1/rule_manager_test.go b/pkg/rulemanager/v1/rule_manager_test.go index 11ee67ea..cefa8559 100644 --- a/pkg/rulemanager/v1/rule_manager_test.go +++ b/pkg/rulemanager/v1/rule_manager_test.go @@ -1,77 +1,39 @@ package rulemanager -// func TestApplicationProfileManager(t *testing.T) { -// cfg := config.Config{ -// InitialDelay: 1 * time.Second, -// MaxSniffingTime: 5 * time.Minute, -// UpdateDataPeriod: 1 * time.Second, -// } -// ctx := context.TODO() -// k8sClient := &k8sclient.K8sClientMock{} -// storageClient := &storage.StorageHttpClientMock{} -// am, err := CreateApplicationProfileManager(ctx, cfg, "cluster", k8sClient, storageClient) -// assert.NoError(t, err) -// // prepare container -// container := &containercollection.Container{ -// K8s: containercollection.K8sMetadata{ -// BasicK8sMetadata: types.BasicK8sMetadata{ -// Namespace: "ns", -// PodName: "pod", -// ContainerName: "cont", -// }, -// }, -// Runtime: containercollection.RuntimeMetadata{ -// BasicRuntimeMetadata: types.BasicRuntimeMetadata{ -// ContainerID: "5fff6a395ce4e6984a9447cc6cfb09f473eaf278498243963fcc944889bc8400", -// }, -// }, -// } -// // register peek function for syscall tracer -// go am.RegisterPeekFunc(func(_ uint64) ([]string, error) { -// return []string{"dup", "listen"}, nil -// }) -// // report capability -// go am.ReportCapability("ns/pod/cont", "NET_BIND_SERVICE") -// // report file exec -// go am.ReportFileExec("ns/pod/cont", "", []string{"ls"}) // will not be reported -// go am.ReportFileExec("ns/pod/cont", "/bin/bash", []string{"-c", "ls"}) -// // report file open -// go am.ReportFileOpen("ns/pod/cont", "/etc/passwd", []string{"O_RDONLY"}) -// // report container started (race condition with reports) -// am.ContainerCallback(containercollection.PubSubEvent{ -// Type: containercollection.EventTypeAddContainer, -// Container: container, -// }) -// // let it run for a while -// time.Sleep(15 * time.Second) // need to sleep longer 
because of AddRandomDuration in startApplicationProfiling -// // report another file open -// go am.ReportFileOpen("ns/pod/cont", "/etc/hosts", []string{"O_RDONLY"}) -// // sleep more -// time.Sleep(2 * time.Second) -// // report container stopped -// am.ContainerCallback(containercollection.PubSubEvent{ -// Type: containercollection.EventTypeRemoveContainer, -// Container: container, -// }) -// // let it stop -// time.Sleep(2 * time.Second) -// // verify generated CRDs -// assert.Equal(t, 1, len(storageClient.ApplicationActivities)) -// sort.Strings(storageClient.ApplicationActivities[0].Spec.Syscalls) -// assert.Equal(t, []string{"dup", "listen"}, storageClient.ApplicationActivities[0].Spec.Syscalls) -// assert.Equal(t, 2, len(storageClient.ApplicationProfiles)) -// assert.Equal(t, 2, len(storageClient.ApplicationProfileSummaries)) -// // check the first profile -// sort.Strings(storageClient.ApplicationProfiles[0].Spec.Containers[0].Capabilities) -// assert.Equal(t, []string{"NET_BIND_SERVICE"}, storageClient.ApplicationProfiles[0].Spec.Containers[1].Capabilities) -// assert.Equal(t, []v1beta1.ExecCalls{{Path: "/bin/bash", Args: []string{"-c", "ls"}, Envs: []string(nil)}}, storageClient.ApplicationProfiles[0].Spec.Containers[1].Execs) -// assert.Equal(t, []v1beta1.OpenCalls{{Path: "/etc/passwd", Flags: []string{"O_RDONLY"}}}, storageClient.ApplicationProfiles[0].Spec.Containers[1].Opens) -// // check the second profile - this is a patch for execs and opens -// sort.Strings(storageClient.ApplicationProfiles[1].Spec.Containers[0].Capabilities) -// assert.Equal(t, []string{"NET_BIND_SERVICE"}, storageClient.ApplicationProfiles[1].Spec.Containers[1].Capabilities) -// assert.Equal(t, []v1beta1.ExecCalls{{Path: "/bin/bash", Args: []string{"-c", "ls"}, Envs: []string(nil)}}, storageClient.ApplicationProfiles[1].Spec.Containers[1].Execs) -// assert.Equal(t, []v1beta1.OpenCalls{ -// {Path: "/etc/passwd", Flags: []string{"O_RDONLY"}}, -// {Path: "/etc/hosts", Flags: []string{"O_RDONLY"}}, -// }, storageClient.ApplicationProfiles[1].Spec.Containers[1].Opens) -// } +import ( + "testing" + + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + "github.com/kubescape/go-logger" + tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" + "github.com/kubescape/node-agent/pkg/utils" +) + +func TestReportEvent(t *testing.T) { + // Create a hardlink event + e := &tracerhardlinktype.Event{ + Event: eventtypes.Event{ + CommonData: eventtypes.CommonData{ + K8s: eventtypes.K8sMetadata{ + BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + ContainerName: "test", + }, + }, + }, + }, + Comm: "test", + OldPath: "test", + NewPath: "test", + } + + // Report the hardlink event + reportEvent(utils.HardlinkEventType, e) +} + +func reportEvent(eventType utils.EventType, event utils.K8sEvent) { + k8sEvent := event.(*tracerhardlinktype.Event) + if k8sEvent.GetNamespace() == "" || k8sEvent.GetPod() == "" { + logger.L().Error("RuleManager - failed to get namespace and pod name from custom event") + return + } +} diff --git a/pkg/seccompmanager/seccomp_manager_interface.go b/pkg/seccompmanager/seccomp_manager_interface.go index de81e97f..b941b47d 100644 --- a/pkg/seccompmanager/seccomp_manager_interface.go +++ b/pkg/seccompmanager/seccomp_manager_interface.go @@ -2,11 +2,11 @@ package seccompmanager import ( "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + v1beta1api 
"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" ) type SeccompManagerClient interface { - AddSeccompProfile(obj *unstructured.Unstructured) error - DeleteSeccompProfile(obj *unstructured.Unstructured) error + AddSeccompProfile(obj *v1beta1api.SeccompProfile) error + DeleteSeccompProfile(obj *v1beta1api.SeccompProfile) error GetSeccompProfile(name string, path *string) (v1beta1.SingleSeccompProfile, error) } diff --git a/pkg/seccompmanager/seccomp_manager_mock.go b/pkg/seccompmanager/seccomp_manager_mock.go index 221639f6..78c773d9 100644 --- a/pkg/seccompmanager/seccomp_manager_mock.go +++ b/pkg/seccompmanager/seccomp_manager_mock.go @@ -2,7 +2,7 @@ package seccompmanager import ( "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + v1beta1api "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" ) type SeccompManagerMock struct { @@ -14,11 +14,11 @@ func NewSeccompManagerMock() *SeccompManagerMock { var _ SeccompManagerClient = (*SeccompManagerMock)(nil) -func (s *SeccompManagerMock) AddSeccompProfile(_ *unstructured.Unstructured) error { +func (s *SeccompManagerMock) AddSeccompProfile(_ *v1beta1api.SeccompProfile) error { return nil } -func (s *SeccompManagerMock) DeleteSeccompProfile(_ *unstructured.Unstructured) error { +func (s *SeccompManagerMock) DeleteSeccompProfile(_ *v1beta1api.SeccompProfile) error { return nil } diff --git a/pkg/seccompmanager/v1/seccomp_manager.go b/pkg/seccompmanager/v1/seccomp_manager.go index 19f99327..b2e6aae3 100644 --- a/pkg/seccompmanager/v1/seccomp_manager.go +++ b/pkg/seccompmanager/v1/seccomp_manager.go @@ -15,10 +15,9 @@ import ( "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + v1beta1api "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" "github.com/spf13/afero" "go.uber.org/multierr" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - k8sruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ) @@ -41,11 +40,7 @@ func NewSeccompManager() (*SeccompManager, error) { var _ seccompmanager.SeccompManagerClient = (*SeccompManager)(nil) -func (s *SeccompManager) AddSeccompProfile(obj *unstructured.Unstructured) error { - sp, err := unstructuredToSeccompProfile(obj) - if err != nil { - return fmt.Errorf("failed to convert unstructured to seccomp profile: %w", err) - } +func (s *SeccompManager) AddSeccompProfile(sp *v1beta1api.SeccompProfile) error { // store the profile for each container var errs error profilePaths := mapset.NewSet[string]() @@ -72,11 +67,11 @@ func (s *SeccompManager) AddSeccompProfile(obj *unstructured.Unstructured) error } profilePaths.Add(profilePath) } - s.profilesPaths.Set(obj.GetUID(), profilePaths) + s.profilesPaths.Set(sp.GetUID(), profilePaths) return errs } -func (s *SeccompManager) DeleteSeccompProfile(obj *unstructured.Unstructured) error { +func (s *SeccompManager) DeleteSeccompProfile(obj *v1beta1api.SeccompProfile) error { uid := obj.GetUID() var errs error for _, path := range s.profilesPaths.Get(uid).ToSlice() { @@ -128,12 +123,3 @@ func getProfilesDir() (string, error) { } return seccompProfilesDir, nil } - -func unstructuredToSeccompProfile(obj *unstructured.Unstructured) (*v1beta1.SeccompProfile, error) { - sp := &v1beta1.SeccompProfile{} - err := k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, sp) - if err != nil { - return nil, err - } - return sp, nil -} diff 
--git a/pkg/seccompmanager/v1/seccomp_manager_test.go b/pkg/seccompmanager/v1/seccomp_manager_test.go index 7c517d14..ff09aa0b 100644 --- a/pkg/seccompmanager/v1/seccomp_manager_test.go +++ b/pkg/seccompmanager/v1/seccomp_manager_test.go @@ -5,9 +5,10 @@ import ( "path/filepath" "testing" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" "github.com/spf13/afero" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // func TestName(t *testing.T) { @@ -56,27 +57,22 @@ import ( func TestSeccompManager(t *testing.T) { tests := []struct { name string - obj *unstructured.Unstructured + obj *v1beta1.SeccompProfile path string wantErr assert.ErrorAssertionFunc }{ { name: "create seccomp profile", - obj: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "SeccompProfile", - "metadata": map[string]interface{}{ - "name": "replicaset-nginx-77b4fdf86c", - "namespace": "default", - }, - "spec": map[string]interface{}{ - "containers": []map[string]interface{}{ - { - "name": "nginx", - "path": "default/replicaset-nginx-77b4fdf86c-nginx.json", - }, - }, - }, + obj: &v1beta1.SeccompProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "replicaset-nginx-77b4fdf86c", + Namespace: "default", + }, + Spec: v1beta1.SeccompProfileSpec{ + Containers: []v1beta1.SingleSeccompProfile{{ + Name: "nginx", + Path: "default/replicaset-nginx-77b4fdf86c-nginx.json", + }}, }, }, path: "default/replicaset-nginx-77b4fdf86c-nginx.json", diff --git a/pkg/storage/storage_mock.go b/pkg/storage/storage_mock.go index 2bf7147c..6a0ee6d2 100644 --- a/pkg/storage/storage_mock.go +++ b/pkg/storage/storage_mock.go @@ -19,7 +19,6 @@ type StorageHttpClientMock struct { NetworkNeighborhoods []*v1beta1.NetworkNeighborhood NetworkNeighborses []*v1beta1.NetworkNeighbors ImageCounters map[string]int - nginxSBOMSpdxBytes *spdxv1beta1.SBOMSPDXv2p3 mockSBOM *v1beta1.SBOMSyft failedOnce bool } diff --git a/pkg/storage/v1/applicationprofile.go b/pkg/storage/v1/applicationprofile.go index 2f09b5be..2b804f50 100644 --- a/pkg/storage/v1/applicationprofile.go +++ b/pkg/storage/v1/applicationprofile.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "strconv" "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" "github.com/kubescape/node-agent/pkg/utils" @@ -53,7 +52,6 @@ func (sc Storage) patchApplicationProfile(name, namespace string, operations []u if err != nil { return fmt.Errorf("patch application profile: %w", err) } - // check if returned profile is full if status, ok := profile.Annotations[helpers.StatusMetadataKey]; ok && status == helpers.TooLarge { if channel != nil { @@ -61,7 +59,6 @@ func (sc Storage) patchApplicationProfile(name, namespace string, operations []u } return nil } - // check if returned profile is completed if c, ok := profile.Annotations[helpers.CompletionMetadataKey]; ok { if s, ok := profile.Annotations[helpers.StatusMetadataKey]; ok && s == helpers.Complete && c == helpers.Completed { @@ -71,34 +68,5 @@ func (sc Storage) patchApplicationProfile(name, namespace string, operations []u return nil } } - - // check if returned profile is too big - if s, ok := profile.Annotations[helpers.ResourceSizeMetadataKey]; ok { - size, err := strconv.Atoi(s) - if err != nil { - return fmt.Errorf("parse size: %w", err) - } - if size > sc.maxApplicationProfileSize { - // add annotation to indicate that the profile is full - annotationOperations := []utils.PatchOperation{ - { - Op: "replace", - 
Path: "/metadata/annotations/" + utils.EscapeJSONPointerElement(helpers.StatusMetadataKey), - Value: helpers.TooLarge, - }, - } - annotationsPatch, err := json.Marshal(annotationOperations) - if err != nil { - return fmt.Errorf("create patch for annotations: %w", err) - } - _, err = sc.StorageClient.ApplicationProfiles(namespace).Patch(context.Background(), sc.modifyName(name), types.JSONPatchType, annotationsPatch, v1.PatchOptions{}) - if err != nil { - return fmt.Errorf("patch application profile annotations: %w", err) - } - if channel != nil { - channel <- utils.TooLargeObjectError - } - } - } return nil } diff --git a/pkg/storage/v1/network.go b/pkg/storage/v1/network.go index 9b486964..c74ced2b 100644 --- a/pkg/storage/v1/network.go +++ b/pkg/storage/v1/network.go @@ -4,11 +4,9 @@ import ( "context" "encoding/json" "fmt" - "strconv" - - "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" + "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -70,34 +68,5 @@ func (sc Storage) patchNetworkNeighborhood(name, namespace string, operations [] return nil } } - - // check if returned neighborhood is too big - if s, ok := neighborhood.Annotations[helpers.ResourceSizeMetadataKey]; ok { - size, err := strconv.Atoi(s) - if err != nil { - return fmt.Errorf("parse size: %w", err) - } - if size > sc.maxNetworkNeighborhoodSize { - // add annotation to indicate that the neighborhood is full - annotationOperations := []utils.PatchOperation{ - { - Op: "replace", - Path: "/metadata/annotations/" + utils.EscapeJSONPointerElement(helpers.StatusMetadataKey), - Value: helpers.TooLarge, - }, - } - annotationsPatch, err := json.Marshal(annotationOperations) - if err != nil { - return fmt.Errorf("create patch for annotations: %w", err) - } - _, err = sc.StorageClient.NetworkNeighborhoods(namespace).Patch(context.Background(), sc.modifyName(name), types.JSONPatchType, annotationsPatch, v1.PatchOptions{}) - if err != nil { - return fmt.Errorf("patch application neighborhood annotations: %w", err) - } - if channel != nil { - channel <- utils.TooLargeObjectError - } - } - } return nil } diff --git a/pkg/storage/v1/storage.go b/pkg/storage/v1/storage.go index b83682e1..44a78cf7 100644 --- a/pkg/storage/v1/storage.go +++ b/pkg/storage/v1/storage.go @@ -26,18 +26,14 @@ import ( ) const ( - DefaultMaxApplicationProfileSize = 10000 - DefaultMaxNetworkNeighborhoodSize = 1000 - KubeConfig = "KUBECONFIG" + KubeConfig = "KUBECONFIG" ) type Storage struct { - StorageClient spdxv1beta1.SpdxV1beta1Interface - maxApplicationProfileSize int - maxNetworkNeighborhoodSize int - maxJsonPatchOperations int - namespace string - multiplier *int // used for testing to multiply the resources by this + StorageClient spdxv1beta1.SpdxV1beta1Interface + maxJsonPatchOperations int + namespace string + multiplier *int // used for testing to multiply the resources by this } var _ storage.StorageClient = (*Storage)(nil) @@ -53,24 +49,15 @@ func CreateStorage(namespace string) (*Storage, error) { return nil, fmt.Errorf("failed to create K8S Aggregated API Client with err: %v", err) } } + // force GRPC + cfg.AcceptContentTypes = "application/vnd.kubernetes.protobuf" + cfg.ContentType = "application/vnd.kubernetes.protobuf" clientset, err := versioned.NewForConfig(cfg) if err != nil { return nil, fmt.Errorf("failed to create K8S Aggregated API Client with 
err: %v", err) } - maxApplicationProfileSize, err := strconv.Atoi(os.Getenv("MAX_APPLICATION_PROFILE_SIZE")) - if err != nil { - maxApplicationProfileSize = DefaultMaxApplicationProfileSize - } - logger.L().Debug("maxApplicationProfileSize", helpers.Int("size", maxApplicationProfileSize)) - - maxNetworkNeighborhoodSize, err := strconv.Atoi(os.Getenv("MAX_NETWORK_NEIGHBORHOOD_SIZE")) - if err != nil { - maxNetworkNeighborhoodSize = DefaultMaxNetworkNeighborhoodSize - } - logger.L().Debug("maxNetworkNeighborhoodSize", helpers.Int("size", maxNetworkNeighborhoodSize)) - // wait for storage to be ready if err := backoff.RetryNotify(func() error { _, err := clientset.SpdxV1beta1().ApplicationProfiles("default").List(context.Background(), metav1.ListOptions{}) @@ -82,12 +69,10 @@ func CreateStorage(namespace string) (*Storage, error) { } return &Storage{ - StorageClient: clientset.SpdxV1beta1(), - maxApplicationProfileSize: maxApplicationProfileSize, - maxNetworkNeighborhoodSize: maxNetworkNeighborhoodSize, - maxJsonPatchOperations: 9999, - namespace: namespace, - multiplier: getMultiplier(), + StorageClient: clientset.SpdxV1beta1(), + maxJsonPatchOperations: 9999, + namespace: namespace, + multiplier: getMultiplier(), }, nil } @@ -134,7 +119,7 @@ func (sc Storage) PatchNetworkNeighborsIngressAndEgress(name, namespace string, return nil } -func (sc Storage) PatchNetworkNeighborsMatchLabels(name, namespace string, networkNeighbors *v1beta1.NetworkNeighbors) error { +func (sc Storage) PatchNetworkNeighborsMatchLabels(_, namespace string, networkNeighbors *v1beta1.NetworkNeighbors) error { sc.modifyNameP(&networkNeighbors.Name) defer sc.revertNameP(&networkNeighbors.Name) _, err := sc.StorageClient.NetworkNeighborses(namespace).Update(context.Background(), networkNeighbors, metav1.UpdateOptions{}) diff --git a/pkg/utils/applicationprofile.go b/pkg/utils/applicationprofile.go index 191590d8..b83248e3 100644 --- a/pkg/utils/applicationprofile.go +++ b/pkg/utils/applicationprofile.go @@ -8,7 +8,7 @@ import ( "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" ) -func CreateCapabilitiesPatchOperations(capabilities, syscalls []string, execs map[string][]string, opens map[string]mapset.Set[string], containerType string, containerIndex int) []PatchOperation { +func CreateCapabilitiesPatchOperations(capabilities, syscalls []string, execs map[string][]string, opens map[string]mapset.Set[string], endpoints map[string]*v1beta1.HTTPEndpoint, containerType string, containerIndex int) []PatchOperation { var profileOperations []PatchOperation // add capabilities sort.Strings(capabilities) @@ -63,10 +63,20 @@ func CreateCapabilitiesPatchOperations(capabilities, syscalls []string, execs ma }, }) } + + httpEndpoints := fmt.Sprintf("/spec/%s/%d/endpoints/-", containerType, containerIndex) + for _, endpoint := range endpoints { + profileOperations = append(profileOperations, PatchOperation{ + Op: "add", + Path: httpEndpoints, + Value: *endpoint, + }) + } + return profileOperations } -func EnrichApplicationProfileContainer(container *v1beta1.ApplicationProfileContainer, observedCapabilities, observedSyscalls []string, execs map[string][]string, opens map[string]mapset.Set[string]) { +func EnrichApplicationProfileContainer(container *v1beta1.ApplicationProfileContainer, observedCapabilities, observedSyscalls []string, execs map[string][]string, opens map[string]mapset.Set[string], endpoints map[string]*v1beta1.HTTPEndpoint) { // add capabilities caps := mapset.NewSet(observedCapabilities...) 
caps.Append(container.Capabilities...) @@ -100,6 +110,11 @@ func EnrichApplicationProfileContainer(container *v1beta1.ApplicationProfileCont Flags: flags, }) } + + // add endpoints + for _, endpoint := range endpoints { + container.Endpoints = append(container.Endpoints, *endpoint) + } } // TODO make generic? diff --git a/pkg/utils/applicationprofile_test.go b/pkg/utils/applicationprofile_test.go index 09c21fd4..c65407c1 100644 --- a/pkg/utils/applicationprofile_test.go +++ b/pkg/utils/applicationprofile_test.go @@ -40,22 +40,24 @@ func Test_EnrichApplicationProfileContainer(t *testing.T) { existingContainer := GetApplicationProfileContainer(applicationProfile, Container, 0) assert.NotNil(t, existingContainer) + var test map[string]*v1beta1.HTTPEndpoint + // empty enrich - EnrichApplicationProfileContainer(existingContainer, []string{}, []string{}, map[string][]string{}, map[string]mapset.Set[string]{}) + EnrichApplicationProfileContainer(existingContainer, []string{}, []string{}, map[string][]string{}, map[string]mapset.Set[string]{}, test) assert.Equal(t, 5, len(existingContainer.Capabilities)) assert.Equal(t, 2, len(existingContainer.Execs)) assert.Equal(t, 5, len(existingContainer.Syscalls)) assert.Equal(t, 0, len(existingContainer.Opens)) // enrich with existing capabilities, syscalls - no change - EnrichApplicationProfileContainer(existingContainer, []string{"SETGID"}, []string{"listen"}, map[string][]string{}, map[string]mapset.Set[string]{}) + EnrichApplicationProfileContainer(existingContainer, []string{"SETGID"}, []string{"listen"}, map[string][]string{}, map[string]mapset.Set[string]{}, test) assert.Equal(t, 5, len(existingContainer.Capabilities)) assert.Equal(t, 2, len(existingContainer.Execs)) assert.Equal(t, 5, len(existingContainer.Syscalls)) assert.Equal(t, 0, len(existingContainer.Opens)) // enrich with new capabilities, syscalls - add - EnrichApplicationProfileContainer(existingContainer, []string{"NEW"}, []string{"xxx", "yyy"}, map[string][]string{}, map[string]mapset.Set[string]{}) + EnrichApplicationProfileContainer(existingContainer, []string{"NEW"}, []string{"xxx", "yyy"}, map[string][]string{}, map[string]mapset.Set[string]{}, test) assert.Equal(t, 6, len(existingContainer.Capabilities)) assert.Equal(t, 2, len(existingContainer.Execs)) assert.Equal(t, 7, len(existingContainer.Syscalls)) @@ -65,7 +67,7 @@ func Test_EnrichApplicationProfileContainer(t *testing.T) { opens := map[string]mapset.Set[string]{ "/checkoutservice": mapset.NewSet("O_RDONLY", "O_WRONLY"), } - EnrichApplicationProfileContainer(existingContainer, []string{"NEW"}, []string{"xxx", "yyy"}, map[string][]string{}, opens) + EnrichApplicationProfileContainer(existingContainer, []string{"NEW"}, []string{"xxx", "yyy"}, map[string][]string{}, opens, test) assert.Equal(t, 6, len(existingContainer.Capabilities)) assert.Equal(t, 2, len(existingContainer.Execs)) assert.Equal(t, 7, len(existingContainer.Syscalls)) diff --git a/pkg/utils/events.go b/pkg/utils/events.go index 5af8e2e0..e2f1e774 100644 --- a/pkg/utils/events.go +++ b/pkg/utils/events.go @@ -1,17 +1,24 @@ package utils -type EventType int +type K8sEvent interface { + GetPod() string + GetNamespace() string +} + +type EventType string const ( - ExecveEventType EventType = iota - OpenEventType - CapabilitiesEventType - DnsEventType - NetworkEventType - SyscallEventType - RandomXEventType - SymlinkEventType - HardlinkEventType - SSHEventType - AllEventType + ExecveEventType EventType = "exec" + OpenEventType EventType = "open" + CapabilitiesEventType 
EventType = "capabilities" + DnsEventType EventType = "dns" + NetworkEventType EventType = "network" + SyscallEventType EventType = "syscall" + RandomXEventType EventType = "randomx" + SymlinkEventType EventType = "symlink" + HardlinkEventType EventType = "hardlink" + SSHEventType EventType = "ssh" + HTTPEventType EventType = "http" + PtraceEventType EventType = "ptrace" + AllEventType EventType = "all" ) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index b5ee4c91..e78f99f8 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "errors" "fmt" + "hash" "io" "math/rand" "os" @@ -55,11 +56,6 @@ var ( IncompleteSBOMError = errors.New("incomplete SBOM") ) -type PackageSourceInfoData struct { - Exist bool - PackageSPDXIdentifier []v1beta1.ElementID -} - type ContainerType int const ( @@ -164,11 +160,20 @@ func CreateK8sPodID(namespaceName string, podName string) string { return strings.Join([]string{namespaceName, podName}, "/") } -// AddRandomDuration adds between min and max seconds to duration -func AddRandomDuration(min, max int, duration time.Duration) time.Duration { +// AddJitter adds jitter percent to the duration +func AddJitter(duration time.Duration, maxJitterPercentage int) time.Duration { + if maxJitterPercentage == 0 { + return duration + } + jitter := 1 + rand.Intn(maxJitterPercentage)/100 + return duration * time.Duration(jitter) +} + +// RandomDuration returns a duration between 1/2 max and max +func RandomDuration(max int, duration time.Duration) time.Duration { // we don't initialize the seed, so we will get the same sequence of random numbers every time - randomDuration := time.Duration(rand.Intn(max+1-min)+min) * time.Second - return randomDuration + duration + mini := max / 2 + return time.Duration(rand.Intn(1+max-mini)+mini) * duration } func Atoi(s string) int { @@ -423,7 +428,7 @@ func GetProcessEnv(pid int) (map[string]string, error) { } // Get the path of the file on the node. -func GetHostFilePathFromEvent(event interface{}, containerPid uint32) (string, error) { +func GetHostFilePathFromEvent(event K8sEvent, containerPid uint32) (string, error) { if execEvent, ok := event.(*tracerexectype.Event); ok { realPath := filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", containerPid, GetExecPathFromEvent(execEvent))) return realPath, nil @@ -476,67 +481,34 @@ func CalculateSHA256FileExecHash(path string, args []string) string { return hex.EncodeToString(hashInBytes) } -// Calculate the SHA256 hash of the given file. -func CalculateSHA256FileHash(path string) (string, error) { +// CalculateFileHashes calculates both SHA1 and MD5 hashes of the given file. +func CalculateFileHashes(path string) (sha1Hash string, md5Hash string, err error) { file, err := os.Open(path) if err != nil { - return "", err + return "", "", err } defer func(file *os.File) { _ = file.Close() }(file) - hash := sha256.New() - if _, err := io.Copy(hash, file); err != nil { - return "", err - } - - hashInBytes := hash.Sum(nil) - hashString := hex.EncodeToString(hashInBytes) + sha1Hash256 := sha1.New() + md5Hash256 := md5.New() - return hashString, nil -} + multiWriter := io.MultiWriter(sha1Hash256, md5Hash256) -// Calculate the SHA1 hash of the given file. 
-func CalculateSHA1FileHash(path string) (string, error) { - file, err := os.Open(path) - if err != nil { - return "", err + if _, err := io.Copy(multiWriter, file); err != nil { + return "", "", err } - defer func(file *os.File) { - _ = file.Close() - }(file) - hash := sha1.New() - if _, err := io.Copy(hash, file); err != nil { - return "", err - } + sha1HashString := hashToString(sha1Hash256) + md5HashString := hashToString(md5Hash256) - hashInBytes := hash.Sum(nil) - hashString := hex.EncodeToString(hashInBytes) - - return hashString, nil + return sha1HashString, md5HashString, nil } -// Calculate the MD5 hash of the given file. -func CalculateMD5FileHash(path string) (string, error) { - file, err := os.Open(path) - if err != nil { - return "", err - } - defer func(file *os.File) { - _ = file.Close() - }(file) - - hash := md5.New() - if _, err := io.Copy(hash, file); err != nil { - return "", err - } - - hashInBytes := hash.Sum(nil) - hashString := hex.EncodeToString(hashInBytes) - - return hashString, nil +// hashToString converts a hash.Hash to a hexadecimal string. +func hashToString(h hash.Hash) string { + return hex.EncodeToString(h.Sum(nil)) } // Creates a process tree from a process. diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index cc44eed7..fb39940b 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -4,7 +4,6 @@ import ( "reflect" "strings" "testing" - "time" apitypes "github.com/armosec/armoapi-go/armotypes" "github.com/kubescape/k8s-interface/instanceidhandler/v1" @@ -145,42 +144,6 @@ func TestCreateK8sContainerID(t *testing.T) { } } -func TestRandomSleep(t *testing.T) { - type args struct { - min int - max int - } - tests := []struct { - name string - args args - }{ - { - name: "normal", - args: args{ - min: 1, - max: 3, - }, - }, - { - name: "min equals max", - args: args{ - min: 1, - max: 1, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - start := time.Now() - time.Sleep(AddRandomDuration(tt.args.min, tt.args.max, 0)) - elapsed := int(time.Since(start).Seconds()) - if elapsed < tt.args.min || elapsed > tt.args.max { - t.Errorf("AddRandomDuration() = %v, want between %v and %v", elapsed, tt.args.min, tt.args.max) - } - }) - } -} - func TestAtoi(t *testing.T) { type args struct { s string diff --git a/internal/validator/ebpf/verifier.go b/pkg/validator/ebpf/verifier.go similarity index 100% rename from internal/validator/ebpf/verifier.go rename to pkg/validator/ebpf/verifier.go diff --git a/internal/validator/validator.go b/pkg/validator/validator.go similarity index 98% rename from internal/validator/validator.go rename to pkg/validator/validator.go index fa655242..0db9834d 100644 --- a/internal/validator/validator.go +++ b/pkg/validator/validator.go @@ -5,8 +5,8 @@ import ( "os" "syscall" - "github.com/kubescape/node-agent/internal/validator/ebpf" "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/validator/ebpf" "github.com/cilium/ebpf/rlimit" "github.com/facette/natsort" diff --git a/internal/validator/validator_test.go b/pkg/validator/validator_test.go similarity index 100% rename from internal/validator/validator_test.go rename to pkg/validator/validator_test.go diff --git a/pkg/watcher/cooldownqueue/cooldownqueue.go b/pkg/watcher/cooldownqueue/cooldownqueue.go index 1bba9ea0..01200d3f 100644 --- a/pkg/watcher/cooldownqueue/cooldownqueue.go +++ b/pkg/watcher/cooldownqueue/cooldownqueue.go @@ -5,16 +5,20 @@ import ( "time" "istio.io/pkg/cache" - - 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" ) -var ( - DefaultExpiration = 5 * time.Second - EvictionInterval = 1 * time.Second +const ( + defaultExpiration = 5 * time.Second + evictionInterval = 1 * time.Second ) +// CooldownQueue is a queue that lets clients put events into it with a cooldown +// +// When a client puts an event into a queue, it waits for a cooldown period before +// the event is forwarded to the consumer. If and event for the same key is put into the queue +// again before the cooldown period is over, the event is overridden and the cooldown period is reset. type CooldownQueue struct { closed bool seenEvents cache.ExpiringCache @@ -30,7 +34,7 @@ func NewCooldownQueue() *CooldownQueue { callback := func(key, value any) { events <- value.(watch.Event) } - c := cache.NewTTLWithCallback(DefaultExpiration, EvictionInterval, callback) + c := cache.NewTTLWithCallback(defaultExpiration, evictionInterval, callback) return &CooldownQueue{ seenEvents: c, innerChan: events, @@ -40,11 +44,9 @@ func NewCooldownQueue() *CooldownQueue { // makeEventKey creates a unique key for an event from a watcher func makeEventKey(e watch.Event) string { - object, ok := e.Object.(*unstructured.Unstructured) - if !ok { - return "" - } - return strings.Join([]string{object.GroupVersionKind().Group, object.GroupVersionKind().Version, object.GetKind(), object.GetNamespace(), object.GetName()}, "/") + gvk := e.Object.GetObjectKind().GroupVersionKind() + meta := e.Object.(metav1.Object) + return strings.Join([]string{gvk.Group, gvk.Version, gvk.Kind, meta.GetNamespace(), meta.GetName()}, "/") } func (q *CooldownQueue) Closed() bool { diff --git a/pkg/watcher/dynamicwatcher/watch.go b/pkg/watcher/dynamicwatcher/watch.go index aabf98f9..8e236403 100644 --- a/pkg/watcher/dynamicwatcher/watch.go +++ b/pkg/watcher/dynamicwatcher/watch.go @@ -10,6 +10,7 @@ import ( "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/watcher" "github.com/kubescape/node-agent/pkg/watcher/cooldownqueue" + spdxv1beta1 "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/pager" @@ -19,7 +20,6 @@ import ( "github.com/kubescape/go-logger/helpers" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" ) @@ -37,6 +37,7 @@ type SkipNamespaceFunc func(string) bool type WatchHandler struct { k8sClient k8sclient.K8sClientInterface + storageClient spdxv1beta1.SpdxV1beta1Interface resources map[string]watcher.WatchResource eventQueues map[string]*cooldownqueue.CooldownQueue handlers []watcher.Watcher @@ -44,10 +45,12 @@ type WatchHandler struct { } var errWatchClosed = errors.New("watch channel closed") +var errNotImplemented = errors.New("not implemented") -func NewWatchHandler(k8sClient k8sclient.K8sClientInterface, skipNamespaceFunc SkipNamespaceFunc) *WatchHandler { +func NewWatchHandler(k8sClient k8sclient.K8sClientInterface, storageClient spdxv1beta1.SpdxV1beta1Interface, skipNamespaceFunc SkipNamespaceFunc) *WatchHandler { return &WatchHandler{ k8sClient: k8sClient, + storageClient: storageClient, resources: make(map[string]watcher.WatchResource), eventQueues: make(map[string]*cooldownqueue.CooldownQueue), skipNamespaceFunc: skipNamespaceFunc, @@ -104,7 
 	// process events
 	for event := range eventQueue.ResultChan {
 		// skip non-objects
-		obj, ok := event.Object.(*unstructured.Unstructured)
+		obj, ok := event.Object.(runtime.Object)
 		if !ok || obj == nil {
 			continue
 		}
@@ -128,14 +131,40 @@ func (wh *WatchHandler) Stop(_ context.Context) {
 	}
 }

+func (wh *WatchHandler) chooseWatcher(res schema.GroupVersionResource, opts metav1.ListOptions) (watch.Interface, error) {
+	switch res.Resource {
+	case "applicationprofiles":
+		return wh.storageClient.ApplicationProfiles("").Watch(context.Background(), opts)
+	case "networkneighborhoods":
+		return wh.storageClient.NetworkNeighborhoods("").Watch(context.Background(), opts)
+	case "pods":
+		return wh.k8sClient.GetKubernetesClient().CoreV1().Pods("").Watch(context.Background(), opts)
+	case "runtimerulealertbindings":
+		return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").Watch(context.Background(), opts)
+	case "seccompprofiles":
+		return wh.storageClient.SeccompProfiles("").Watch(context.Background(), opts)
+	case "operatorcommands":
+		return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").Watch(context.Background(), opts)
+	default:
+		// Resources in our custom storage group must go through the storage client; return an error instead of falling back to the dynamic client.
+		if res.Group == kubescapeCustomResourceGroup {
+			return nil, fmt.Errorf("resource %s must use the storage client: %w", res.Resource, errNotImplemented)
+		}
+
+		return wh.k8sClient.GetDynamicClient().Resource(res).Watch(context.Background(), opts)
+	}
+}
+
 func (wh *WatchHandler) watchRetry(ctx context.Context, res schema.GroupVersionResource, watchOpts metav1.ListOptions, eventQueue *cooldownqueue.CooldownQueue) {
 	exitFatal := true
 	if err := backoff.RetryNotify(func() error {
-		w, err := wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").Watch(context.Background(), watchOpts)
+		w, err := wh.chooseWatcher(res, watchOpts)
 		if err != nil {
 			if k8sErrors.ReasonForError(err) == metav1.StatusReasonNotFound {
 				exitFatal = false
 				return backoff.Permanent(err)
+			} else if errors.Is(err, errNotImplemented) {
+				return backoff.Permanent(err)
 			}
 			return fmt.Errorf("client resource: %w", err)
 		}
@@ -158,8 +187,9 @@ func (wh *WatchHandler) watchRetry(ctx context.Context, res schema.GroupVersionR
 			if event.Type == watch.Error {
 				return fmt.Errorf("watch error: %s", event.Object)
 			}
-			pod := event.Object.(*unstructured.Unstructured)
-			if wh.skipNamespaceFunc(pod.GetNamespace()) {
+			obj := event.Object.(metav1.Object)
+			// we don't want to skip kubescape.io resources (CRDs)
+			if res.Group != "kubescape.io" && wh.skipNamespaceFunc(obj.GetNamespace()) {
 				continue
 			}
 			eventQueue.Enqueue(event)
@@ -179,20 +209,44 @@ func (wh *WatchHandler) watchRetry(ctx context.Context, res schema.GroupVersionR
 	}
 }

+func (wh *WatchHandler) chooseLister(res schema.GroupVersionResource, opts metav1.ListOptions) (runtime.Object, error) {
+	switch res.Resource {
+	case "applicationprofiles":
+		return wh.storageClient.ApplicationProfiles("").List(context.Background(), opts)
+	case "networkneighborhoods":
+		return wh.storageClient.NetworkNeighborhoods("").List(context.Background(), opts)
+	case "pods":
+		return wh.k8sClient.GetKubernetesClient().CoreV1().Pods("").List(context.Background(), opts)
+	case "runtimerulealertbindings":
+		return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").List(context.Background(), opts)
+	case "seccompprofiles":
+		return wh.storageClient.SeccompProfiles("").List(context.Background(), opts)
+	case "operatorcommands":
+		return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").List(context.Background(), opts)
+	default:
+		// Resources in our custom storage group must go through the storage client; return an error instead of falling back to the dynamic client.
+		if res.Group == kubescapeCustomResourceGroup {
+			return nil, fmt.Errorf("resource %s must use the storage client: %w", res.Resource, errNotImplemented)
+		}
+
+		return wh.k8sClient.GetDynamicClient().Resource(res).List(context.Background(), opts)
+	}
+}
+
 func (wh *WatchHandler) getExistingStorageObjects(ctx context.Context, res schema.GroupVersionResource, watchOpts metav1.ListOptions) (string, error) {
 	logger.L().Debug("WatchHandler - getting existing objects from storage", helpers.String("resource", res.Resource))
 	list := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
-		return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").List(ctx, opts)
+		return wh.chooseLister(res, opts)
 	})
 	var resourceVersion string
 	if err := list.EachListItem(context.Background(), watchOpts, func(obj runtime.Object) error {
-		pod := obj.(*unstructured.Unstructured)
-		resourceVersion = pod.GetResourceVersion()
-		if wh.skipNamespaceFunc(pod.GetNamespace()) {
+		meta := obj.(metav1.Object)
+		resourceVersion = meta.GetResourceVersion()
+		if wh.skipNamespaceFunc(meta.GetNamespace()) {
 			return nil
 		}
 		for _, handler := range wh.handlers {
-			handler.AddHandler(ctx, pod)
+			handler.AddHandler(ctx, obj)
 		}
 		return nil
 	}); err != nil {
diff --git a/pkg/watcher/dynamicwatcher/watch_test.go b/pkg/watcher/dynamicwatcher/watch_test.go
index 4754216f..c4b2829d 100644
--- a/pkg/watcher/dynamicwatcher/watch_test.go
+++ b/pkg/watcher/dynamicwatcher/watch_test.go
@@ -10,22 +10,22 @@ import (
 	"github.com/kubescape/node-agent/pkg/watcher"
 	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
+	storagefake "github.com/kubescape/storage/pkg/generated/clientset/versioned/fake"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"

 	"github.com/kubescape/k8s-interface/k8sinterface"
 	"github.com/stretchr/testify/assert"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	dynamicfake "k8s.io/client-go/dynamic/fake"
+	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"

-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

 var (
-	resourcePod                 = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "Pod"}
+	resourcePod                 = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
 	resourceNetworkNeighborhood = schema.GroupVersionResource{Group: "spdx.softwarecomposition.kubescape.io", Version: "v1beta1", Resource: "networkneighborhoods"}
schema.GroupVersionResource{Group: "spdx.softwarecomposition.kubescape.io", Version: "v1beta1", Resource: "networkneighborhoods"} resourceApplicationProfile = schema.GroupVersionResource{Group: "spdx.softwarecomposition.kubescape.io", Version: "v1beta1", Resource: "applicationprofiles"} ) @@ -36,28 +36,13 @@ func init() { appsv1.AddToScheme(scheme.Scheme) } -func getGroupVersionResource(obj *unstructured.Unstructured) schema.GroupVersionResource { - switch obj.GetKind() { - case "ApplicationProfile": - return resourceApplicationProfile - case "NetworkNeighborhood": - return resourceNetworkNeighborhood - default: - return schema.GroupVersionResource{ - Group: obj.GetObjectKind().GroupVersionKind().Group, - Version: obj.GetObjectKind().GroupVersionKind().Version, - Resource: obj.GetObjectKind().GroupVersionKind().Kind, - } - } -} - type testObj struct { name string resources []schema.GroupVersionResource preCreatedObjects []runtime.Object - createObjects []*unstructured.Unstructured - modifiedObjects []*unstructured.Unstructured - deleteObjects []*unstructured.Unstructured + createObjects []runtime.Object + modifiedObjects []runtime.Object + deleteObjects []runtime.Object } func startTest(t *testing.T, tc testObj) { @@ -72,9 +57,10 @@ func startTest(t *testing.T, tc testObj) { } k8sClient := k8sinterface.NewKubernetesApiMock() - k8sClient.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, tc.preCreatedObjects...) + k8sClient.KubernetesClient = fake.NewClientset() + storageClient := storagefake.NewSimpleClientset(tc.preCreatedObjects...).SpdxV1beta1() - wh := NewWatchHandler(k8sClient, func(s string) bool { + wh := NewWatchHandler(k8sClient, storageClient, func(s string) bool { return false }) @@ -90,35 +76,35 @@ func startTest(t *testing.T, tc testObj) { wh.Start(ctx) - createdObj := map[string]*unstructured.Unstructured{} - modifiedObj := map[string]*unstructured.Unstructured{} - deleteObj := map[string]*unstructured.Unstructured{} + createdObj := map[string]runtime.Object{} + modifiedObj := map[string]runtime.Object{} + deleteObj := map[string]runtime.Object{} l := sync.Mutex{} go func() { for { obj := <-a.WatcherMock.Added - t.Logf("added object: kind: %s, name: %s\n", obj.GetKind(), obj.GetName()) + t.Logf("added object: kind: %s, name: %s\n", obj.GetObjectKind().GroupVersionKind().Kind, obj) l.Lock() - createdObj[obj.GetKind()+"/"+obj.GetName()] = obj + createdObj[getKey(obj)] = obj resourcesCreatedWg.Done() l.Unlock() } }() go func() { for obj := range a.WatcherMock.Updated { - t.Logf("modified object: kind: %s, name: %s\n", obj.GetKind(), obj.GetName()) + t.Logf("modified object: kind: %s, name: %s\n", obj.GetObjectKind().GroupVersionKind().Kind, obj.(metav1.Object).GetName()) l.Lock() - modifiedObj[obj.GetKind()+"/"+obj.GetName()] = obj + modifiedObj[getKey(obj)] = obj resourcesModifiedWg.Done() l.Unlock() } }() go func() { for obj := range a.WatcherMock.Deleted { - t.Logf("deleted object: kind: %s, name: %s\n", obj.GetKind(), obj.GetName()) + t.Logf("deleted object: kind: %s, name: %s\n", obj.GetObjectKind().GroupVersionKind().Kind, obj.(metav1.Object).GetName()) l.Lock() - deleteObj[obj.GetKind()+"/"+obj.GetName()] = obj + deleteObj[getKey(obj)] = obj resourcesDeleteWg.Done() l.Unlock() } @@ -127,17 +113,29 @@ func startTest(t *testing.T, tc testObj) { // wait for watcher to start time.Sleep(1 * time.Second) - // cerate objects + // create objects for i := range tc.createObjects { - if _, err := 
wh.k8sClient.GetDynamicClient().Resource(getGroupVersionResource(tc.createObjects[i])).Namespace("").Create(ctx, tc.createObjects[i], metav1.CreateOptions{}); err != nil { + var err error + if ap, ok := tc.createObjects[i].(*v1beta1.ApplicationProfile); ok { + _, err = wh.storageClient.ApplicationProfiles(ap.Namespace).Create(ctx, ap, metav1.CreateOptions{}) + } else if nn, ok := tc.createObjects[i].(*v1beta1.NetworkNeighborhood); ok { + _, err = wh.storageClient.NetworkNeighborhoods(nn.Namespace).Create(ctx, nn, metav1.CreateOptions{}) + } else if pod, ok := tc.createObjects[i].(*corev1.Pod); ok { + _, err = wh.k8sClient.GetKubernetesClient().CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + } else if sp, ok := tc.createObjects[i].(*v1beta1.SeccompProfile); ok { + _, err = wh.storageClient.SeccompProfiles(sp.Namespace).Create(ctx, sp, metav1.CreateOptions{}) + } else { + t.Fatalf("unsupported object type: %v", tc.createObjects[i]) + } + if err != nil { t.Fatalf("error creating object: %v", err) } } - resourcesCreatedWg.Wait() + waitTimeout(resourcesCreatedWg, 10*time.Second) assert.Equal(t, len(tc.createObjects)+len(tc.preCreatedObjects), len(createdObj)) for _, o := range tc.createObjects { - k := o.GetKind() + "/" + o.GetName() + k := getKey(o) c, ok := createdObj[k] assert.True(t, ok) assert.NotNil(t, c) @@ -145,39 +143,68 @@ func startTest(t *testing.T, tc testObj) { // modify objects labels := map[string]string{"test": "test"} - for i := range tc.modifiedObjects { - tc.modifiedObjects[i].SetLabels(labels) - if _, err := wh.k8sClient.GetDynamicClient().Resource(getGroupVersionResource(tc.modifiedObjects[i])).Namespace("").Update(ctx, tc.modifiedObjects[i], metav1.UpdateOptions{}); err != nil { - t.Fatalf("error creating object: %v", err) + for _, o := range tc.modifiedObjects { + o.(metav1.Object).SetLabels(labels) + var err error + if ap, ok := o.(*v1beta1.ApplicationProfile); ok { + _, err = wh.storageClient.ApplicationProfiles(ap.Namespace).Update(ctx, ap, metav1.UpdateOptions{}) + } else if nn, ok := o.(*v1beta1.NetworkNeighborhood); ok { + _, err = wh.storageClient.NetworkNeighborhoods(nn.Namespace).Update(ctx, nn, metav1.UpdateOptions{}) + } else if pod, ok := o.(*corev1.Pod); ok { + _, err = wh.k8sClient.GetKubernetesClient().CoreV1().Pods(pod.Namespace).Update(ctx, pod, metav1.UpdateOptions{}) + } else if sp, ok := o.(*v1beta1.SeccompProfile); ok { + _, err = wh.storageClient.SeccompProfiles(sp.Namespace).Update(ctx, sp, metav1.UpdateOptions{}) + } else { + t.Fatalf("unsupported object type: %v", o) + } + if err != nil { + t.Fatalf("error updating object: %v", err) } } - resourcesModifiedWg.Wait() - + waitTimeout(resourcesModifiedWg, 10*time.Second) assert.Equal(t, len(tc.modifiedObjects), len(modifiedObj)) + for _, o := range tc.modifiedObjects { - k := o.GetKind() + "/" + o.GetName() + k := getKey(o) c, ok := modifiedObj[k] assert.True(t, ok) assert.NotNil(t, c) - assert.Equal(t, labels, o.GetLabels()) + assert.Equal(t, labels, o.(metav1.Object).GetLabels()) } // delete objects - for i := range tc.deleteObjects { - if err := wh.k8sClient.GetDynamicClient().Resource(getGroupVersionResource(tc.deleteObjects[i])).Namespace("").Delete(ctx, tc.deleteObjects[i].GetName(), metav1.DeleteOptions{}); err != nil { - t.Fatalf("error creating object: %v", err) + for _, o := range tc.deleteObjects { + var err error + if ap, ok := o.(*v1beta1.ApplicationProfile); ok { + err = wh.storageClient.ApplicationProfiles(ap.Namespace).Delete(ctx, ap.Name, 
metav1.DeleteOptions{}) + } else if nn, ok := o.(*v1beta1.NetworkNeighborhood); ok { + err = wh.storageClient.NetworkNeighborhoods(nn.Namespace).Delete(ctx, nn.Name, metav1.DeleteOptions{}) + } else if pod, ok := o.(*corev1.Pod); ok { + err = wh.k8sClient.GetKubernetesClient().CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) + } else if sp, ok := o.(*v1beta1.SeccompProfile); ok { + err = wh.storageClient.SeccompProfiles(sp.Namespace).Delete(ctx, sp.Name, metav1.DeleteOptions{}) + } else { + t.Fatalf("unsupported object type: %v", o) + } + if err != nil { + t.Fatalf("error deleting object: %v", err) } } - resourcesDeleteWg.Wait() + waitTimeout(resourcesDeleteWg, 10*time.Second) assert.Equal(t, len(tc.deleteObjects), len(deleteObj)) + for _, o := range tc.deleteObjects { - k := o.GetKind() + "/" + o.GetName() + k := getKey(o) c, ok := deleteObj[k] assert.True(t, ok) assert.NotNil(t, c) } +} +func getKey(obj runtime.Object) string { + return obj.GetObjectKind().GroupVersionKind().Kind + "/" + obj.(metav1.Object).GetName() } + func TestStart_1(t *testing.T) { tt := []testObj{ { @@ -193,17 +220,17 @@ func TestStart_1(t *testing.T) { { name: "watch Pods", resources: []schema.GroupVersionResource{resourcePod}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection)}, }, { name: "watch ApplicationProfiles", resources: []schema.GroupVersionResource{resourceApplicationProfile}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection)}, }, { name: "watch NetworkNeighborhoods", resources: []schema.GroupVersionResource{resourceNetworkNeighborhood}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection)}, }, } @@ -220,13 +247,13 @@ func TestStart_2(t *testing.T) { name: "list and modify", resources: []schema.GroupVersionResource{resourceApplicationProfile}, preCreatedObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, - modifiedObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, + modifiedObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, }, { name: "watch and modify", resources: []schema.GroupVersionResource{resourceApplicationProfile}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, - modifiedObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, + modifiedObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, }, } @@ -244,13 +271,13 @@ func TestStart_3(t *testing.T) { name: "list and watch", resources: []schema.GroupVersionResource{resourceApplicationProfile}, preCreatedObjects: 
[]runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection)}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, }, { name: "list and delete", resources: []schema.GroupVersionResource{resourceApplicationProfile}, preCreatedObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, - deleteObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, + deleteObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, }, } @@ -265,9 +292,9 @@ func TestStart_4(t *testing.T) { { name: "watch, modify, and delete", resources: []schema.GroupVersionResource{resourceApplicationProfile}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, - modifiedObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, - deleteObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, + modifiedObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, + deleteObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx)}, }, } @@ -283,9 +310,9 @@ func TestStart_5(t *testing.T) { { name: "multi watch, modify, and delete", resources: []schema.GroupVersionResource{resourceApplicationProfile, resourceNetworkNeighborhood, resourcePod}, - createObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection)}, - modifiedObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection)}, - deleteObjects: []*unstructured.Unstructured{mocks.GetUnstructured(mocks.TestKindAP, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindAP, mocks.TestCollection), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindNN, mocks.TestCollection), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestNginx), mocks.GetUnstructured(mocks.TestKindPod, mocks.TestCollection)}, + createObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection)}, + modifiedObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), 
mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection)}, + deleteObjects: []runtime.Object{mocks.GetRuntime(mocks.TestKindAP, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindAP, mocks.TestCollection), mocks.GetRuntime(mocks.TestKindNN, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindNN, mocks.TestCollection), mocks.GetRuntime(mocks.TestKindPod, mocks.TestNginx), mocks.GetRuntime(mocks.TestKindPod, mocks.TestCollection)}, }, } @@ -295,3 +322,17 @@ func TestStart_5(t *testing.T) { }) } } + +func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { + c := make(chan struct{}) + go func() { + defer close(c) + wg.Wait() + }() + select { + case <-c: + return false // completed normally + case <-time.After(timeout): + return true // timed out + } +} diff --git a/pkg/watcher/seccompprofilewatcher/seccompprofilewatcher.go b/pkg/watcher/seccompprofilewatcher/seccompprofilewatcher.go index ed7c61bb..752c267f 100644 --- a/pkg/watcher/seccompprofilewatcher/seccompprofilewatcher.go +++ b/pkg/watcher/seccompprofilewatcher/seccompprofilewatcher.go @@ -4,28 +4,29 @@ import ( "context" "fmt" - "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/seccompmanager" "github.com/kubescape/node-agent/pkg/watcher" + v1beta1api "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1" + "k8s.io/apimachinery/pkg/runtime" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" ) type SeccompProfileWatcherImpl struct { - k8sClient k8sclient.K8sClientInterface + storageClient v1beta1.SpdxV1beta1Interface seccompManager seccompmanager.SeccompManagerClient groupVersionResource schema.GroupVersionResource } var _ watcher.Adaptor = (*SeccompProfileWatcherImpl)(nil) -func NewSeccompProfileWatcher(k8sClient k8sclient.K8sClientInterface, seccompManager seccompmanager.SeccompManagerClient) *SeccompProfileWatcherImpl { +func NewSeccompProfileWatcher(storageClient v1beta1.SpdxV1beta1Interface, seccompManager seccompmanager.SeccompManagerClient) *SeccompProfileWatcherImpl { return &SeccompProfileWatcherImpl{ - k8sClient: k8sClient, + storageClient: storageClient, seccompManager: seccompManager, groupVersionResource: schema.GroupVersionResource{ Group: "spdx.softwarecomposition.kubescape.io", @@ -46,8 +47,8 @@ func (sp *SeccompProfileWatcherImpl) WatchResources() []watcher.WatchResource { // ------------------ watcher.Watcher methods ----------------------- -func (sp *SeccompProfileWatcherImpl) AddHandler(ctx context.Context, obj *unstructured.Unstructured) { - if obj.GetKind() == "SeccompProfile" { +func (sp *SeccompProfileWatcherImpl) AddHandler(ctx context.Context, obj runtime.Object) { + if _, ok := obj.(*v1beta1api.SeccompProfile); ok { fullObj, err := sp.getFullSeccompProfile(obj) if err != nil { logger.L().Ctx(ctx).Error("SeccompProfileWatcherImpl - failed to get full seccomp profile", helpers.Error(err)) @@ -59,8 +60,8 @@ func (sp *SeccompProfileWatcherImpl) AddHandler(ctx context.Context, obj *unstru } } -func (sp *SeccompProfileWatcherImpl) ModifyHandler(ctx context.Context, obj *unstructured.Unstructured) { - if obj.GetKind() == "SeccompProfile" { +func (sp *SeccompProfileWatcherImpl) ModifyHandler(ctx context.Context, obj runtime.Object) { + if _, ok := obj.(*v1beta1api.SeccompProfile); ok { fullObj, err := 
sp.getFullSeccompProfile(obj) if err != nil { logger.L().Ctx(ctx).Error("SeccompProfileWatcherImpl - failed to get full seccomp profile", helpers.Error(err)) @@ -72,16 +73,17 @@ func (sp *SeccompProfileWatcherImpl) ModifyHandler(ctx context.Context, obj *uns } } -func (sp *SeccompProfileWatcherImpl) DeleteHandler(ctx context.Context, obj *unstructured.Unstructured) { - if obj.GetKind() == "SeccompProfile" { - if err := sp.seccompManager.DeleteSeccompProfile(obj); err != nil { +func (sp *SeccompProfileWatcherImpl) DeleteHandler(ctx context.Context, obj runtime.Object) { + if _, ok := obj.(*v1beta1api.SeccompProfile); ok { + if err := sp.seccompManager.DeleteSeccompProfile(obj.(*v1beta1api.SeccompProfile)); err != nil { logger.L().Ctx(ctx).Error("SeccompProfileWatcherImpl - failed to delete seccomp profile", helpers.Error(err)) } } } -func (sp *SeccompProfileWatcherImpl) getFullSeccompProfile(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { - fullObj, err := sp.k8sClient.GetDynamicClient().Resource(sp.groupVersionResource).Namespace(obj.GetNamespace()).Get(context.Background(), obj.GetName(), metav1.GetOptions{}) +func (sp *SeccompProfileWatcherImpl) getFullSeccompProfile(obj runtime.Object) (*v1beta1api.SeccompProfile, error) { + meta := obj.(metav1.Object) + fullObj, err := sp.storageClient.SeccompProfiles(meta.GetNamespace()).Get(context.Background(), meta.GetName(), metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get full seccomp profile: %w", err) } diff --git a/pkg/watcher/watcher_interface.go b/pkg/watcher/watcher_interface.go index 6b6e1ccc..c5b710d8 100644 --- a/pkg/watcher/watcher_interface.go +++ b/pkg/watcher/watcher_interface.go @@ -3,7 +3,7 @@ package watcher import ( "context" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" ) type Adaptor interface { @@ -12,35 +12,35 @@ type Adaptor interface { } type Watcher interface { - AddHandler(ctx context.Context, obj *unstructured.Unstructured) - ModifyHandler(ctx context.Context, obj *unstructured.Unstructured) - DeleteHandler(ctx context.Context, obj *unstructured.Unstructured) + AddHandler(ctx context.Context, obj runtime.Object) + ModifyHandler(ctx context.Context, obj runtime.Object) + DeleteHandler(ctx context.Context, obj runtime.Object) } var _ Watcher = &WatcherMock{} type WatcherMock struct { - Added chan *unstructured.Unstructured - Updated chan *unstructured.Unstructured - Deleted chan *unstructured.Unstructured + Added chan runtime.Object + Updated chan runtime.Object + Deleted chan runtime.Object } func NewWatcherMock() *WatcherMock { return &WatcherMock{ - Added: make(chan *unstructured.Unstructured), - Updated: make(chan *unstructured.Unstructured), - Deleted: make(chan *unstructured.Unstructured), + Added: make(chan runtime.Object), + Updated: make(chan runtime.Object), + Deleted: make(chan runtime.Object), } } -func (wm *WatcherMock) AddHandler(_ context.Context, obj *unstructured.Unstructured) { +func (wm *WatcherMock) AddHandler(_ context.Context, obj runtime.Object) { wm.Added <- obj } -func (wm *WatcherMock) ModifyHandler(_ context.Context, obj *unstructured.Unstructured) { +func (wm *WatcherMock) ModifyHandler(_ context.Context, obj runtime.Object) { wm.Updated <- obj } -func (wm *WatcherMock) DeleteHandler(_ context.Context, obj *unstructured.Unstructured) { +func (wm *WatcherMock) DeleteHandler(_ context.Context, obj runtime.Object) { wm.Deleted <- obj } diff --git a/tests/chart/templates/node-agent/configmap.yaml 
b/tests/chart/templates/node-agent/configmap.yaml index 666b7737..ee1890c8 100644 --- a/tests/chart/templates/node-agent/configmap.yaml +++ b/tests/chart/templates/node-agent/configmap.yaml @@ -15,8 +15,10 @@ data: "runtimeDetectionEnabled": {{ eq .Values.capabilities.runtimeDetection "enable" }}, "networkServiceEnabled": {{ eq .Values.capabilities.networkPolicyService "enable" }}, "malwareDetectionEnabled": {{ eq .Values.capabilities.malwareDetection "enable" }}, - "InitialDelay": "{{ .Values.nodeAgent.config.learningPeriod }}", + "httpDetectionEnabled": {{ eq .Values.capabilities.httpDetection "enable" }}, + "initialDelay": "{{ .Values.nodeAgent.config.learningPeriod }}", "updateDataPeriod": "{{ .Values.nodeAgent.config.updatePeriod }}", + "maxDelaySeconds": "{{ .Values.nodeAgent.config.maxDelaySeconds }}", "maxSniffingTimePerContainer": "{{ .Values.nodeAgent.config.maxLearningPeriod }}", "exporters": { "httpExporterConfig": {{- .Values.nodeAgent.config.httpExporterConfig | toJson }}, @@ -37,4 +39,4 @@ data: {{ .Files.Get "clamav/clamd.conf" | indent 4 }} freshclam.conf: |- {{ .Files.Get "clamav/freshclam.conf" | indent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/tests/chart/values.yaml b/tests/chart/values.yaml index 2b8f74c1..5ba10fc2 100644 --- a/tests/chart/values.yaml +++ b/tests/chart/values.yaml @@ -11,6 +11,7 @@ capabilities: networkPolicyService: enable runtimeDetection: enable malwareDetection: enable + httpDetection: enable configurations: persistence: enable @@ -32,7 +33,7 @@ storage: name: "storage" image: repository: quay.io/kubescape/storage - tag: v0.0.79 + tag: v0.0.121 pullPolicy: IfNotPresent cleanupInterval: "6h" labels: @@ -57,6 +58,7 @@ nodeAgent: maxLearningPeriod: 2m learningPeriod: 1m updatePeriod: 30s + maxDelaySeconds: 1 prometheusExporter: enable httpExporterConfig: {} alertManagerExporterUrls: [ diff --git a/tests/component_test.go b/tests/component_test.go index b75140b6..46ee65f1 100644 --- a/tests/component_test.go +++ b/tests/component_test.go @@ -5,8 +5,10 @@ package tests import ( "context" "encoding/json" + "fmt" "path" "slices" + "sort" "testing" "time" @@ -15,7 +17,9 @@ import ( "github.com/kubescape/node-agent/tests/testutils" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" spdxv1beta1client "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -605,158 +609,517 @@ func Test_10_MalwareDetectionTest(t *testing.T) { assert.Equal(t, len(expectedMalwares), len(malwaresDetected), "Expected %d malwares to be detected, but got %d malwares", len(expectedMalwares), len(malwaresDetected)) } -// func Test_10_DemoTest(t *testing.T) { -// start := time.Now() -// defer tearDownTest(t, start) - -// //testutils.IncreaseNodeAgentSniffingTime("2m") -// wl, err := testutils.NewTestWorkload("default", path.Join(utils.CurrentDir(), "resources/ping-app-role.yaml")) -// if err != nil { -// t.Errorf("Error creating role: %v", err) -// } - -// wl, err = testutils.NewTestWorkload("default", path.Join(utils.CurrentDir(), "resources/ping-app-role-binding.yaml")) -// if err != nil { -// t.Errorf("Error creating role binding: %v", err) -// } - -// wl, err = testutils.NewTestWorkload("default", path.Join(utils.CurrentDir(), 
"resources/ping-app-service.yaml")) -// if err != nil { -// t.Errorf("Error creating service: %v", err) -// } - -// wl, err = testutils.NewTestWorkload("default", path.Join(utils.CurrentDir(), "resources/ping-app.yaml")) -// if err != nil { -// t.Errorf("Error creating workload: %v", err) -// } -// assert.NoError(t, wl.WaitForReady(80)) -// _, _, err = wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4"}, "") -// err = wl.WaitForApplicationProfileCompletion(80) -// if err != nil { -// t.Errorf("Error waiting for application profile to be completed: %v", err) -// } -// // err = wl.WaitForNetworkNeighborhoodCompletion(80) -// // if err != nil { -// // t.Errorf("Error waiting for network neighborhood to be completed: %v", err) -// // } - -// // Do a ls command using command injection in the ping command -// _, _, err = wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4;ls"}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// // Do a cat command using command injection in the ping command -// _, _, err = wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4;cat /run/secrets/kubernetes.io/serviceaccount/token"}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// // Do an uname command using command injection in the ping command -// _, _, err = wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4;uname -m | sed 's/x86_64/amd64/g' | sed 's/aarch64/arm64/g'"}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// // Download kubectl -// _, _, err = wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4;curl -LO \"https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl\""}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// // Sleep for 10 seconds to wait for the kubectl download -// time.Sleep(10 * time.Second) - -// // Make kubectl executable -// _, _, err = wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4;chmod +x kubectl"}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// // Get the pods in the cluster -// output, _, err := wl.ExecIntoPod([]string{"sh", "-c", "ping 1.1.1.1 -c 4;./kubectl --server https://kubernetes.default --insecure-skip-tls-verify --token $(cat /run/secrets/kubernetes.io/serviceaccount/token) get pods"}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// // Check that the output contains the pod-ping-app pod -// assert.Contains(t, output, "ping-app", "Expected output to contain 'ping-app'") - -// // Get the alerts and check that the alerts are generated -// alerts, err := testutils.GetAlerts(wl.Namespace) -// if err != nil { -// t.Errorf("Error getting alerts: %v", err) -// } - -// // Validate that all alerts are signaled -// expectedAlerts := map[string]bool{ -// "Unexpected process launched": false, -// "Unexpected file access": false, -// "Kubernetes Client Executed": false, -// // "Exec from malicious source": false, -// "Exec Binary Not In Base Image": false, -// "Unexpected Service Account Token Access": false, -// // "Unexpected domain request": false, -// } - -// for _, alert := range alerts { -// ruleName, ruleOk := alert.Labels["rule_name"] -// if ruleOk { -// if _, exists := expectedAlerts[ruleName]; exists { -// expectedAlerts[ruleName] = true -// } -// } -// } - -// for ruleName, signaled := range expectedAlerts { 
-// if !signaled { -// t.Errorf("Expected alert '%s' was not signaled", ruleName) -// } -// } -// } - -// func Test_11_DuplicationTest(t *testing.T) { -// start := time.Now() -// defer tearDownTest(t, start) - -// ns := testutils.NewRandomNamespace() -// // wl, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/deployment-multiple-containers.yaml")) -// wl, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/ping-app.yaml")) -// if err != nil { -// t.Errorf("Error creating workload: %v", err) -// } -// assert.NoError(t, wl.WaitForReady(80)) - -// err = wl.WaitForApplicationProfileCompletion(80) -// if err != nil { -// t.Errorf("Error waiting for application profile to be completed: %v", err) -// } - -// // process launched from nginx container -// _, _, err = wl.ExecIntoPod([]string{"ls", "-a"}, "ping-app") -// if err != nil { -// t.Errorf("Error executing remote command: %v", err) -// } - -// time.Sleep(20 * time.Second) - -// alerts, err := testutils.GetAlerts(wl.Namespace) -// if err != nil { -// t.Errorf("Error getting alerts: %v", err) -// } - -// // Validate that unexpected process launched alert is signaled only once -// count := 0 -// for _, alert := range alerts { -// ruleName, ruleOk := alert.Labels["rule_name"] -// if ruleOk { -// if ruleName == "Unexpected process launched" { -// count++ -// } -// } -// } - -// testutils.AssertContains(t, alerts, "Unexpected process launched", "ls", "ping-app") - -// assert.Equal(t, 1, count, "Expected 1 alert of type 'Unexpected process launched' but got %d", count) -// } +func Test_11_EndpointTest(t *testing.T) { + threshold := 120 + ns := testutils.NewRandomNamespace() + + endpointTraffic, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/endpoint-traffic.yaml")) + if err != nil { + t.Errorf("Error creating workload: %v", err) + } + err = endpointTraffic.WaitForReady(80) + if err != nil { + t.Errorf("Error waiting for workload to be ready: %v", err) + } + + assert.NoError(t, endpointTraffic.WaitForApplicationProfile(80, "ready")) + + // Merge methods + _, _, err = endpointTraffic.ExecIntoPod([]string{"wget", "http://127.0.0.1:80"}, "") + assert.NoError(t, err) + _, _, err = endpointTraffic.ExecIntoPod([]string{"wget", "http://127.0.0.1:80", "--post-data", "test-data"}, "") + + // Merge dynamic + for i := 0; i < threshold; i++ { + endpointTraffic.ExecIntoPod([]string{"wget", fmt.Sprintf("http://127.0.0.1:80/users/%d", i)}, "") + } + + // Merge headers + _, _, err = endpointTraffic.ExecIntoPod([]string{"wget", "http://127.0.0.1:80/users/99", "--header", "Connection:1234r"}, "") + _, _, err = endpointTraffic.ExecIntoPod([]string{"wget", "http://127.0.0.1:80/users/12", "--header", "Connection:ziz"}, "") + + err = endpointTraffic.WaitForApplicationProfileCompletion(10) + + applicationProfile, err := endpointTraffic.GetApplicationProfile() + if err != nil { + t.Errorf("Error getting application profile: %v", err) + } + + headers := map[string][]string{"Connection": {"close"}, "Host": {"127.0.0.1:80"}} + rawJSON, err := json.Marshal(headers) + assert.NoError(t, err) + + endpoint2 := v1beta1.HTTPEndpoint{ + Endpoint: ":80/", + Methods: []string{"GET", "POST"}, + Internal: false, + Direction: "inbound", + Headers: rawJSON, + } + + headers = map[string][]string{"Host": {"127.0.0.1:80"}, "Connection": {"1234r", "close", "ziz"}} + rawJSON, err = json.Marshal(headers) + assert.NoError(t, err) + + endpoint1 := v1beta1.HTTPEndpoint{ + Endpoint: ":80/users/" + 
dynamicpathdetector.DynamicIdentifier, + Methods: []string{"GET"}, + Internal: false, + Direction: "inbound", + Headers: rawJSON, + } + + e3 := endpoint1 + e3.Direction = "outbound" + + e4 := endpoint2 + e4.Direction = "outbound" + + savedEndpoints := applicationProfile.Spec.Containers[0].Endpoints + + for i := range savedEndpoints { + + headers := savedEndpoints[i].Headers + var headersMap map[string][]string + err := json.Unmarshal([]byte(headers), &headersMap) + if err != nil { + t.Errorf("Error unmarshalling headers: %v", err) + } + + if headersMap["Connection"] != nil { + sort.Strings(headersMap["Connection"]) + rawJSON, err = json.Marshal(headersMap) + assert.NoError(t, err) + savedEndpoints[i].Headers = rawJSON + } + } + + expectedEndpoints := []v1beta1.HTTPEndpoint{endpoint1, endpoint2, e3, e4} + + for _, savedEndpoint := range savedEndpoints { + if endpoint := getEndpoint(expectedEndpoints, savedEndpoint); endpoint != nil { + e := *endpoint + sort.Strings(e.Methods) + sort.Strings(savedEndpoint.Methods) + assert.Equal(t, e, savedEndpoint) + } else { + t.Errorf("Endpoint %v not found in the saved endpoints", savedEndpoint) + } + + } +} + +func getEndpoint(endpoints []v1beta1.HTTPEndpoint, endpoint v1beta1.HTTPEndpoint) *v1beta1.HTTPEndpoint { + for _, e := range endpoints { + if endpoint.Endpoint == e.Endpoint && endpoint.Direction == e.Direction && endpoint.Internal == e.Internal { + return &e + } + } + return nil + +} + +func Test_12_MergingProfilesTest(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + // PHASE 1: Setup workload and initial profile + ns := testutils.NewRandomNamespace() + wl, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/deployment-multiple-containers.yaml")) + require.NoError(t, err, "Failed to create workload") + require.NoError(t, wl.WaitForReady(80), "Workload failed to be ready") + require.NoError(t, wl.WaitForApplicationProfile(80, "ready"), "Application profile not ready") + + // Generate initial profile data + _, _, err = wl.ExecIntoPod([]string{"ls", "-l"}, "nginx") + require.NoError(t, err, "Failed to exec into nginx container") + _, _, err = wl.ExecIntoPod([]string{"wget", "ebpf.io", "-T", "2", "-t", "1"}, "server") + require.NoError(t, err, "Failed to exec into server container") + + require.NoError(t, wl.WaitForApplicationProfileCompletion(80), "Profile failed to complete") + time.Sleep(10 * time.Second) // Allow profile processing + + // Log initial profile state + initialProfile, err := wl.GetApplicationProfile() + require.NoError(t, err, "Failed to get initial profile") + initialProfileJSON, _ := json.Marshal(initialProfile) + t.Logf("Initial application profile:\n%s", string(initialProfileJSON)) + + // PHASE 2: Verify initial alerts + t.Log("Testing initial alert generation...") + wl.ExecIntoPod([]string{"ls", "-l"}, "nginx") // Expected: no alert + wl.ExecIntoPod([]string{"ls", "-l"}, "server") // Expected: alert + time.Sleep(30 * time.Second) // Wait for alert generation + + initialAlerts, err := testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "Failed to get initial alerts") + + // Record initial alert count + initialAlertCount := 0 + for _, alert := range initialAlerts { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected process launched" { + initialAlertCount++ + } + } + + testutils.AssertContains(t, initialAlerts, "Unexpected process launched", "ls", "server") + testutils.AssertNotContains(t, initialAlerts, "Unexpected process launched", "ls", 
"nginx") + + // PHASE 3: Apply user-managed profile + t.Log("Applying user-managed profile...") + // Create the user-managed profile + userProfile := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ug-%s", initialProfile.Name), + Namespace: initialProfile.Namespace, + Annotations: map[string]string{ + "kubescape.io/managed-by": "User", + }, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "nginx", + Execs: []v1beta1.ExecCalls{ + { + Path: "/usr/bin/ls", + Args: []string{"/usr/bin/ls", "-l"}, + }, + }, + SeccompProfile: v1beta1.SingleSeccompProfile{ + Spec: v1beta1.SingleSeccompProfileSpec{ + DefaultAction: "", + }, + }, + }, + { + Name: "server", + Execs: []v1beta1.ExecCalls{ + { + Path: "/bin/ls", + Args: []string{"/bin/ls", "-l"}, + }, + { + Path: "/bin/grpc_health_probe", + Args: []string{"-addr=:9555"}, + }, + }, + SeccompProfile: v1beta1.SingleSeccompProfile{ + Spec: v1beta1.SingleSeccompProfileSpec{ + DefaultAction: "", + }, + }, + }, + }, + }, + } + + // Log the profile we're about to create + userProfileJSON, err := json.MarshalIndent(userProfile, "", " ") + require.NoError(t, err, "Failed to marshal user profile") + t.Logf("Creating user profile:\n%s", string(userProfileJSON)) + + // Get k8s client + k8sClient := k8sinterface.NewKubernetesApi() + + // Create the user-managed profile + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + _, err = storageClient.ApplicationProfiles(ns.Name).Create(context.Background(), userProfile, metav1.CreateOptions{}) + require.NoError(t, err, "Failed to create user profile") + + // PHASE 4: Verify merged profile behavior + t.Log("Verifying merged profile behavior...") + time.Sleep(15 * time.Second) // Allow merge to complete + + // Test merged profile behavior + wl.ExecIntoPod([]string{"ls", "-l"}, "nginx") // Expected: no alert + wl.ExecIntoPod([]string{"ls", "-l"}, "server") // Expected: no alert (user profile should suppress alert) + time.Sleep(10 * time.Second) // Wait for potential alerts + + // Verify alert counts + finalAlerts, err := testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "Failed to get final alerts") + + // Only count new alerts (after the initial count) + newAlertCount := 0 + for _, alert := range finalAlerts { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected process launched" { + newAlertCount++ + } + } + + t.Logf("Alert counts - Initial: %d, Final: %d", initialAlertCount, newAlertCount) + + if newAlertCount > initialAlertCount { + t.Logf("Full alert details:") + for _, alert := range finalAlerts { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected process launched" { + t.Logf("Alert: %+v", alert) + } + } + t.Errorf("New alerts were generated after merge (Initial: %d, Final: %d)", initialAlertCount, newAlertCount) + } + + // PHASE 5: Check PATCH (removing the ls command from the user profile of the server container and triggering an alert) + t.Log("Patching user profile to remove ls command from server container...") + patchOperations := []utils.PatchOperation{ + {Op: "remove", Path: "/spec/containers/1/execs/0"}, + } + + patch, err := json.Marshal(patchOperations) + require.NoError(t, err, "Failed to marshal patch operations") + + _, err = storageClient.ApplicationProfiles(ns.Name).Patch(context.Background(), userProfile.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) + require.NoError(t, err, 
"Failed to patch user profile") + + // Verify patched profile behavior + time.Sleep(15 * time.Second) // Allow merge to complete + + // Log the profile that was patched + patchedProfile, err := wl.GetApplicationProfile() + require.NoError(t, err, "Failed to get patched profile") + t.Logf("Patched application profile:\n%v", patchedProfile) + + // Test patched profile behavior + wl.ExecIntoPod([]string{"ls", "-l"}, "nginx") // Expected: no alert + wl.ExecIntoPod([]string{"ls", "-l"}, "server") // Expected: alert (ls command removed from user profile) + time.Sleep(10 * time.Second) // Wait for potential alerts + + // Verify alert counts + finalAlerts, err = testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "Failed to get final alerts") + + // Only count new alerts (after the initial count) + newAlertCount = 0 + for _, alert := range finalAlerts { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected process launched" { + newAlertCount++ + } + } + + t.Logf("Alert counts - Initial: %d, Final: %d", initialAlertCount, newAlertCount) + + if newAlertCount <= initialAlertCount { + t.Logf("Full alert details:") + for _, alert := range finalAlerts { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected process launched" { + t.Logf("Alert: %+v", alert) + } + } + t.Errorf("New alerts were not generated after patch (Initial: %d, Final: %d)", initialAlertCount, newAlertCount) + } +} + +func Test_13_MergingNetworkNeighborhoodTest(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + // PHASE 1: Setup workload and initial network neighborhood + ns := testutils.NewRandomNamespace() + wl, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/deployment-multiple-containers.yaml")) + require.NoError(t, err, "Failed to create workload") + require.NoError(t, wl.WaitForReady(80), "Workload failed to be ready") + require.NoError(t, wl.WaitForNetworkNeighborhood(80, "ready"), "Network neighborhood not ready") + + // Generate initial network data + _, _, err = wl.ExecIntoPod([]string{"wget", "ebpf.io", "-T", "2", "-t", "1"}, "server") + require.NoError(t, err, "Failed to exec wget in server container") + _, _, err = wl.ExecIntoPod([]string{"curl", "kubernetes.io", "-m", "2"}, "nginx") + require.NoError(t, err, "Failed to exec curl in nginx container") + + require.NoError(t, wl.WaitForNetworkNeighborhoodCompletion(80), "Network neighborhood failed to complete") + time.Sleep(10 * time.Second) // Allow network neighborhood processing + + // Log initial network neighborhood state + initialNN, err := wl.GetNetworkNeighborhood() + require.NoError(t, err, "Failed to get initial network neighborhood") + initialNNJSON, _ := json.Marshal(initialNN) + t.Logf("Initial network neighborhood:\n%s", string(initialNNJSON)) + + // PHASE 2: Verify initial alerts + t.Log("Testing initial alert generation...") + _, _, err = wl.ExecIntoPod([]string{"wget", "ebpf.io", "-T", "2", "-t", "1"}, "server") // Expected: no alert (original rule) + _, _, err = wl.ExecIntoPod([]string{"wget", "httpforever.com", "-T", "2", "-t", "1"}, "server") // Expected: alert (not allowed) + _, _, err = wl.ExecIntoPod([]string{"wget", "httpforever.com", "-T", "2", "-t", "1"}, "server") // Expected: alert (not allowed) + _, _, err = wl.ExecIntoPod([]string{"wget", "httpforever.com", "-T", "2", "-t", "1"}, "server") // Expected: alert (not allowed) + _, _, err = wl.ExecIntoPod([]string{"curl", "kubernetes.io", "-m", "2"}, "nginx") // Expected: no alert 
(original rule)
+	_, _, err = wl.ExecIntoPod([]string{"curl", "github.com", "-m", "2"}, "nginx") // Expected: alert (not allowed)
+	time.Sleep(30 * time.Second) // Wait for alert generation
+
+	initialAlerts, err := testutils.GetAlerts(wl.Namespace)
+	require.NoError(t, err, "Failed to get initial alerts")
+
+	// Record initial alert count
+	initialAlertCount := 0
+	for _, alert := range initialAlerts {
+		if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" {
+			initialAlertCount++
+		}
+	}
+
+	// Verify initial alerts
+	testutils.AssertContains(t, initialAlerts, "Unexpected domain request", "wget", "server")
+	testutils.AssertContains(t, initialAlerts, "Unexpected domain request", "curl", "nginx")
+
+	// PHASE 3: Apply user-managed network neighborhood
+	t.Log("Applying user-managed network neighborhood...")
+	userNN := &v1beta1.NetworkNeighborhood{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("ug-%s", initialNN.Name),
+			Namespace: initialNN.Namespace,
+			Annotations: map[string]string{
+				"kubescape.io/managed-by": "User",
+			},
+		},
+		Spec: v1beta1.NetworkNeighborhoodSpec{
+			LabelSelector: metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": "multiple-containers-app",
+				},
+			},
+			Containers: []v1beta1.NetworkNeighborhoodContainer{
+				{
+					Name: "nginx",
+					Egress: []v1beta1.NetworkNeighbor{
+						{
+							Identifier: "nginx-github",
+							Type:       "external",
+							DNSNames:   []string{"github.com."},
+							Ports: []v1beta1.NetworkPort{
+								{
+									Name:     "TCP-80",
+									Protocol: "TCP",
+									Port:     ptr(int32(80)),
+								},
+								{
+									Name:     "TCP-443",
+									Protocol: "TCP",
+									Port:     ptr(int32(443)),
+								},
+							},
+						},
+					},
+				},
+				{
+					Name: "server",
+					Egress: []v1beta1.NetworkNeighbor{
+						{
+							Identifier: "server-example",
+							Type:       "external",
+							DNSNames:   []string{"info.cern.ch."},
+							Ports: []v1beta1.NetworkPort{
+								{
+									Name:     "TCP-80",
+									Protocol: "TCP",
+									Port:     ptr(int32(80)),
+								},
+								{
+									Name:     "TCP-443",
+									Protocol: "TCP",
+									Port:     ptr(int32(443)),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// Create user-managed network neighborhood
+	k8sClient := k8sinterface.NewKubernetesApi()
+	storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig)
+	_, err = storageClient.NetworkNeighborhoods(ns.Name).Create(context.Background(), userNN, metav1.CreateOptions{})
+	require.NoError(t, err, "Failed to create user network neighborhood")
+
+	// PHASE 4: Verify merged behavior (no new alerts)
+	t.Log("Verifying merged network neighborhood behavior...")
+	time.Sleep(25 * time.Second) // Allow merge to complete
+
+	_, _, err = wl.ExecIntoPod([]string{"wget", "ebpf.io", "-T", "2", "-t", "1"}, "server") // Expected: no alert (original)
+	// Repeat several times to make sure the user-added permission consistently raises no alert
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: no alert (user added)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: no alert (user added)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: no alert (user added)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: no alert (user added)
+	_, _, err = wl.ExecIntoPod([]string{"curl", "kubernetes.io", "-m", "2"}, "nginx") // Expected: no alert (original)
+	_, _, err = wl.ExecIntoPod([]string{"curl", "github.com", "-m", "2"}, "nginx") // Expected: no alert (user added)
+	time.Sleep(30 * time.Second) // Wait for potential alerts
+
+	mergedAlerts, err := testutils.GetAlerts(wl.Namespace)
+	require.NoError(t, err, "Failed to get alerts after merge")
+
+	// Count new alerts after merge
+	newAlertCount := 0
+	for _, alert := range mergedAlerts {
+		if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" {
+			newAlertCount++
+		}
+	}
+
+	t.Logf("Alert counts - Initial: %d, After merge: %d", initialAlertCount, newAlertCount)
+
+	if newAlertCount > initialAlertCount {
+		t.Logf("Full alert details:")
+		for _, alert := range mergedAlerts {
+			if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" {
+				t.Logf("Alert: %+v", alert)
+			}
+		}
+		t.Errorf("New alerts were generated after merge (Initial: %d, After merge: %d)", initialAlertCount, newAlertCount)
+	}
+
+	// PHASE 5: Remove permission via patch and verify alerts return
+	t.Log("Patching user network neighborhood to remove info.cern.ch from server container...")
+	patchOperations := []utils.PatchOperation{
+		{Op: "remove", Path: "/spec/containers/1/egress/0"},
+	}
+
+	patch, err := json.Marshal(patchOperations)
+	require.NoError(t, err, "Failed to marshal patch operations")
+
+	_, err = storageClient.NetworkNeighborhoods(ns.Name).Patch(context.Background(), userNN.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
+	require.NoError(t, err, "Failed to patch user network neighborhood")
+
+	time.Sleep(20 * time.Second) // Allow merge to complete
+
+	// Test alerts after patch
+	_, _, err = wl.ExecIntoPod([]string{"wget", "ebpf.io", "-T", "2", "-t", "1"}, "server") // Expected: no alert
+	// Repeat several times to make sure the removed permission consistently raises alerts again
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: alert (removed)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: alert (removed)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: alert (removed)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: alert (removed)
+	_, _, err = wl.ExecIntoPod([]string{"wget", "info.cern.ch", "-T", "2", "-t", "1"}, "server") // Expected: alert (removed)
+	_, _, err = wl.ExecIntoPod([]string{"curl", "kubernetes.io", "-m", "2"}, "nginx") // Expected: no alert
+	_, _, err = wl.ExecIntoPod([]string{"curl", "github.com", "-m", "2"}, "nginx") // Expected: no alert
+	time.Sleep(30 * time.Second) // Wait for alerts
+
+	finalAlerts, err := testutils.GetAlerts(wl.Namespace)
+	require.NoError(t, err, "Failed to get final alerts")
+
+	// Count final alerts
+	finalAlertCount := 0
+	for _, alert := range finalAlerts {
+		if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" {
+			finalAlertCount++
+		}
+	}
+
+	t.Logf("Alert counts - Initial: %d, Final: %d", initialAlertCount, finalAlertCount)
+
+	if finalAlertCount <= initialAlertCount {
+		t.Logf("Full alert details:")
+		for _, alert := range finalAlerts {
+			if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" {
+				t.Logf("Alert: %+v", alert)
+			}
+		}
+		t.Errorf("New alerts were not generated after patch (Initial: %d, Final: %d)", initialAlertCount, finalAlertCount)
+	}
+}
+
+func ptr(i int32) *int32 {
+	return &i
+}
diff --git a/tests/resources/endpoint-traffic.yaml b/tests/resources/endpoint-traffic.yaml
new file mode 100644
index 00000000..6897faf4
--- /dev/null
+++ b/tests/resources/endpoint-traffic.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: endpoint-traffic
+  name: endpoint-traffic-deployment
+spec:
+  selector:
+    matchLabels:
+      app: endpoint-traffic
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: endpoint-traffic
+    spec:
+      containers:
+        - name: endpoint-traffic
+          image: armoafekb/afek-b-tests:ptrace_test
+          imagePullPolicy: Always
+          # Create a volume mount for the script
+          volumeMounts:
+            - name: server-script
+              mountPath: /app
+          command: ["/bin/sh"]
+          args: ["-c", "echo '$(SERVER_SCRIPT)' > /app/server.py && python3 /app/server.py"]
+          ports:
+            - containerPort: 80
+          env:
+            - name: SERVER_SCRIPT
+              value: |
+                from http.server import HTTPServer, BaseHTTPRequestHandler
+                class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
+                    def do_GET(self):
+                        self.send_response(200)
+                        self.send_header("Content-type", "text/plain")
+                        self.end_headers()
+                        self.wfile.write(b"GET request received successfully")
+                    def do_POST(self):
+                        content_length = int(self.headers["Content-Length"])
+                        post_data = self.rfile.read(content_length)
+                        print(f"Received POST data: {post_data.decode()}")
+                        self.send_response(200)
+                        self.send_header("Content-type", "text/plain")
+                        self.end_headers()
+                        self.wfile.write(b"POST request received successfully")
+                def run_server(port=80):
+                    server_address = ("", port)
+                    httpd = HTTPServer(server_address, SimpleHTTPRequestHandler)
+                    print(f"Server running on port {port}")
+                    httpd.serve_forever()
+                if __name__ == "__main__":
+                    run_server()
+      volumes:
+        - name: server-script
+          emptyDir: {}
\ No newline at end of file
diff --git a/tests/resources/user-profile.yaml b/tests/resources/user-profile.yaml
new file mode 100644
index 00000000..97a116f6
--- /dev/null
+++ b/tests/resources/user-profile.yaml
@@ -0,0 +1,47 @@
+apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1
+kind: ApplicationProfile
+metadata:
+  name: {name}
+  namespace: {namespace}
+  resourceVersion: "1" # Start with "1" for new resources
+  annotations:
+    kubescape.io/managed-by: User
+spec:
+  architectures: ["amd64"]
+  containers:
+    - name: nginx
+      imageID: ""
+      imageTag: ""
+      capabilities: []
+      opens: []
+      syscalls: []
+      endpoints: []
+      execs:
+        - path: /usr/bin/ls
+          args:
+            - /usr/bin/ls
+            - -l
+      seccompProfile:
+        spec:
+          defaultAction: ""
+    - name: server
+      imageID: ""
+      imageTag: ""
+      capabilities: []
+      opens: []
+      syscalls: []
+      endpoints: []
+      execs:
+        - path: /bin/ls
+          args:
+            - /bin/ls
+            - -l
+        - path: /bin/grpc_health_probe
+          args:
+            - "-addr=:9555"
+      seccompProfile:
+        spec:
+          defaultAction: ""
+  initContainers: []
+  ephemeralContainers: []
+status: {}
\ No newline at end of file
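
A side note on the test body above: the loop that counts "Unexpected domain request" alerts for the `server` container is written out three times (initial, after merge, final). A small helper could carry that weight once. The sketch below is not part of the patch; the `alertLike` element type is hypothetical and stands in for whatever `testutils.GetAlerts` actually returns, assuming it exposes a `Labels` map the way the loops read it:

```go
// alertLike is a stand-in for the real alert type returned by
// testutils.GetAlerts; the only assumption is a Labels map, matching
// how the test above reads alert.Labels.
type alertLike struct {
	Labels map[string]string
}

// countAlerts tallies alerts whose rule name and container both match.
func countAlerts(alerts []alertLike, ruleName, containerName string) int {
	count := 0
	for _, alert := range alerts {
		if name, ok := alert.Labels["rule_name"]; ok && name == ruleName && alert.Labels["container_name"] == containerName {
			count++
		}
	}
	return count
}
```

Each of the three counting sites would then collapse to a call like `countAlerts(initialAlerts, "Unexpected domain request", "server")`.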
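The `ptr` helper added at the bottom of the test file only handles `int32`. On Go 1.18 or newer the same idea can be written once for any type (the standard `k8s.io/utils/ptr` package ships an equivalent `ptr.To`); a minimal sketch:

```go
// ptrTo returns a pointer to any value, generalizing the int32-only
// ptr helper in the test file above. Requires Go 1.18+ generics.
func ptrTo[T any](v T) *T {
	return &v
}
```

Call sites read the same, e.g. `Port: ptrTo(int32(443))`.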
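Finally, `tests/resources/user-profile.yaml` is a template: `{name}` and `{namespace}` are placeholders that must be substituted before the profile can be applied. The sketch below shows one way that step could look; it is an illustration only. The `applyUserProfile` name is invented, and it assumes the storage clientset used in the test also exposes an `ApplicationProfiles` getter and that the import paths match the kubescape/storage generated client:

```go
package tests

import (
	"context"
	"os"
	"strings"
	"testing"

	"github.com/kubescape/k8s-interface/k8sinterface"
	"github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1"
	spdxv1beta1client "github.com/kubescape/storage/pkg/generated/clientset/versioned/typed/softwarecomposition/v1beta1"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

// applyUserProfile fills the {name}/{namespace} placeholders in the YAML
// template and creates the user-managed ApplicationProfile through the
// storage API, mirroring how the test above creates its NetworkNeighborhood.
func applyUserProfile(t *testing.T, path, name, namespace string) {
	raw, err := os.ReadFile(path)
	require.NoError(t, err, "Failed to read profile template")

	filled := strings.ReplaceAll(string(raw), "{name}", name)
	filled = strings.ReplaceAll(filled, "{namespace}", namespace)

	var profile v1beta1.ApplicationProfile
	require.NoError(t, yaml.Unmarshal([]byte(filled), &profile), "Failed to unmarshal profile")

	k8sClient := k8sinterface.NewKubernetesApi()
	storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig)
	_, err = storageClient.ApplicationProfiles(namespace).Create(context.Background(), &profile, metav1.CreateOptions{})
	require.NoError(t, err, "Failed to create user application profile")
}
```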