From 80cc0dd5a6159f8ffd66478b6e8fa9b498113487 Mon Sep 17 00:00:00 2001 From: ralongit Date: Thu, 21 Sep 2023 18:12:09 +0300 Subject: [PATCH 1/2] Refactor code & Add tests - DRY for workloads and resources related services. - Created tests - Modify go routines - Added comments --- Dockerfile | 1 - common/client.go | 35 +- common/client_test.go | 44 +++ common/logger.go | 88 +++-- common/logger_test.go | 76 ++++ common/parser.go | 128 ++++--- common/parser_test.go | 224 ++++++++++++ go.mod | 5 + main.go | 14 +- main_test.go | 32 ++ mockLogzioListener/listener.go | 292 ++++++++++++++++ mockLogzioListener/logsLister.go | 26 ++ mockLogzioListener/persistantFlagger.go | 58 ++++ resources/clusterResources.go | 83 +++-- resources/clusterResources_test.go | 280 +++++++++++++++ resources/resourceInformer.go | 284 +++++++++------ resources/resourceInformer_test.go | 84 +++++ resources/resourceServices.go | 441 +++++------------------- resources/resourceServices_test.go | 167 +++++++++ resources/workloadServices.go | 295 +++------------- resources/workloadServices_test.go | 109 ++++++ 21 files changed, 1953 insertions(+), 813 deletions(-) create mode 100644 common/client_test.go create mode 100644 common/logger_test.go create mode 100644 common/parser_test.go create mode 100644 main_test.go create mode 100644 mockLogzioListener/listener.go create mode 100644 mockLogzioListener/logsLister.go create mode 100644 mockLogzioListener/persistantFlagger.go create mode 100644 resources/clusterResources_test.go create mode 100644 resources/resourceInformer_test.go create mode 100644 resources/resourceServices_test.go create mode 100644 resources/workloadServices_test.go diff --git a/Dockerfile b/Dockerfile index fffc853..a86532f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,5 +9,4 @@ RUN go build -o main . 
FROM alpine:3.14 COPY --from=build /app/main /app/main - CMD ["/app/main"] \ No newline at end of file diff --git a/common/client.go b/common/client.go index 6267694..c0ca4b0 100644 --- a/common/client.go +++ b/common/client.go @@ -2,24 +2,28 @@ package common import ( "fmt" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/dynamic" // Importing the dynamic client package + "k8s.io/client-go/kubernetes" // Importing the kubernetes client package + "k8s.io/client-go/rest" // Importing the rest client package + "k8s.io/client-go/tools/clientcmd" // Importing the clientcmd package for building config from kubeconfig "log" - "os" + "os" // Importing the os package for reading environment variables ) -var K8sClient *kubernetes.Clientset -var DynamicClient *dynamic.DynamicClient +var K8sClient *kubernetes.Clientset // Global variable for the Kubernetes client +var DynamicClient *dynamic.DynamicClient // Global variable for the dynamic client func CreateClusterClient() { - // Create a Kubernetes client. 
+ // This function creates a Kubernetes client using in-cluster configuration + + // Getting the in-cluster configuration config, err := rest.InClusterConfig() if err != nil { fmt.Println(err) return } + + // Creating the Kubernetes client using the in-cluster configuration K8sClient, err = kubernetes.NewForConfig(config) if err != nil { fmt.Println(err) @@ -28,21 +32,34 @@ func CreateClusterClient() { } func ConfigureClusterDynamicClient() (clusterClient *dynamic.DynamicClient) { - // + // This function configures a dynamic client for the Kubernetes cluster + // by either using the KUBECONFIG environment variable or falling back to in-cluster configuration + var err error var clusterConfig *rest.Config + + // Reading the KUBECONFIG environment variable kubeConfig := os.Getenv("KUBECONFIG") if kubeConfig != "" { + // If KUBECONFIG is set, build the configuration from KUBECONFIG clusterConfig, err = clientcmd.BuildConfigFromFlags("", kubeConfig) } else { + // If KUBECONFIG is not set, get the in-cluster configuration clusterConfig, err = rest.InClusterConfig() } + + // If there is an error in getting the configuration, log the error and exit if err != nil { log.Fatalln(err) } + + // Creating the dynamic client using the cluster configuration clusterClient, err = dynamic.NewForConfig(clusterConfig) + + // If there is an error in creating the dynamic client, log the error and exit if err != nil { log.Fatalln(err) } + return clusterClient } diff --git a/common/client_test.go b/common/client_test.go new file mode 100644 index 0000000..a880f2a --- /dev/null +++ b/common/client_test.go @@ -0,0 +1,44 @@ +package common + +import ( + "k8s.io/apimachinery/pkg/runtime" + fakeDynamic "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/kubernetes/fake" + "testing" +) + +func CreateFakeClient() (mockClient *fake.Clientset) { + + mockClient = fake.NewSimpleClientset() + + return mockClient +} +func CreateDynamicFakeClient() (mockDynamicClient *fakeDynamic.FakeDynamicClient) { + 
scheme := runtime.NewScheme() + + mockDynamicClient = fakeDynamic.NewSimpleDynamicClient(scheme) + + return mockDynamicClient +} + +// TestFakeClient demonstrates how to use a fake client with SharedInformerFactory in tests. +func TestFakeDynamicClient(t *testing.T) { + // Create the fake client. + fakeDynamicClient := CreateDynamicFakeClient() + if fakeDynamicClient == nil { + t.Error("Failed to create fake dynamic client") + } else { + t.Log("Created fake dynamic client") + } + +} +func TestFakeClusterClient(t *testing.T) { + // Create the fake client. + fakeK8sClient := CreateFakeClient() + + if fakeK8sClient == nil { + t.Error("Failed to create fake client") + } else { + t.Log("Created fake client") + } +} diff --git a/common/logger.go b/common/logger.go index 67ada9e..89dc5a3 100644 --- a/common/logger.go +++ b/common/logger.go @@ -3,23 +3,28 @@ package common import ( "encoding/json" "fmt" - "github.com/logzio/logzio-go" + "github.com/logzio/logzio-go" // Importing logz.io library for logging "log" "os" + "sync" "time" ) -var LogzioLogger *logzio.LogzioSender +var LogzioLogger *logzio.LogzioSender // Global variable for logz.io logger +var wg sync.WaitGroup // Global variable for wait group -func ConfigureLogzioLogger() (LogzioLogger *logzio.LogzioSender) { - // Creates a resources using Logz.io output configuration: https://app.logz.io/#/dashboard/send-your-data/log-sources/go +func ConfigureLogzioLogger() { + // Function to configure logz.io logger var err error - LogzioToken := os.Getenv("LOGZIO_TOKEN") // Log shipping token for Logz.io + + // Reading logz.io token from environment variables + LogzioToken := os.Getenv("LOGZIO_TOKEN") if LogzioToken != "" { LogzioListener := os.Getenv("LOGZIO_LISTENER") if LogzioListener == "" { LogzioListener = "https://listener.logz.io:8071" // Defaults to us-east-1 region } + // Creating a new logz.io logger with specified configuration LogzioLogger, err = logzio.New( LogzioToken, logzio.SetDebug(os.Stderr), @@ -29,58 
+34,99 @@ func ConfigureLogzioLogger() (LogzioLogger *logzio.LogzioSender) { logzio.SetDrainDiskThreshold(99), ) if err != nil { + // If there is an error in creating the logger, log the error and exit log.Fatalf("\n[FATAL] Failed to configure the Logz.io resources.\nERROR: %v\n", err) } } else { - log.Fatalf("\n[FATAL] Invalid token configured for LOGZIO_TOKEN environemt variable.\n") + // If LOGZIO_TOKEN is not set, log error and exit + log.Fatalf("\n[FATAL] Invalid token configured for LOGZIO_TOKEN environment variable.\n") } - return LogzioLogger } -func shipLogMessage(message string) { +func shipLogEvent(eventLog string) { + // Function to ship log event to logz.io - log.Printf("\n[LOG]: %s\n", message) - err := LogzioLogger.Send([]byte(message)) + // Logging the event + log.Printf("\n[LOG]: %s\n", eventLog) + err := LogzioLogger.Send([]byte(eventLog)) // Sending the log event to logz.io if err != nil { - log.Printf("\nFailed to send log:\n%v to Logz.io.\nRelated error:\n%v.", message, err) + // If there is an error in sending the log, log the error + log.Printf("\nFailed to send log:\n%v to Logz.io.\nRelated error:\n%v.", eventLog, err) return } - LogzioLogger.Drain() + LogzioLogger.Drain() // Draining the logger + defer wg.Done() // Signaling that this function is done } -func SendLog(msg string, extraFields ...interface{}) { +func ParseEventLog(msg string, extraFields ...interface{}) (eventLog string) { + // This function parses an event log message and any extra fields, + // converting them into a JSON string. 
+ var err error var parsedEventLog []byte var logMap map[string]interface{} + + // Reading environment variables environmentID := os.Getenv("ENV_ID") logType := os.Getenv("LOG_TYPE") + if logType == "" { - logType = "logzio-informer-events" + logType = "logzio-k8s-events" // Default log type } + + // Creating a new log event with the provided message, type and environment ID logEvent := LogEvent{Message: msg, Type: logType, EnvironmentID: environmentID} if len(extraFields) > 0 { + // If there are extra fields, convert them to a JSON string and unmarshal into logEvent extra := fmt.Sprintf("%s", extraFields...) - log.Printf("\n[DEBUG] Attemping to parse log extra data(%T): %s\tlog(%T):\n%v to Logz.io.\n", extra, extra, logEvent, logEvent) - - if err := json.Unmarshal([]byte(extra), &logEvent); err != nil && extra != "" { + if err = json.Unmarshal([]byte(extra), &logEvent); err != nil && extra != "" { + // If there is an error in parsing the extra fields, log the error log.Printf("\n[ERROR] Failed to parse log extra data(%T): %s\tlog(%T):\n%v to Logz.io.\nRelated error:\n%v", extra, logEvent, extra, logEvent, err) } } + + // Marshal the log event into a byte slice and unmarshal into logMap logByte, _ := json.Marshal(&logEvent) json.Unmarshal(logByte, &logMap) + + // Parse the log map to fit logz.io limits parsedLogMap := parseLogzioLimits(logMap) + + // Marshal the parsed log map into a byte slice parsedEventLog, err = json.Marshal(parsedLogMap) if err != nil { + // If there is an error in marshaling, log the error log.Printf("\n[ERROR] Failed to parse event log:\n%v\nERROR:\n%v", logEvent, err) } - message := fmt.Sprintf("%s", string(parsedEventLog)) - if message == "" { - log.Printf("\n[DEBUG]: Empty message, not sending to Logz.io.\n") + // Convert the parsed event log byte slice to a string + eventLog = fmt.Sprintf("%s", string(parsedEventLog)) + + return eventLog +} + +func SendLog(msg string, extraFields ...interface{}) { + // This function sends a log message 
and any extra fields to logz.io. + + if LogzioLogger != nil { + // Parse the log message and extra fields into a JSON string + eventLog := ParseEventLog(msg, extraFields) + + if eventLog == "" { + // If the parsed event log is empty, drop the log + } else { + // Ship the parsed event log to logz.io in a separate goroutine + go shipLogEvent(eventLog) + + // Increment the wait group counter and wait for all goroutines to finish + wg.Add(1) + wg.Wait() + } } else { - go shipLogMessage(message) + // If the logz.io logger is not configured, log a message and do not send the log + log.Printf("Logz.io logger isn't configured.\nLog won't be sent:\n%s", msg) } } diff --git a/common/logger_test.go b/common/logger_test.go new file mode 100644 index 0000000..a40640c --- /dev/null +++ b/common/logger_test.go @@ -0,0 +1,76 @@ +package common + +import ( + "github.com/logzio/logzio-go" + "main.go/mockLogzioListener" + "testing" + + "os" + "time" +) + +func TestStartMockLogzioListener(t *testing.T) { + mockLogzioListener.StartMockLogzioListener() + mockListener := mockLogzioListener.MockListener + if mockListener != nil { + mockListenerURL := mockLogzioListener.GetMockListenerURL() + logsCount := mockListener.NumberOfLogs() + if mockListenerURL != "" { + err := os.Setenv("LOGZIO_TOKEN", "test-shipping-token") + if err != nil { + return + } + err = os.Setenv("LOGZIO_LISTENER", mockListenerURL) + if err != nil { + return + } + + if logsCount > 0 { + t.Log("Successfully sent logs.") + } + } + } else { + t.Error("Failed to start mock listener") + } + +} + +func TestConfigureLogzioLogger(t *testing.T) { + // Creates a resources using Logz.io output configuration: https://app.logz.io/#/dashboard/send-your-data/log-sources/go + var err error + LogzioToken := os.Getenv("LOGZIO_TOKEN") // Log shipping token for Logz.io + if LogzioToken != "" { + LogzioListener := os.Getenv("LOGZIO_LISTENER") + if LogzioListener == "" { + LogzioListener = "https://listener.logz.io:8071" // Defaults to 
us-east-1 region + } + LogzioLogger, err = logzio.New( + LogzioToken, + logzio.SetDebug(os.Stderr), + logzio.SetUrl(LogzioListener), + logzio.SetDrainDuration(time.Second*5), + logzio.SetTempDirectory("myQueue"), + logzio.SetDrainDiskThreshold(99), + ) + if err != nil { + t.Errorf("\n[FATAL] Failed to configure the Logz.io logger.\nERROR: %v\n", err) + } else { + t.Log("Successfully configured the Logz.io logger.\n") + } + } else { + t.Error("\n[FATAL] Invalid token configured for LOGZIO_TOKEN environment variable.\n") + } + +} +func TestSendLog(t *testing.T) { + + t.Run("SendLog", func(t *testing.T) { + os.Setenv("ENV_ID", "dev") + os.Setenv("LOG_TYPE", "logzio-k8s-events-test") + logsListInstance := mockLogzioListener.GetLogsListInstance() + allLogs := logsListInstance.List + for _, testLog := range allLogs { + SendLog("Test log", testLog) + } + }) +} diff --git a/common/parser.go b/common/parser.go index 030101d..12d779e 100644 --- a/common/parser.go +++ b/common/parser.go @@ -1,17 +1,20 @@ package common import ( - "crypto/md5" - "encoding/json" - "fmt" - "k8s.io/utils/strings/slices" - "log" - "reflect" - "strings" + "crypto/md5" // For hashing sensitive data + "encoding/json" // For marshalling and unmarshalling JSON + "fmt" // For formatting strings + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" // For handling unstructured data + "k8s.io/utils/strings/slices" // For string slicing + "log" // For logging errors + "reflect" // For handling reflection + "strings" // For string operations ) +// Struct types for various Kubernetes event and metadata var eventKind string +// Struct types for various Kubernetes event and metadata type KubernetesMetadata struct { Name string `json:"name,omitempty"` Namespace string `json:"namespace,omitempty"` @@ -20,18 +23,21 @@ type KubernetesMetadata struct { type KubernetesEvent struct { Kind string `json:"kind,omitempty"` KubernetesMetadata `json:"metadata,omitempty"` + ResourceObjects } -type EventStruct struct { - 
EventType string `json:"eventType,omitempty"` +type ResourceObjects struct { NewObject map[string]interface{} `json:"newObject,omitempty"` OldObject map[string]interface{} `json:"oldObject,omitempty"` } +type EventStruct struct { + EventType string `json:"eventType,omitempty"` + KubernetesEvent +} type LogEvent struct { Message string `json:"message,omitempty"` EventStruct `json:",omitempty"` - Type string `json:"type,omitempty"` - EnvironmentID string `json:"env_id,omitempty"` - Log map[string]interface{} `json:"log,omitempty"` + Type string `json:"type,omitempty"` + EnvironmentID string `json:"env_id,omitempty"` RelatedClusterServices `json:"relatedClusterServices,omitempty"` } @@ -47,7 +53,8 @@ type RelatedClusterServices struct { ClusterRoleBindings []string `json:"clusterrolebindings,omitempty"` } -func isValidList(arrayFieldI []interface{}) (listField []interface{}, isValidArray bool) { +// IsValidList Function to check if an array is valid +func IsValidList(arrayFieldI []interface{}) (listField []interface{}, isValidArray bool) { // Logz.io doesn't support nested array objects well as they contain different data types for _, v := range arrayFieldI { _, isMap := v.(map[string]interface{}) @@ -58,7 +65,9 @@ func isValidList(arrayFieldI []interface{}) (listField []interface{}, isValidArr return arrayFieldI, isValidArray } +// ParseEventMessage Function to parse event messages func ParseEventMessage(eventType string, resourceName string, resourceKind string, resourceNamespace string, newResourceVersion string, oldResourceVersions ...string) (msg string) { + if eventType == "MODIFIED" { if len(oldResourceVersions) > 0 { oldResourceVersion := oldResourceVersions[0] @@ -75,28 +84,31 @@ func ParseEventMessage(eventType string, resourceName string, resourceKind strin return msg } -func formatFieldName(field string) (fieldName string) { +// FormatFieldName Function to format field name +func FormatFieldName(field string) (fieldName string) { fieldName = field // Check 
if the field contains a dot/slash/hyphen and replace it with underscore if strings.ContainsAny(field, "/.-") { - fieldName = strings.Replace(field, ".", "_", -1) - fieldName = strings.Replace(fieldName, "/", "_", -1) - fieldName = strings.Replace(fieldName, "-", "_", -1) + fieldName = strings.ReplaceAll(fieldName, ".", "_") + fieldName = strings.ReplaceAll(fieldName, "/", "_") + fieldName = strings.ReplaceAll(fieldName, "-", "_") } return fieldName } -func formatFieldValue(value interface{}) (fieldValue interface{}) { + +// FormatFieldValue Function to format field value +func FormatFieldValue(value interface{}) (fieldValue interface{}) { fieldValue = value // Check if the field value is an array and parse it to a string arrayFieldI, ok := value.([]interface{}) if ok { - _, isValidArray := isValidList(arrayFieldI) + _, isValidArray := IsValidList(arrayFieldI) if !isValidArray { arrayNestedField, err := json.Marshal(arrayFieldI) if err != nil { - log.Printf("\n[ERROR] Failed to parse array nested field. 
%s\nERROR:\n%v", err) + log.Printf("\n[ERROR] Failed to parse array nested field: %s\nERROR:\n%v", arrayNestedField, err) } // Flatten the array nested field fieldValue = string(arrayNestedField) @@ -106,7 +118,9 @@ func formatFieldValue(value interface{}) (fieldValue interface{}) { return fieldValue } -func formatFieldOverLimit(fieldName string, fieldValue interface{}) (fieldOverLimit string, truncatedFieldValue interface{}) { + +// FormatFieldOverLimit Function to format field over limit +func FormatFieldOverLimit(fieldName string, fieldValue interface{}) (fieldOverLimit string, truncatedFieldValue interface{}) { fieldOverLimit = fieldName truncatedFieldValue = fieldValue var valueLengthLimit = 32700 @@ -122,26 +136,47 @@ func formatFieldOverLimit(fieldName string, fieldValue interface{}) (fieldOverLi return fieldOverLimit, truncatedFieldValue } + +// IsEmptyMap Function to check if the map is empty +func IsEmptyMap(value interface{}) bool { + isEmpty := false + v := reflect.ValueOf(value) + if v.Kind() == reflect.Map && v.Len() == 0 { + isEmpty = true + } + return isEmpty +} + +// Function to parse logz.io limits func parseLogzioLimits(eventLog map[string]interface{}) (parsedLogEvent map[string]interface{}) { // Declare variables // Iterate over the log parsedLogEvent = eventLog - eventI := eventLog["newObject"] - if eventI != nil { - eventKind = eventI.(map[string]interface{})["kind"].(string) + + if eventLog["newObject"] != nil { + eventI := eventLog["newObject"].(map[string]interface{}) + if eventI["kind"] != nil { + eventKind = eventI["kind"].(string) + } } for field, value := range eventLog { - // Check if the field contains a dot - fieldName := formatFieldName(field) + // Check if the field contains a dot/slash/hyphen and replace it with underscore + // Check if the field is empty + + if !reflect.ValueOf(value).IsValid() || value == nil || IsEmptyMap(value) { + // Remove the empty or invalid/nil/struct{} field from the log + delete(parsedLogEvent, field) + 
} + fieldName := FormatFieldName(field) if fieldName != field { // Rename the field parsedLogEvent[fieldName] = value // Remove the original field delete(parsedLogEvent, field) } - maskedField, maskedValue := maskSensitiveData(eventKind, fieldName, value) + maskedField, maskedValue := MaskSensitiveData(eventKind, fieldName, value) if !reflect.DeepEqual(value, maskedValue) { parsedLogEvent[maskedField] = maskedValue delete(parsedLogEvent, fieldName) @@ -153,20 +188,15 @@ func parseLogzioLimits(eventLog map[string]interface{}) (parsedLogEvent map[stri if ok { parseLogzioLimits(nestedField) } else { + { - // Check if the field is empty - if !reflect.ValueOf(value).IsValid() || value == nil || value == struct{}{} { - // Remove the empty or invalid/nil/struct{} field from the log - delete(parsedLogEvent, fieldName) - } else { - - fieldValue := formatFieldValue(value) + fieldValue := FormatFieldValue(value) if !reflect.DeepEqual(value, fieldValue) { // Add the field value to the parsed log parsedLogEvent[fieldName] = fieldValue } - fieldOverLimit, truncatedFieldValue := formatFieldOverLimit(fieldName, fieldValue) + fieldOverLimit, truncatedFieldValue := FormatFieldOverLimit(fieldName, fieldValue) if fieldOverLimit != fieldName { parsedLogEvent[fieldOverLimit] = truncatedFieldValue delete(parsedLogEvent, fieldName) @@ -180,6 +210,15 @@ func parseLogzioLimits(eventLog map[string]interface{}) (parsedLogEvent map[stri return parsedLogEvent } + +// NewUnstructured Function to create new unstructured data +func NewUnstructured(rawObj map[string]interface{}) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: rawObj, + } +} + +// Function to hash data func hashData(data interface{}) (hashedData string) { // Create a new MD5 hash object hash := md5.New() @@ -195,16 +234,23 @@ func hashData(data interface{}) (hashedData string) { return hashedData } -func maskSensitiveData(eventKind string, fieldName string, fieldValue interface{}) (maskedField string, 
maskedValue interface{}) { - maskedValue = fieldValue - maskedField = fieldName + +// MaskSensitiveData Function to mask sensitive data +func MaskSensitiveData(eventKind string, fieldName string, fieldValue interface{}) (maskedField string, maskedValue interface{}) { + maskedValue = fieldValue // Initialize maskedValue to original fieldValue + maskedField = fieldName // Initialize maskedField to original fieldName + + // Array of field names to consider sensitive fieldsToMask := []string{"password", "secret", "token", "key", "access_token", "api_key", "api_secret", "api_token", "api_key_id", "api_secret_id", "api_token_id", "api_key_secret", "api_secret_key", "api_token_secret"} + + // Check if the field name is in the list of fields to mask, or has "_crt" in it, or is a secret data or last applied configuration if slices.Contains(fieldsToMask, fieldName) || strings.Contains(fieldName, "_crt") || (eventKind == "Secret" && (fieldName == "data" || fieldName == "kubectl_kubernetes_io_last_applied_configuration")) { - // Mask the field from the log + // If the field is sensitive, mask the field value by hashing it stringValue := fmt.Sprintf("%v", fieldValue) maskedValue = hashData(stringValue) - maskedField = fmt.Sprintf("%s_hashed", fieldName) - + maskedField = fmt.Sprintf("%s_hashed", fieldName) // Append "_hashed" to the field name } + + // Return the masked field name and value return maskedField, maskedValue } diff --git a/common/parser_test.go b/common/parser_test.go new file mode 100644 index 0000000..4db261a --- /dev/null +++ b/common/parser_test.go @@ -0,0 +1,224 @@ +package common + +import ( + "encoding/json" + "log" + "reflect" + "testing" +) + +func GetTestEventLog() (eventLog map[string]interface{}) { + eventLogData := []byte(`{ + "de.dot": "testz", + "pokemon": null, + "relatedClusterServices": { + "statefulsets": [ + "prometheus-kube-prometheus-stack-prometheus-statefulset" + ], + "pods": [ + "prometheus-kube-prometheus-stack-prometheus-pod-0" + ], + 
"daemonsets": [ + "prometheus-kube-prometheus-stack-prometheus-daemonset" + ], + "deployments": [ + "prometheus-kube-prometheus-stack-prometheus-deployment" + ] + }, + "eventType": "MODIFIED", + "message": "[EVENT] Resource: prometheus-kube-prometheus-stack-prometheus of kind: Secret in namespace: monitoring was updated from version: 26515170 to new version: 27160250.\n", + "type": "logzio-k8s-events", + "env_id": "logzio-staging", + "newObject": { + "data": "deff2df0cb0dac69be67c39d9e769e0f", + "metadata": { + "uid": "c3c91497-6570-48f0-a5df-ea42ad36442b", + "managedFields": [ + { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:data": { + ".": {}, + "f:prometheus.yaml.gz": {} + }, + "f:metadata": { + "f:annotations": { + ".": {}, + "f:generated": {} + }, + "f:labels": { + ".": {}, + "f:managed-by": {} + }, + "f:ownerReferences": { + ".": {}, + "k:{\"uid\":\"90d247d0-b3e4-43e2-a2ec-bc0f4d7aad82\"}": {} + } + }, + "f:type": {} + }, + "manager": "PrometheusOperator", + "operation": "Update", + "time": "2023-09-20T10:27:04Z" + } + ], + "resourceVersion": "27160250", + "creationTimestamp": "2023-07-31T12:35:01Z", + "name": "prometheus-kube-prometheus-stack-prometheus", + "namespace": "monitoring", + "annotations": { + "generated": "true" + }, + "labels": { + "managed_by": "prometheus-operator" + }, + "ownerReferences": [ + { + "apiVersion": "monitoring.coreos.com/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "Prometheus", + "name": "kube-prometheus-stack-prometheus", + "uid": "90d247d0-b3e4-43e2-a2ec-bc0f4d7aad82" + } + ] + }, + "apiVersion": "v1", + "kind": "Secret", + "type": "Opaque" + }, + "oldObject": { + "data": "290f52ba4cf36fb2692f7715d929e02c", + "metadata": { + "uid": "c3c91497-6570-48f0-a5df-ea42ad36442b", + "managedFields": [ + { + "apiVersion": "v1", + "fieldsType": "FieldsV1", + "fieldsV1": { + "f:data": { + ".": {}, + "f:prometheus.yaml.gz": {} + }, + "f:metadata": { + "f:annotations": { + ".": {}, + 
"f:generated": {} + }, + "f:labels": { + ".": {}, + "f:managed-by": {} + }, + "f:ownerReferences": { + ".": {}, + "k:{\"uid\":\"90d247d0-b3e4-43e2-a2ec-bc0f4d7aad82\"}": {} + } + }, + "f:type": {} + }, + "manager": "PrometheusOperator", + "operation": "Update", + "time": "2023-09-19T11:57:26Z" + } + ] + }, + "resourceVersion": "26515170", + "creationTimestamp": "2023-07-31T12:35:01Z", + "name": "prometheus-kube-prometheus-stack-prometheus", + "namespace": "monitoring", + "annotations": { + "generated": "true" + }, + "labels": { + "managed_by": "prometheus-operator" + }, + "ownerReferences": [ + { + "apiVersion": "monitoring.coreos.com/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "Prometheus", + "name": "kube-prometheus-stack-prometheus", + "uid": "90d247d0-b3e4-43e2-a2ec-bc0f4d7aad82" + } + ], + "apiVersion": "v1", + "kind": "Secret", + "type": "Opaque" + } +}`) + err := json.Unmarshal(eventLogData, &eventLog) + if err != nil { + log.Printf("Error unmarshalling event log: %v", err) + return nil + } + return eventLog +} +func TestParseLogzioLimits(t *testing.T) { + eventLog := GetTestEventLog() + parsedLogEvent := eventLog + + t.Run("testNewObjectKind", func(t *testing.T) { + if eventLog["newObject"] != nil { + eventI := eventLog["newObject"].(map[string]interface{}) + if eventI["kind"] != nil { + eventKind = eventI["kind"].(string) + } + } + }) + + for field, value := range eventLog { + var fieldName string + var fieldValue interface{} + t.Run("testRemoveEmptyField", func(t *testing.T) { + if !reflect.ValueOf(value).IsValid() || value == nil || IsEmptyMap(value) { + delete(parsedLogEvent, field) + t.Logf("Successfully removed empty/invalid field %s from the log.", field) + } + }) + + t.Run("testRenameField", func(t *testing.T) { + fieldName = FormatFieldName(field) + if fieldName != field { + parsedLogEvent[fieldName] = value + delete(parsedLogEvent, field) + t.Logf("Successfully renamed badly named field: %s to: %s in the log.", field, 
fieldName) + } + }) + + t.Run("testMaskSensitiveData", func(t *testing.T) { + maskedField, maskedValue := MaskSensitiveData(eventKind, fieldName, value) + if !reflect.DeepEqual(value, maskedValue) { + parsedLogEvent[maskedField] = maskedValue + delete(parsedLogEvent, fieldName) + t.Logf("Successfully masked field: %s in the log.", field) + } + }) + + t.Run("testNestedMapOrStruct", func(t *testing.T) { + nestedField, ok := value.(map[string]interface{}) + if ok { + t.Logf("The field: %s is a nested map/struct, so parsing its limits.", field) + parseLogzioLimits(nestedField) + } + }) + + t.Run("testFormatFieldValue", func(t *testing.T) { + fieldValue = FormatFieldValue(value) + if !reflect.DeepEqual(value, fieldValue) { + parsedLogEvent[fieldName] = fieldValue + t.Logf("Successfully formatted field: %s with value: %s to the parsed log.", fieldName, fieldValue) + } + }) + + t.Run("testFormatFieldOverLimit", func(t *testing.T) { + fieldOverLimit, truncatedFieldValue := FormatFieldOverLimit(fieldName, fieldValue) + if fieldOverLimit != fieldName { + parsedLogEvent[fieldOverLimit] = truncatedFieldValue + delete(parsedLogEvent, fieldName) + t.Logf("Successfully truncated field: %s with value: %s to the parsed log.", fieldOverLimit, truncatedFieldValue) + } + }) + } +} diff --git a/go.mod b/go.mod index 5290ae8..14cd489 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.19 require ( github.com/logzio/logzio-go v1.0.6 + github.com/stretchr/testify v1.8.1 k8s.io/api v0.27.4 k8s.io/apimachinery v0.27.4 k8s.io/client-go v0.27.4 @@ -14,6 +15,7 @@ require ( github.com/beeker1121/goque v2.1.0+incompatible // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -36,9 +38,12 @@ require ( github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/onsi/gomega v1.27.7 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/shirou/gopsutil/v3 v3.22.3 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.uber.org/atomic v1.9.0 // indirect diff --git a/main.go b/main.go index e9093ea..0e14150 100644 --- a/main.go +++ b/main.go @@ -1,19 +1,23 @@ package main import ( - "main.go/common" - "main.go/resources" + "main.go/common" // Importing common package for application wide functions + "main.go/resources" // Importing resources package for handling resources ) func main() { - common.LogzioLogger = common.ConfigureLogzioLogger() - // + common.ConfigureLogzioLogger() // Configure logz.io logger + + // Sending a log message indicating the start of K8S Events Logz.io Integration common.SendLog("Starting K8S Events Logz.io Integration.") + + // Configuring dynamic client for kubernetes cluster common.DynamicClient = common.ConfigureClusterDynamicClient() if common.DynamicClient != nil { + // Adding event handlers if dynamic client is configured successfully resources.AddEventHandlers() } - common.LogzioLogger.Stop() + common.LogzioLogger.Stop() // Stopping the logz.io logger after the application finishes } diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000..b49b04c --- /dev/null +++ b/main_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "log" + "main.go/common" + "main.go/mockLogzioListener" + "testing" +) + +func TestDeployEvents(t *testing.T) { + isListening := mockLogzioListener.SetupMockListener() + log.Printf("Setup mock listener.") + + if isListening { + log.Printf("Attempting configuring K8S Events 
Logz.io logger.") + common.ConfigureLogzioLogger() + + if common.LogzioLogger != nil { + common.SendLog("Started K8S Events Logz.io Integration.") + } + + } + + //// + // + //common.DynamicClient = common.ConfigureClusterDynamicClient() + //if common.DynamicClient != nil { + // resources.AddEventHandlers() + //} + // + //common.LogzioLogger.Stop() +} diff --git a/mockLogzioListener/listener.go b/mockLogzioListener/listener.go new file mode 100644 index 0000000..f377d41 --- /dev/null +++ b/mockLogzioListener/listener.go @@ -0,0 +1,292 @@ +package mockLogzioListener + +import ( + "encoding/json" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "strings" + "sync" +) + +// ListenerHandler is an empty struct that will implement the http.Handler interface +type ListenerHandler struct{} + +// MockLogzioListener is a struct that represents a mock Logz.io listener for testing +type MockLogzioListener struct { + Port int + Host string + LogsList []string + PersistentFlags *PersistentFlags + server *http.Server + listeningThread *Thread +} + +// Global variables for the application +var logsList LogsList +var persistentFlags *PersistentFlags +var mutex sync.Mutex // Mutex for synchronizing access to shared variables +var serverError bool // Flag to simulate server errors +var MockListener *MockLogzioListener // The mock Logz.io listener + +// ServeHTTP is the function that gets called on each HTTP request +func (h *ListenerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + + if r.Method == "POST" { // Handle POST requests + logsList = *GetLogsListInstance() + + // Define the structure of the expected request body + type RequestBody struct { + Message string `json:"message"` + } + + // Read the request body + reqBody, err := io.ReadAll(r.Body) + + var requestBody RequestBody + json.Unmarshal(reqBody, &requestBody) + b, err := json.Marshal(requestBody) + + fmt.Printf("%s", b) + if err != nil { + http.Error(w, fmt.Sprintf("Bad Request\nRequest:\n%v", 
requestBody), http.StatusBadRequest) + return + } + + // Log the received POST request + log.Printf("Received POST request to: %s \nRequest:\n%v", r.Host, requestBody) + + // Split the logs by new line + allLogs := strings.Split(string(b), "\n") + + // If no logs are received, return a bad request error + if len(allLogs) == 0 { + http.Error(w, "Bad Request", http.StatusBadRequest) + return + } + + mutex.Lock() // Lock the mutex to ensure thread-safe access to shared variables + defer mutex.Unlock() + + // If the serverError flag is set, return an internal server error + if serverError { + http.Error(w, "Issue!!!!!!!", http.StatusInternalServerError) + return + } + + // Append each log to the logs list + for _, testLog := range allLogs { + if testLog != "" { + logsList.List = append(logsList.List, testLog) + } + } + + // Return a success response + w.WriteHeader(http.StatusOK) + w.Write([]byte("Shabam! got logs.")) + } else { // For all other HTTP methods, return a method not allowed error + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + } +} + +// NewMockLogzioListener function creates a new instance of MockLogzioListener +// It finds an available port and initializes the server with that port and a listener handler +func NewMockLogzioListener() *MockLogzioListener { + // find an available port + port, err := findAvailablePort() + if err != nil { + fmt.Println("Error finding available port:", err) + } + + // define the host + host := "localhost" + + // create a new listener handler + listenerHandler := &ListenerHandler{} + + // create a new http server + server := &http.Server{ + Addr: fmt.Sprintf("%s:%d", host, port), + Handler: listenerHandler, + } + + // create a new thread for listening + listeningThread := &Thread{ + target: startListening, + } + + // return a new instance of MockLogzioListener + return &MockLogzioListener{ + Port: port, + Host: host, + LogsList: logsList.List, + PersistentFlags: persistentFlags, + server: server, + 
listeningThread: listeningThread, + } +} + +// StartListening is a method that starts the listening thread +func (ml *MockLogzioListener) StartListening() { + ml.listeningThread.Start() +} + +// startListening is a function that starts the http server +func startListening() { + err := MockListener.server.ListenAndServe() + if err != nil { + fmt.Println("HTTP server error:", err) + } +} + +// FindLog is a method that locks the mutex and searches the logs list +// for a specific log string, returning a boolean value +func (ml *MockLogzioListener) FindLog(searchLog string) bool { + mutex.Lock() + defer mutex.Unlock() + + // search for the log in the logs list + for _, currentLog := range ml.LogsList { + if strings.Contains(currentLog, searchLog) { + return true + } + } + + return false +} + +// NumberOfLogs is a method that locks the mutex and returns the number of logs in the logs list +func (ml *MockLogzioListener) NumberOfLogs() int { + mutex.Lock() + defer mutex.Unlock() + + // return the number of logs + return len(ml.LogsList) +} + +// ClearLogsBuffer is a method that locks the mutex and clears the logs list +func (ml *MockLogzioListener) ClearLogsBuffer() { + mutex.Lock() + defer mutex.Unlock() + + // clear the logs list + ml.LogsList = nil +} + +// SetServerError is a method that locks the mutex and sets the server error to true +func (ml *MockLogzioListener) SetServerError() { + mutex.Lock() + defer mutex.Unlock() + + serverError = true +} + +// ClearServerError is a method that locks the mutex and sets the server error to false +func (ml *MockLogzioListener) ClearServerError() { + mutex.Lock() + defer mutex.Unlock() + + serverError = false +} + +// GetServerError is a method that locks the mutex and returns the server error status +func (pf *PersistentFlags) GetServerError() bool { + mutex.Lock() + defer mutex.Unlock() + + return serverError +} + +// SetServerError is a method that locks the mutex and sets the server error to true +func (pf *PersistentFlags) 
SetServerError() { + mutex.Lock() + defer mutex.Unlock() + + serverError = true +} + +// ClearServerError is a method that locks the mutex and sets the server error to false +func (pf *PersistentFlags) ClearServerError() { + mutex.Lock() + defer mutex.Unlock() + + serverError = false +} + +// Thread is a struct that wraps a function to be run in a separate goroutine +type Thread struct { + target func() // the function to be run in the goroutine +} + +func (t *Thread) Start() { + go t.target() +} + +func findAvailablePort() (int, error) { + listener, err := net.Listen("tcp", ":0") + if err != nil { + return 0, err + } + defer listener.Close() + addr := listener.Addr().(*net.TCPAddr) + return addr.Port, nil +} +func GetMockListenerURL() (mockListerURL string) { + mockListerURL = fmt.Sprintf("http://%s:%d", MockListener.Host, MockListener.Port) + return mockListerURL +} + +// StartMockLogzioListener is a function that initializes a new mock Logzio listener +// and starts it in a separate goroutine. It also logs the URL on which the listener is running. +func StartMockLogzioListener() { + MockListener = NewMockLogzioListener() // create a new mock listener + go MockListener.StartListening() // start the listener in a goroutine + mockListerURL := GetMockListenerURL() // get the URL where the listener is running + log.Printf("Listening on %s\n", mockListerURL) // log the URL +} + +// SetupMockListener is a function that starts the mock Logzio listener, sets environment variables +// for the Logzio token and the listener URL, and returns a boolean indicating whether the listener +// is running successfully. It waits for the listener to be ready before setting the environment variables. 
+func SetupMockListener() (isListening bool) { + // Start mock listener + // Create a channel to signal when the listener is ready + listenerReady := make(chan bool) // a channel to signal when the listener is ready + + // Start mock listener in a separate goroutine + go func() { + StartMockLogzioListener() // start the listener + // Signal that the listener is ready + log.Println("Mock listener started") + listenerReady <- true // Send signal to listenerReady + + }() + + // Wait for the listener to be ready + <-listenerReady // block until the listener is ready + mockListener := MockListener + if mockListener != nil { + mockListenerURL := GetMockListenerURL() + if mockListenerURL != "" { + // Set the LOGZIO_TOKEN environment variable + err := os.Setenv("LOGZIO_TOKEN", "test-shipping-token") + if err != nil { + log.Println("Failed to set LOGZIO_TOKEN:", err) + return + } + // Set the LOGZIO_LISTENER environment variable + err = os.Setenv("LOGZIO_LISTENER", mockListenerURL) + if err != nil { + log.Println("Failed to set LOGZIO_LISTENER:", err) + return + } + isListening = true // indicate that the listener is running successfully + } + } else { + log.Printf("Failed to start mock listener") // log a message if the listener failed to start + } + return isListening // return the status of the listener +} diff --git a/mockLogzioListener/logsLister.go b/mockLogzioListener/logsLister.go new file mode 100644 index 0000000..c22e562 --- /dev/null +++ b/mockLogzioListener/logsLister.go @@ -0,0 +1,26 @@ +// Package mockLogzioListener contains the code for mocking a Logz.io listener for testing +package mockLogzioListener + +import "sync" // Import the sync package for safe concurrent access to shared variables + +// LogsList is a struct that represents a list of logs +type LogsList struct { + List []string +} + +// Global variables for the application +var logsListInstance *LogsList // Singleton instance of LogsList +var logsListMutex sync.Mutex // Mutex for synchronizing 
access to logsListInstance + +// GetLogsListInstance is a function that returns a singleton instance of LogsList +// It uses the "double-check locking" pattern to ensure that only one instance of LogsList is created +func GetLogsListInstance() *LogsList { + if logsListInstance == nil { // First check (not thread-safe) + logsListMutex.Lock() // Lock the mutex to ensure thread-safe access to logsListInstance + defer logsListMutex.Unlock() + if logsListInstance == nil { // Second check (thread-safe) + logsListInstance = &LogsList{} // Create a new instance of LogsList + } + } + return logsListInstance +} diff --git a/mockLogzioListener/persistantFlagger.go b/mockLogzioListener/persistantFlagger.go new file mode 100644 index 0000000..c380512 --- /dev/null +++ b/mockLogzioListener/persistantFlagger.go @@ -0,0 +1,58 @@ +// Package mockLogzioListener contains the code for mocking a Logz.io listener for testing +package mockLogzioListener + +import ( + "sync" // Import the sync package for safe concurrent access to shared variables +) + +// PersistentFlags is a struct that represents persistent flags like server errors +type PersistentFlags struct { + ServerError bool +} + +// Global variables for the application +var persistentFlagsInstance *PersistentFlags // Singleton instance of PersistentFlags +var persistentFlagsMutex sync.Mutex // Mutex for synchronizing access to persistentFlagsInstance + +// GetPersistentFlagsInstance is a function that returns a singleton instance of PersistentFlags +// It uses the "double-check locking" pattern to ensure that only one instance of PersistentFlags is created +func GetPersistentFlagsInstance() *PersistentFlags { + CreatePersistentFlags() + if persistentFlagsInstance == nil { // First check (not thread-safe) + persistentFlagsMutex.Lock() // Lock the mutex to ensure thread-safe access to persistentFlagsInstance + defer persistentFlagsMutex.Unlock() + if persistentFlagsInstance == nil { // Second check (thread-safe) + 
persistentFlagsInstance = &PersistentFlags{} // Create a new instance of PersistentFlags + } + } + return persistentFlagsInstance +} + +// CreatePersistentFlags is a function that creates an instance of PersistentFlags, +// sets and checks a server error, clears the server error, and checks the server error again +func CreatePersistentFlags() { + // Create an instance of the PersistentFlags class + persistentFlagsInstance = GetPersistentFlagsInstance() + + // Set server error + persistentFlagsInstance.SetServerError() + + // Check server error + serverError = persistentFlagsInstance.GetServerError() + if serverError { + println("Server error is true") + } else { + println("Server error is false") + } + + // Clear server error + persistentFlagsInstance.ClearServerError() + + // Check server error again + serverError = persistentFlagsInstance.GetServerError() + if serverError { + println("Server error is true") + } else { + println("Server error is false") + } +} diff --git a/resources/clusterResources.go b/resources/clusterResources.go index 3fa4802..d06d154 100644 --- a/resources/clusterResources.go +++ b/resources/clusterResources.go @@ -1,5 +1,7 @@ +// Package resources provides functionalities to interact with Kubernetes resources package resources +// Import necessary packages import ( "context" appsv1 "k8s.io/api/apps/v1" @@ -11,8 +13,46 @@ import ( "reflect" ) -func GetClusterRoleBindings() (relatedClusterRoleBindings []rbacv1.ClusterRoleBinding) { +// Workload is an interface that provides a common API for Kubernetes workloads (Pod, Deployment, etc.) 
+type Workload interface { + GetName() string + GetContainers() []corev1.Container + GetVolumes() []corev1.Volume + GetServiceAccountName() string +} +// Define types for different Kubernetes resources +type Pod corev1.Pod +type Deployment appsv1.Deployment +type DaemonSet appsv1.DaemonSet +type StatefulSet appsv1.StatefulSet + +// Implement the Workload interface for the Pod type +func (p Pod) GetName() string { return p.Name } +func (p Pod) GetContainers() []corev1.Container { return p.Spec.Containers } +func (p Pod) GetVolumes() []corev1.Volume { return p.Spec.Volumes } +func (p Pod) GetServiceAccountName() string { return p.Spec.ServiceAccountName } + +// Implement the Workload interface for the Deployment type +func (d Deployment) GetName() string { return d.Name } +func (d Deployment) GetContainers() []corev1.Container { return d.Spec.Template.Spec.Containers } +func (d Deployment) GetVolumes() []corev1.Volume { return d.Spec.Template.Spec.Volumes } +func (d Deployment) GetServiceAccountName() string { return d.Spec.Template.Spec.ServiceAccountName } + +// Implement the Workload interface for the DaemonSet type +func (d DaemonSet) GetServiceAccountName() string { return d.Spec.Template.Spec.ServiceAccountName } +func (d DaemonSet) GetName() string { return d.Name } +func (d DaemonSet) GetContainers() []corev1.Container { return d.Spec.Template.Spec.Containers } +func (d DaemonSet) GetVolumes() []corev1.Volume { return d.Spec.Template.Spec.Volumes } + +// Implement the Workload interface for the StatefulSet type +func (s StatefulSet) GetName() string { return s.Name } +func (s StatefulSet) GetContainers() []corev1.Container { return s.Spec.Template.Spec.Containers } +func (s StatefulSet) GetVolumes() []corev1.Volume { return s.Spec.Template.Spec.Volumes } +func (s StatefulSet) GetServiceAccountName() string { return s.Spec.Template.Spec.ServiceAccountName } + +// GetClusterRoleBindings retrieves all ClusterRoleBindings in the cluster +func 
GetClusterRoleBindings() (relatedClusterRoleBindings []rbacv1.ClusterRoleBinding) { // List clusterRoleBinding clusterRoleBindingsClient := common.K8sClient.RbacV1().ClusterRoleBindings() clusterRoleBindings, err := clusterRoleBindingsClient.List(context.TODO(), metav1.ListOptions{}) @@ -31,31 +71,29 @@ func GetClusterRoleBindings() (relatedClusterRoleBindings []rbacv1.ClusterRoleBi return relatedClusterRoleBindings } +// GetDeployments retrieves all Deployments in the cluster func GetDeployments() (relatedDeployments []appsv1.Deployment) { - - // // List Deployments + // List Deployments deploymentsClient := common.K8sClient.AppsV1().Deployments("") deployments, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { - // Handle error by common the error and returning an empty list of related DaemonSets. + // Handle error by common the error and returning an empty list of related deployments. log.Printf("[ERROR] Error listing Deployments: %v", err) return } - // Create a map of DaemonSet names to DaemonSet objects. + // Create a map of deployment names to deployment objects. for _, deployment := range deployments.Items { if reflect.ValueOf(deployment).IsValid() { relatedDeployments = append(relatedDeployments, deployment) } } - // Iterate through the DaemonSets and check for the config map. - return relatedDeployments } +// GetPods retrieves all Pods in the cluster func GetPods() (relatedPods []corev1.Pod) { - - // List DaemonSets + // List Pods podsClient := common.K8sClient.CoreV1().Pods("") pods, err := podsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -64,7 +102,7 @@ func GetPods() (relatedPods []corev1.Pod) { return } - // Create a map of Pods names to DaemonSet objects. + // Create a map of Pods names to Pod objects. 
for _, pod := range pods.Items { if reflect.ValueOf(pod).IsValid() { relatedPods = append(relatedPods, pod) @@ -74,8 +112,8 @@ func GetPods() (relatedPods []corev1.Pod) { return relatedPods } +// GetDaemonSets retrieves all DaemonSets in the cluster func GetDaemonSets() (relatedDaemonSets []appsv1.DaemonSet) { - // List DaemonSets daemonSetsClient := common.K8sClient.AppsV1().DaemonSets("") daemonSets, err := daemonSetsClient.List(context.TODO(), metav1.ListOptions{}) @@ -96,8 +134,8 @@ func GetDaemonSets() (relatedDaemonSets []appsv1.DaemonSet) { return relatedDaemonSets } +// GetStatefulSets retrieves all StatefulSets in the cluster func GetStatefulSets() (relatedStatefulSets []appsv1.StatefulSet) { - // List statefulSet statefulSetsClient := common.K8sClient.AppsV1().StatefulSets("") statefulSets, err := statefulSetsClient.List(context.TODO(), metav1.ListOptions{}) @@ -116,6 +154,7 @@ func GetStatefulSets() (relatedStatefulSets []appsv1.StatefulSet) { return relatedStatefulSets } +// GetDeployment retrieves a specific Deployment by name and namespace func GetDeployment(deploymentName string, namespace string) (relatedDeployment appsv1.Deployment) { deploymentsClient := common.K8sClient.AppsV1().Deployments(namespace) @@ -131,6 +170,7 @@ func GetDeployment(deploymentName string, namespace string) (relatedDeployment a return relatedDeployment } +// GetDaemonSet retrieves a specific DaemonSet by name and namespace func GetDaemonSet(daemonSetName string, namespace string) (relatedDaemonSet appsv1.DaemonSet) { daemonSetsClient := common.K8sClient.AppsV1().DaemonSets(namespace) @@ -146,6 +186,7 @@ func GetDaemonSet(daemonSetName string, namespace string) (relatedDaemonSet apps return relatedDaemonSet } +// GetStatefulSet retrieves a specific StatefulSet by name and namespace func GetStatefulSet(statefulSetName string, namespace string) (relatedStatefulSet appsv1.StatefulSet) { statefulSetsClient := common.K8sClient.AppsV1().StatefulSets(namespace) @@ -161,6 +202,7 @@ 
func GetStatefulSet(statefulSetName string, namespace string) (relatedStatefulSe return relatedStatefulSet } +// GetClusterRoleBinding retrieves a specific ClusterRoleBinding by name and namespace func GetClusterRoleBinding(clusterRoleBindingName string) (relatedClusterRoleBinding rbacv1.ClusterRoleBinding) { clusterRoleBindingsClient := common.K8sClient.RbacV1().ClusterRoleBindings() @@ -178,6 +220,7 @@ func GetClusterRoleBinding(clusterRoleBindingName string) (relatedClusterRoleBin return relatedClusterRoleBinding } +// GetClusterRelatedResources retrieves all related resources for a given resource kind, name and namespace. func GetClusterRelatedResources(resourceKind string, resourceName string, namespace string) (relatedClusterServices common.RelatedClusterServices) { // log.Printf("[DEBUG] Attemping to parse Resource: %s of kind: %s related cluster services.\n", resourceName, resourceKind) @@ -186,21 +229,21 @@ func GetClusterRelatedResources(resourceKind string, resourceName string, namesp switch resourceKind { case "ConfigMap": - relatedClusterServices = GetConfigMapRelatedWorkloads(resourceName) + relatedClusterServices = ConfigMapRelatedWorkloads(resourceName) case "Secret": - relatedClusterServices = GetSecretRelatedWorkloads(resourceName) + relatedClusterServices = SecretRelatedWorkloads(resourceName) case "ClusterRoleBinding": - relatedClusterServices = GetClusterRoleBindingRelatedWorkloads(resourceName) + relatedClusterServices = ClusterRoleBindingRelatedWorkloads(resourceName) case "ServiceAccount": - relatedClusterServices = GetServiceAccountRelatedWorkloads(resourceName) + relatedClusterServices = ServiceAccountRelatedWorkloads(resourceName) case "ClusterRole": - relatedClusterServices = GetClusterRoleRelatedWorkloads(resourceName) + relatedClusterServices = ClusterRoleRelatedWorkloads(resourceName) case "Deployment": - relatedClusterServices = GetDeploymentRelatedResources(resourceName, namespace) + relatedClusterServices = 
DeploymentRelatedResources(resourceName, namespace) case "DaemonSet": - relatedClusterServices = GetDaemonSetRelatedResources(resourceName, namespace) + relatedClusterServices = DaemonSetRelatedResources(resourceName, namespace) case "StatefulSet": - relatedClusterServices = GetStatefulSetRelatedResources(resourceName, namespace) + relatedClusterServices = StatefulSetRelatedResources(resourceName, namespace) default: log.Printf("[ERROR] Unknown resource kind %s", resourceKind) } diff --git a/resources/clusterResources_test.go b/resources/clusterResources_test.go new file mode 100644 index 0000000..986dd92 --- /dev/null +++ b/resources/clusterResources_test.go @@ -0,0 +1,280 @@ +package resources + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func workloadTestEnvVars() (workloadEnvVars []corev1.EnvVar) { + workloadEnvVars = []corev1.EnvVar{ + { + Name: "TOKEN_ENV_VAR", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-secret", + }, + }, + }, + }, + { + Name: "TOKEN_ENV_VAR", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-configmap", + }, + }, + }, + }, + } + return workloadEnvVars +} +func workloadTestVolumes() (workloadVolumes []corev1.Volume) { + + workloadVolumes = []corev1.Volume{ + { + Name: "test-secret-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secret", + }, + }, + }, + { + Name: "test-configmap-volume", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-configmap", + }, + }, + }, + }, + } + return workloadVolumes +} + +func GetTestClusterRoleBinding(clusterRoleBindingName string) (clusterRoleBinding 
rbacv1.ClusterRoleBinding) { + + clusterRoleBinding = rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRoleBinding", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleBindingName, + Labels: map[string]string{ + "app": "nginx", + }, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-serviceaccount", + Namespace: "default", + APIGroup: "rbac.authorization.k8s.io", + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "test-clusterrole", + APIGroup: "rbac.authorization.k8s.io", + }, + } + + return clusterRoleBinding +} +func GetTestPod() (pod corev1.Pod) { + pod = corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container-nginx", + Image: "container-image-nginx", + Env: workloadTestEnvVars(), + }, + }, + Volumes: workloadTestVolumes(), + ServiceAccountName: "test-serviceaccount", + }, + } + return pod +} +func GetTestDeployment() (deployment appsv1.Deployment) { + deployment = appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: new(int32), // use new(int32) to create a pointer to an int32 + Selector: &metav1.LabelSelector{ // label selector for pods + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container-nginx", + Image: "container-image-nginx", + Env: workloadTestEnvVars(), + }, + }, + Volumes: workloadTestVolumes(), + ServiceAccountName: "test-serviceaccount", + }, + }, + }} + return 
deployment +} +func GetTestDaemonSet() (relatedDaemonsets appsv1.DaemonSet) { + daemonSet := appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-daemonset", + Namespace: "default", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ // label selector for pods + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container-nginx", + Image: "container-image-nginx", + Env: workloadTestEnvVars(), + }, + }, + Volumes: workloadTestVolumes(), + ServiceAccountName: "test-serviceaccount", + }, + }, + }, + } + return daemonSet +} +func GetTestStatefulSet() (relatedStatefulsets appsv1.StatefulSet) { + statefulSet := appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "default", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: new(int32), // use new(int32) to create a pointer to an int32 + ServiceName: "nginx", // a service that governs this StatefulSet + Selector: &metav1.LabelSelector{ // label selector for pods + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container-nginx", + Image: "container-image-nginx", + Env: workloadTestEnvVars(), + }, + }, + Volumes: workloadTestVolumes(), + ServiceAccountName: "test-serviceaccount", + }, + }, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: *new(int32), + }, + } + return statefulSet +} + +// Resources lists +func GetTestDaemonSets() (relatedDaemonsets []appsv1.DaemonSet) { + daemonSet := 
GetTestDaemonSet() + testDaemonSet := daemonSet + testDaemonSet.Name = "test-daemonset-2" + relatedDaemonsets = append(relatedDaemonsets, daemonSet) + relatedDaemonsets = append(relatedDaemonsets, testDaemonSet) + + return relatedDaemonsets +} +func GetTestStatefulSets() (relatedStatefulsets []appsv1.StatefulSet) { + statefulSet := GetTestStatefulSet() + testStatefulSet := statefulSet + testStatefulSet.Name = "test-statefulset-2" + relatedStatefulsets = append(relatedStatefulsets, statefulSet) + relatedStatefulsets = append(relatedStatefulsets, testStatefulSet) + + return relatedStatefulsets +} +func GetTestPods() (relatedPods []corev1.Pod) { + pod := GetTestPod() + testPod := pod + testPod.Name = "test-pod-2" + + relatedPods = append(relatedPods, pod) + relatedPods = append(relatedPods, testPod) + + return relatedPods +} +func GetTestDeployments() (relatedDeployments []appsv1.Deployment) { + deployment := GetTestDeployment() + testDeployment := deployment + testDeployment.Name = "test-deployment-2" + relatedDeployments = append(relatedDeployments, deployment) + relatedDeployments = append(relatedDeployments, testDeployment) + + return relatedDeployments +} +func GetTestClusterRoleBindings() (relatedClusterRoleBindings []rbacv1.ClusterRoleBinding) { + clusterRoleBinding := GetTestClusterRoleBinding("test-clusterrolebinding") + testClusterRoleBinding := clusterRoleBinding + testClusterRoleBinding.Name = "test-clusterrolebinding-2" + relatedClusterRoleBindings = append(relatedClusterRoleBindings, clusterRoleBinding) + relatedClusterRoleBindings = append(relatedClusterRoleBindings, testClusterRoleBinding) + + return relatedClusterRoleBindings +} diff --git a/resources/resourceInformer.go b/resources/resourceInformer.go index 78b4507..60fc518 100644 --- a/resources/resourceInformer.go +++ b/resources/resourceInformer.go @@ -1,6 +1,7 @@ package resources import ( + "bytes" "context" "encoding/json" "fmt" @@ -14,99 +15,124 @@ import ( "main.go/common" "os" "os/signal" - 
"reflect" "sync" ) -func createResourceInformer(resourceGroup string, resourceType string, clusterClient *dynamic.DynamicClient) (informer cache.SharedIndexInformer) { - // - resource := schema.GroupVersionResource{Group: resourceGroup, Version: "v1", Resource: resourceType} +var wg sync.WaitGroup + +func createResourceInformer(resourceGVR schema.GroupVersionResource, clusterClient *dynamic.DynamicClient) (resourceInformer cache.SharedIndexInformer) { + // Creates a Kubernetes dynamic informer for the cluster API resources factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(clusterClient, 0, corev1.NamespaceAll, nil) - informer = factory.ForResource(resource).Informer() + resourceInformer = factory.ForResource(resourceGVR).Informer() + + if resourceInformer == nil { + common.SendLog(fmt.Sprintf("Failed to create informer for resource GVR: '%v'", resourceGVR)) + return nil + } + // Creates a Kubernetes dynamic informer for the cluster API resources + // Get a lister for the resource, which is part of the informer + lister := factory.ForResource(resourceGVR).Lister() - return informer + // If the lister is nil, informer creation likely failed + if lister == nil { + common.SendLog(fmt.Sprintf("Failed to create informer for resource GVR: '%v'", resourceGVR)) + return nil + } + + return resourceInformer } -func addInformerEventHandler(resourceInformer cache.SharedIndexInformer) { - var event map[string]interface{} - synced := false +// AddInformerEventHandler adds a new event handler to a given resource informer. +// It logs events when a resource is added, updated, or deleted. 
+func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced bool) { + // Check if the resource informer is nil + if resourceInformer == nil { + log.Println("[ERROR] Resource informer is nil") + return false + } + + // Create a new mutex for handling read and write locks mux := &sync.RWMutex{} + + // Add event handler to the resource informer _, err := resourceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + // This function gets called when a resource gets added AddFunc: func(obj interface{}) { mux.RLock() defer mux.RUnlock() - if !synced { - return - } - // Handler logic - - event = map[string]interface{}{ - "newObject": obj, - "eventType": "ADDED", + if synced { + StructResourceLog(map[string]interface{}{ + "eventType": "ADDED", + "newObject": obj, + }) } - go resourceInformerLog(event) - }, + // This function gets called when a resource gets updated UpdateFunc: func(oldObj, newObj interface{}) { mux.RLock() defer mux.RUnlock() - if !synced { - return - } - // Handler logic - - event = map[string]interface{}{ - "oldObject": oldObj, - "newObject": newObj, - "eventType": "MODIFIED", + if synced { + StructResourceLog(map[string]interface{}{ + "eventType": "MODIFIED", + "newObject": newObj, + "oldObject": oldObj, + }) } - go resourceInformerLog(event) - }, + // This function gets called when a resource gets deleted DeleteFunc: func(obj interface{}) { mux.RLock() defer mux.RUnlock() - if !synced { - return - } - - // Handler logic - - event = map[string]interface{}{ - "newObject": obj, - "eventType": "DELETED", + if synced { + StructResourceLog(map[string]interface{}{ + "eventType": "DELETED", + "newObject": obj, + }) } - go resourceInformerLog(event) - }, }) + // Log any errors in adding the event handler if err != nil { - msg := fmt.Sprintf("[ERROR] Failed to add event handler for informer.\nERROR:\n%v", err) - common.SendLog(msg) - + common.SendLog(fmt.Sprintf("[ERROR] Failed to add event handler for informer.\nERROR:\n%v", err)) 
return } + // Create a new context that will get cancelled when an interrupt signal is received ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() - go resourceInformer.Run(ctx.Done()) + // Create a channel to indicate when the informer has started + started := make(chan bool) + + // Run the informer in a separate goroutine + go func() { + resourceInformer.Run(ctx.Done()) + close(started) + }() - isSynced := cache.WaitForCacheSync(ctx.Done(), resourceInformer.HasSynced) + // Wait for the informer to start + <-started + + // Wait for the informer's cache to sync mux.Lock() - synced = isSynced + synced = cache.WaitForCacheSync(ctx.Done(), resourceInformer.HasSynced) mux.Unlock() - if !isSynced { - log.Fatal("Informer event handler failed to sync.") + // Log if the informer failed to sync + if !synced { + log.Printf("Informer event handler failed to sync.") } + // Wait for the context to be done <-ctx.Done() + // Return whether the informer's cache was synced successfully + return synced + } func AddEventHandlers() { - + // Creates informer for each cluster API and events handler for each informer resourceAPIList := map[string]string{ "configmaps": "", "deployments": "apps", @@ -119,80 +145,114 @@ func AddEventHandlers() { } var eventHandlerSync sync.WaitGroup resourceIndex := 0 + routinesLimit := make(chan bool, len(resourceAPIList)) // limit to number of concurrent goroutines to list resources + for resourceType, resourceGroup := range resourceAPIList { + routinesLimit <- true // will block if there is already goroutines running for the limit number of resources resourceIndex = resourceIndex + 1 - + resourceGVR := schema.GroupVersionResource{Group: resourceGroup, Version: "v1", Resource: resourceType} resourceAPI := fmt.Sprintf("%s/v1/%s", resourceGroup, resourceType) - - common.SendLog(fmt.Sprintf("Attempting to create informer for resource API: '%s'", resourceAPI)) - resourceInformer := createResourceInformer(resourceGroup, 
resourceType, common.DynamicClient) - if resourceInformer != nil { - common.SendLog(fmt.Sprintf("Attempting to add event handler to informer for resource API: '%s'", resourceAPI)) - eventHandlerSync.Add(resourceIndex) - go addInformerEventHandler(resourceInformer) - { - defer eventHandlerSync.Done() - common.SendLog(fmt.Sprintf("Finished adding event handler to informer for resource API: '%s'", resourceAPI)) - } - } else { + resourceInformer := createResourceInformer(resourceGVR, common.DynamicClient) + if resourceInformer == nil { common.SendLog(fmt.Sprintf("Failed to create informer for resource API: '%s'", resourceAPI)) + <-routinesLimit // release a slot when the informer is nil + continue // Skip to next iteration if informer is nil } - } + common.SendLog(fmt.Sprintf("Attempting to add event handler to informer for resource API: '%s'", resourceAPI)) + eventHandlerSync.Add(1) + go func(resourceInformer cache.SharedIndexInformer, resourceAPI string) { + // Use defer to ensure Done() is called even if the goroutine exits prematurely + defer eventHandlerSync.Done() + + // Log when the goroutine starts + common.SendLog(fmt.Sprintf("Adding event handler for resource API: '%s'", resourceAPI)) + AddInformerEventHandler(resourceInformer) + + // Log when the goroutine finishes + common.SendLog(fmt.Sprintf("Finished adding event handler to informer for resource API: '%s'", resourceAPI)) + <-routinesLimit // release a slot when the goroutine finishes + + }(resourceInformer, resourceAPI) // Pass the loop variables here + } eventHandlerSync.Wait() } -func resourceInformerLog(event map[string]interface{}) { - var msg string - if reflect.ValueOf(event).IsValid() { - logEvent := &common.LogEvent{} - jsonString, _ := json.Marshal(event) - json.Unmarshal(jsonString, logEvent) - eventType := event["eventType"].(string) - newRawObjUnstructured := &unstructured.Unstructured{} - newRawObjUnstructured.Object = logEvent.NewObject - newResourceObj := common.KubernetesEvent{} - 
unstructuredObjectJSON, err := newRawObjUnstructured.MarshalJSON() - if err != nil { - fmt.Println(err) - } - json.Unmarshal(unstructuredObjectJSON, &newResourceObj) - if err != nil { - log.Printf("[ERROR] Failed to parse resource event logs.\nERROR:\n%v", err) - } else { - resourceKind := newResourceObj.Kind - resourceName := newResourceObj.KubernetesMetadata.Name - resourceNamespace := newResourceObj.KubernetesMetadata.Namespace - newResourceVersion := newResourceObj.KubernetesMetadata.ResourceVersion - msg = common.ParseEventMessage(eventType, resourceName, resourceKind, resourceNamespace, newResourceVersion) - if eventType == "MODIFIED" { - oldRawObjUnstructured := &unstructured.Unstructured{} - oldRawObjUnstructured.Object = logEvent.OldObject - oldResourceObj := common.KubernetesEvent{} - unstructuredObjectJSON, err = oldRawObjUnstructured.MarshalJSON() - if err != nil { - fmt.Println(err) - } - json.Unmarshal(unstructuredObjectJSON, &oldResourceObj) - if err == nil { - oldResourceName := oldResourceObj.KubernetesMetadata.Name - oldResourceNamespace := oldResourceObj.KubernetesMetadata.Namespace - oldResourceVersion := oldResourceObj.KubernetesMetadata.ResourceVersion - msg = common.ParseEventMessage(eventType, oldResourceName, resourceKind, oldResourceNamespace, newResourceVersion, oldResourceVersion) - } - } - clusterRelatedResources := GetClusterRelatedResources(resourceKind, resourceName, resourceNamespace) +// EventObject transforms a raw object into a KubernetesEvent object. +// It takes a map representing the raw object and a boolean indicating whether the object is new or not. 
+func EventObject(rawObj map[string]interface{}, isNew bool) (resourceObject common.KubernetesEvent) {
+	// NOTE(review): rawObj is the resource object itself (logEvent.NewObject / logEvent.OldObject), so it never carries "newObject"/"oldObject" keys — the previous guard rejected every event; only nil needs checking
+	if rawObj == nil {
+		log.Println("[ERROR] rawObj is nil.")
+		// Return an empty KubernetesEvent object if the raw object is invalid
+		return resourceObject
+	}
+	// Initialize an empty unstructured object
+	rawUnstructuredObj := unstructured.Unstructured{}
+	// Initialize a buffer to store the JSON-encoded raw object
+	var buffer bytes.Buffer
+	// Encode the raw object into JSON and write it to the buffer
+	json.NewEncoder(&buffer).Encode(rawObj)

-	if reflect.ValueOf(clusterRelatedResources).IsValid() {
-		event["relatedClusterServices"] = clusterRelatedResources
-	}
-	}
-	marshaledEvent, err := json.Marshal(event)
-	if err != nil {
-		log.Printf("[ERROR] Failed to marshel resource event logs.\nERROR:\n%v", err)
+	// Unmarshal the JSON-encoded raw object into a KubernetesEvent object
+	err := json.Unmarshal(buffer.Bytes(), &resourceObject)
+	if err != nil {
+		// Log the error if unmarshalling fails
+		log.Printf("Failed to unmarshal resource object: %v", err)
+		// handle error as necessary
+	} else {
+		// If unmarshalling is successful, determine whether to set the unstructured object's content based on the "isNew" flag
+		if isNew {
+			// If the object is new, set the unstructured object's content to the new object's content
+			rawUnstructuredObj.Object = resourceObject.NewObject
+		} else {
+			// If the object is not new, set the unstructured object's content to the old object's content
+			rawUnstructuredObj.Object = resourceObject.OldObject
 		}
-		common.SendLog(msg, marshaledEvent)
+	}
+	// Return the KubernetesEvent object
+	return resourceObject
+}
+func StructResourceLog(event map[string]interface{}) (isStructured bool) {
+	var msg string
+	if event == nil {
+		
log.Println("[ERROR] Event is nil") + return false + } + eventType, ok := event["eventType"].(string) + if !ok { + log.Println("[ERROR] eventType is not a string") + return false } + logEvent := &common.LogEvent{} + eventStr, _ := json.Marshal(event) + json.Unmarshal(eventStr, logEvent) + newEventObj := EventObject(logEvent.NewObject, true) + resourceKind := newEventObj.Kind + resourceName := newEventObj.KubernetesMetadata.Name + resourceNamespace := newEventObj.KubernetesMetadata.Namespace + newResourceVersion := newEventObj.ResourceVersion + if eventType == "MODIFIED" { + oldEventObj := EventObject(logEvent.OldObject, false) + oldResourceName := oldEventObj.KubernetesMetadata.Name + oldResourceNamespace := oldEventObj.KubernetesMetadata.Namespace + oldResourceVersion := oldEventObj.KubernetesMetadata.ResourceVersion + msg = common.ParseEventMessage(eventType, oldResourceName, resourceKind, oldResourceNamespace, newResourceVersion, oldResourceVersion) + + } else { + msg = common.ParseEventMessage(eventType, resourceName, resourceKind, resourceNamespace, newResourceVersion) + } + event["relatedClusterServices"] = GetClusterRelatedResources(resourceKind, resourceName, resourceNamespace) + + marshaledEvent, err := json.Marshal(event) + if err != nil { + log.Printf("[ERROR] Failed to marshel resource event logs.\nERROR:\n%v", err) + } + common.SendLog(msg, marshaledEvent) + defer wg.Done() + isStructured = true + + return isStructured } diff --git a/resources/resourceInformer_test.go b/resources/resourceInformer_test.go new file mode 100644 index 0000000..b6b2420 --- /dev/null +++ b/resources/resourceInformer_test.go @@ -0,0 +1,84 @@ +package resources + +import ( + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic/dynamicinformer" + fakeDynamic "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/tools/cache" + "log" + "testing" +) + +func 
createFakeResourceInformer(gvr schema.GroupVersionResource) cache.SharedIndexInformer { + fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) + factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(fakeDynamicClient, 0, corev1.NamespaceAll, nil) + fakeResourceInformer := factory.ForResource(gvr).Informer() + if fakeResourceInformer == nil { + log.Printf("[ERROR] Resource Informer was not created") + } else { + log.Printf("Resource Informer created successfully") + } + return fakeResourceInformer +} + +func TestCreateResourceInformer(t *testing.T) { + resourceGVR := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} + informer := createFakeResourceInformer(resourceGVR) + + if informer == nil { + t.Errorf("Failed to create resource informer") + } +} + +// Define an interface that includes the function you want to mock +type InformerCreator interface { + createFakeResourceInformer(gvr schema.GroupVersionResource, dynamicClient *fakeDynamic.FakeDynamicClient) cache.SharedIndexInformer +} + +// Have your mock type implement the interface +type MockInformerCreator struct { + mock.Mock +} + +// Replace createResourceInformer with an instance of the interface + +func TestAddEventHandlers(t *testing.T) { + // Create a new mock informer creator + fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) + mockInformerCreator := new(MockInformerCreator) + mockInformer := createFakeResourceInformer(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "deployments"}) + // Define what should be returned when the mock is called + mockInformerCreator.On("CreateFakeResourceInformer", mock.Anything, mock.Anything).Return(mockInformer) + + // Run the function that you're testing + AddEventHandlers() + + // Check that the mock was called with the expected parameters + mockInformerCreator.AssertCalled(t, "CreateFakeResourceInformer", schema.GroupVersionResource{Group: "", Version: "v1", 
Resource: "configmaps"}, fakeDynamicClient) + mockInformerCreator.AssertCalled(t, "CreateFakeResourceInformer", schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, fakeDynamicClient) + // ... add more assertions here ... +} +func (m *MockInformerCreator) CreateFakeResourceInformer(gvr schema.GroupVersionResource, dynamicClient *fakeDynamic.FakeDynamicClient) cache.SharedIndexInformer { + args := m.Called(gvr, dynamicClient) + return args.Get(0).(cache.SharedIndexInformer) +} + +//func TestAddInformerEventHandler(t *testing.T) { +// resourceGVR := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} +// mockDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) +// +// informer := CreateFakeResourceInformer(resourceGVR, mockDynamicClient) +// +// if informer == nil { +// t.Errorf("Failed to create resource informer") +// } +// +// synced := AddInformerEventHandler(informer) +// +// if !synced { +// t.Errorf("Failed to add event handler for informer") +// } +//} diff --git a/resources/resourceServices.go b/resources/resourceServices.go index 3b71879..5c3a136 100644 --- a/resources/resourceServices.go +++ b/resources/resourceServices.go @@ -1,309 +1,109 @@ package resources import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/utils/strings/slices" "main.go/common" "reflect" ) -// Secret Kind -func getSecretRelatedPods(secretName string) (relatedPods []string) { +// Similarly, define methods for other workload types... - // List Pods - pods := GetPods() - // Create a map of Pods names to Pod objects. - podsMap := map[string]corev1.Pod{} - for _, pod := range pods { - if reflect.ValueOf(pod).IsValid() { - podsMap[pod.Name] = pod +func GetSecretRelatedWorkloads(secretName string, workloads []Workload) (relatedWorkloads []string) { + // Create a map of workload names to workloads. 
+ workloadsMap := map[string]Workload{} + for _, workload := range workloads { + if reflect.ValueOf(workload).IsValid() { + workloadsMap[workload.GetName()] = workload } } - // Iterate through the Pods and check for the secret name. - for podName, pod := range podsMap { - for _, container := range pod.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName && !slices.Contains(relatedPods, podName) { - relatedPods = append(relatedPods, podName) - } + // Iterate through the workloads and check for the secret name. + for workloadName, workload := range workloadsMap { + for _, container := range workload.GetContainers() { + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName && !slices.Contains(relatedWorkloads, workloadName) { + relatedWorkloads = append(relatedWorkloads, workloadName) } } - } - for _, volume := range pod.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName == secretName && !slices.Contains(relatedPods, podName) { - relatedPods = append(relatedPods, podName) - + for _, volume := range workload.GetVolumes() { + if volume.Secret != nil && volume.Secret.SecretName == secretName && !slices.Contains(relatedWorkloads, workloadName) { + relatedWorkloads = append(relatedWorkloads, workloadName) } } } - return relatedPods + return relatedWorkloads } -func getSecretRelatedDaemonSets(secretName string) (relatedDaemonSets []string) { - // List DaemonSets - daemonSets := GetDaemonSets() - // Create a map of DaemonSet names to DaemonSet objects. 
- daemonSetsMap := map[string]appsv1.DaemonSet{} - for _, daemonSet := range daemonSets { - if reflect.ValueOf(daemonSet).IsValid() { - - daemonSetsMap[daemonSet.Name] = daemonSet - } +func SecretRelatedWorkloads(secretName string) (relatedWorkloads common.RelatedClusterServices) { + var pods []Workload + var daemonsets []Workload + var deployments []Workload + var statefulsets []Workload + for _, pod := range GetPods() { + pods = append(pods, Pod(pod)) } - - // Iterate through the DaemonSets and check for the config map. - for daemonSetName, daemonSet := range daemonSetsMap { - for _, container := range daemonSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName && !slices.Contains(relatedDaemonSets, daemonSetName) { - relatedDaemonSets = append(relatedDaemonSets, daemonSetName) - } - } - } - } - for _, volume := range daemonSet.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName == secretName && !slices.Contains(relatedDaemonSets, daemonSetName) { - relatedDaemonSets = append(relatedDaemonSets, daemonSetName) - } - } + for _, daemonset := range GetDaemonSets() { + daemonsets = append(daemonsets, DaemonSet(daemonset)) } - - return relatedDaemonSets -} -func getSecretRelatedDeployments(secretName string) (relatedDeployments []string) { - - // List Deployments - deployments := GetDeployments() - // Create a map of Deployment names to DaemonSet objects. 
- deploymentsMap := map[string]appsv1.Deployment{} - for _, deployment := range deployments { - if reflect.ValueOf(deployment).IsValid() { - deploymentsMap[deployment.Name] = deployment - } + for _, deployment := range GetDeployments() { + deployments = append(deployments, Deployment(deployment)) } - - // Iterate through the Deployments and check for the config map. - for deploymentName, deployment := range deploymentsMap { - for _, container := range deployment.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName && !slices.Contains(relatedDeployments, deploymentName) { - relatedDeployments = append(relatedDeployments, deploymentName) - } - } - } - } - for _, volume := range deployment.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName == secretName && !slices.Contains(relatedDeployments, deploymentName) { - relatedDeployments = append(relatedDeployments, deploymentName) - } - } + for _, statefulset := range GetStatefulSets() { + statefulsets = append(statefulsets, StatefulSet(statefulset)) } - - return relatedDeployments -} -func getSecretRelatedStatefulSets(secretName string) (relatedStatefulSets []string) { - - // List StatefulSets - statefulSets := GetStatefulSets() - // Create a map of StatefulSet names to StatefulSet objects. - statefulSetsMap := map[string]appsv1.StatefulSet{} - for _, statefulSet := range statefulSets { - if reflect.ValueOf(statefulSet).IsValid() { - - statefulSetsMap[statefulSet.Name] = statefulSet - } - } - - // Iterate through the StatefulSets and check for the config map. 
- for statefulSetName, statefulSet := range statefulSetsMap { - for _, container := range statefulSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName && !slices.Contains(relatedStatefulSets, statefulSetName) { - relatedStatefulSets = append(relatedStatefulSets, statefulSetName) - } - } - } - } - for _, volume := range statefulSet.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName == secretName && !slices.Contains(relatedStatefulSets, statefulSetName) { - relatedStatefulSets = append(relatedStatefulSets, statefulSetName) - } - } - } - - return relatedStatefulSets -} -func GetSecretRelatedWorkloads(secretName string) (relatedWorkloads common.RelatedClusterServices) { - - relatedDeployments := getSecretRelatedDeployments(secretName) - relatedDaemonsets := getSecretRelatedDaemonSets(secretName) - relatedStatefulSets := getSecretRelatedStatefulSets(secretName) - relatedPods := getSecretRelatedPods(secretName) + relatedPods := GetSecretRelatedWorkloads(secretName, pods) + relatedDaemonsets := GetSecretRelatedWorkloads(secretName, daemonsets) + relatedDeployments := GetSecretRelatedWorkloads(secretName, deployments) + relatedStatefulSets := GetSecretRelatedWorkloads(secretName, statefulsets) + // Similarly, call getRelatedWorkloads for other workload types... relatedWorkloads = common.RelatedClusterServices{Deployments: relatedDeployments, DaemonSets: relatedDaemonsets, StatefulSets: relatedStatefulSets, Pods: relatedPods} return relatedWorkloads } - -// ConfigMap Kind -func getConfigMapRelatedPods(configMapName string) (relatedPods []string) { - - // List Pods - pods := GetPods() - // Create a map of Pods names to DaemonSet objects. 
- podsMap := map[string]corev1.Pod{} - for _, pod := range pods { - if reflect.ValueOf(pod).IsValid() { - podsMap[pod.Name] = pod - } - } - - // Iterate through the Pods and check for the config map. - for podName, pod := range podsMap { - for _, container := range pod.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName && !slices.Contains(relatedPods, podName) { - relatedPods = append(relatedPods, podName) +func GetConfigMapRelatedWorkloads(configMapName string, workloads []Workload) (relatedWorkloads []string) { + for _, workload := range workloads { + if reflect.ValueOf(workload).IsValid() { + for _, container := range workload.GetContainers() { + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName && !slices.Contains(relatedWorkloads, workload.GetName()) { + relatedWorkloads = append(relatedWorkloads, workload.GetName()) } } } - - } - for _, volume := range pod.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && !slices.Contains(relatedPods, podName) { - if volume.ConfigMap != nil { - if volume.ConfigMap.Name == configMapName { - relatedPods = append(relatedPods, podName) - } + for _, volume := range workload.GetVolumes() { + if volume.ConfigMap != nil && volume.ConfigMap.Name == configMapName && !slices.Contains(relatedWorkloads, workload.GetName()) { + relatedWorkloads = append(relatedWorkloads, workload.GetName()) } - } } } - - return relatedPods -} -func getConfigMapRelatedDaemonSets(configMapName string) (relatedDaemonSets []string) { - - // List DaemonSets - daemonSets := GetDaemonSets() - // Create a map of DaemonSet names to DaemonSet objects. 
- daemonSetsMap := map[string]appsv1.DaemonSet{} - for _, daemonSet := range daemonSets { - if reflect.ValueOf(daemonSet).IsValid() { - - daemonSetsMap[daemonSet.Name] = daemonSet - } - } - - // Iterate through the DaemonSets and check for the config map. - for daemonSetName, daemonSet := range daemonSetsMap { - for _, container := range daemonSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName && !slices.Contains(relatedDaemonSets, daemonSetName) { - relatedDaemonSets = append(relatedDaemonSets, daemonSetName) - } - } - } - } - for _, volume := range daemonSet.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.ConfigMap != nil && volume.ConfigMap.Name == configMapName && !slices.Contains(relatedDaemonSets, daemonSetName) { - relatedDaemonSets = append(relatedDaemonSets, daemonSetName) - } - } - } - - return relatedDaemonSets + return relatedWorkloads } -func getConfigMapRelatedDeployments(configMapName string) (relatedDeployments []string) { - // List Deployments - deployments := GetDeployments() - // Create a map of Deployment names to DaemonSet objects. - deploymentsMap := map[string]appsv1.Deployment{} - for _, deployment := range deployments { - if reflect.ValueOf(deployment).IsValid() { - deploymentsMap[deployment.Name] = deployment - } +func ConfigMapRelatedWorkloads(configMapName string) (relatedWorkloads common.RelatedClusterServices) { + var pods []Workload + var daemonsets []Workload + var deployments []Workload + var statefulsets []Workload + for _, pod := range GetPods() { + pods = append(pods, Pod(pod)) } - - // Iterate through the Deployments and check for the config map. 
- for deploymentName, deployment := range deploymentsMap { - for _, container := range deployment.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName && !slices.Contains(relatedDeployments, deploymentName) { - relatedDeployments = append(relatedDeployments, deploymentName) - } - } - } - } - for _, volume := range deployment.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.ConfigMap != nil && volume.ConfigMap.Name == configMapName && !slices.Contains(relatedDeployments, deploymentName) { - relatedDeployments = append(relatedDeployments, deploymentName) - } - } + for _, daemonset := range GetDaemonSets() { + daemonsets = append(daemonsets, DaemonSet(daemonset)) } - - return relatedDeployments -} -func getConfigMapRelatedStatefulSets(configMapName string) (relatedStatefulSets []string) { - - // List StatefulSets - statefulSets := GetStatefulSets() - // Create a map of StatefulSet names to StatefulSet objects. - statefulSetsMap := map[string]appsv1.StatefulSet{} - for _, statefulSet := range statefulSets { - if reflect.ValueOf(statefulSet).IsValid() { - - statefulSetsMap[statefulSet.Name] = statefulSet - } + for _, deployment := range GetDeployments() { + deployments = append(deployments, Deployment(deployment)) } - - // Iterate through the StatefulSets and check for the config map. 
- for statefulSetName, statefulSet := range statefulSetsMap { - for _, container := range statefulSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName && !slices.Contains(relatedStatefulSets, statefulSetName) { - relatedStatefulSets = append(relatedStatefulSets, statefulSetName) - } - } - } - } - for _, volume := range statefulSet.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.ConfigMap != nil && volume.ConfigMap.Name == configMapName && !slices.Contains(relatedStatefulSets, statefulSetName) { - relatedStatefulSets = append(relatedStatefulSets, statefulSetName) - } - } + for _, statefulset := range GetStatefulSets() { + statefulsets = append(statefulsets, StatefulSet(statefulset)) } - - return relatedStatefulSets -} -func GetConfigMapRelatedWorkloads(configMapName string) (relatedWorkloads common.RelatedClusterServices) { - - relatedDeployments := getConfigMapRelatedDeployments(configMapName) - relatedDaemonsets := getConfigMapRelatedDaemonSets(configMapName) - relatedStatefulSets := getConfigMapRelatedStatefulSets(configMapName) - relatedPods := getConfigMapRelatedPods(configMapName) + relatedPods := GetConfigMapRelatedWorkloads(configMapName, pods) + relatedDaemonsets := GetConfigMapRelatedWorkloads(configMapName, daemonsets) + relatedDeployments := GetConfigMapRelatedWorkloads(configMapName, deployments) + relatedStatefulSets := GetConfigMapRelatedWorkloads(configMapName, statefulsets) relatedWorkloads = common.RelatedClusterServices{Deployments: relatedDeployments, DaemonSets: relatedDaemonsets, StatefulSets: relatedStatefulSets, Pods: relatedPods} @@ -311,100 +111,37 @@ func GetConfigMapRelatedWorkloads(configMapName string) (relatedWorkloads common } // ServiceAccount Kind -func 
getServiceAccountRelatedPods(serviceAccountName string) (relatedPods []string) {
-	// List Pods
-	pods := GetPods()
-	// Create a map of Pods names to DaemonSet objects.
-	podsMap := map[string]corev1.Pod{}
-	for _, pod := range pods {
-		if reflect.ValueOf(pod).IsValid() {
-			podsMap[pod.Name] = pod
+func GetServiceAccountRelatedWorkloads(serviceAccountName string, workloads []Workload) (relatedWorkloads []string) {
+	for _, workload := range workloads {
+		if reflect.ValueOf(workload).IsValid() && workload.GetServiceAccountName() == serviceAccountName { // NOTE(review): removed duplicated clause; original also matched Spec.DeprecatedServiceAccount — confirm whether Workload should expose it
+			relatedWorkloads = append(relatedWorkloads, workload.GetName())
 		}
 	}
-
-	// Iterate through the Pods and check for the config map.
-	for podName, pod := range podsMap {
-		if pod.Spec.ServiceAccountName == serviceAccountName || pod.Spec.DeprecatedServiceAccount == serviceAccountName {
-			relatedPods = append(relatedPods, podName)
-		}
-	}
-
-	return relatedPods
-}
-func getServiceAccountRelatedDaemonSets(serviceAccountName string) (relatedDaemonSets []string) {
-
-	// List DaemonSets
-	daemonSets := GetDaemonSets()
-	// Create a map of DaemonSet names to DaemonSet objects.
-	daemonSetsMap := map[string]appsv1.DaemonSet{}
-	for _, daemonSet := range daemonSets {
-		if reflect.ValueOf(daemonSet).IsValid() {
-
-			daemonSetsMap[daemonSet.Name] = daemonSet
-		}
-	}
-
-	// Iterate through the DaemonSets and check for the config map.
- for daemonSetName, daemonSet := range daemonSetsMap { - if daemonSet.Spec.Template.Spec.ServiceAccountName == serviceAccountName || daemonSet.Spec.Template.Spec.DeprecatedServiceAccount == serviceAccountName { - relatedDaemonSets = append(relatedDaemonSets, daemonSetName) - } - - } - - return relatedDaemonSets + return relatedWorkloads } -func getServiceAccountRelatedDeployments(serviceAccountName string) (relatedDeployments []string) { - // List Deployments - deployments := GetDeployments() - // Create a map of Deployment names to DaemonSet objects. - deploymentsMap := map[string]appsv1.Deployment{} - for _, deployment := range deployments { - if reflect.ValueOf(deployment).IsValid() { - deploymentsMap[deployment.Name] = deployment - } +func ServiceAccountRelatedWorkloads(serviceAccountName string) (relatedWorkloads common.RelatedClusterServices) { + var pods []Workload + var daemonsets []Workload + var deployments []Workload + var statefulsets []Workload + for _, pod := range GetPods() { + pods = append(pods, Pod(pod)) } - - // Iterate through the Deployments and check for the config map. - for deploymentName, deployment := range deploymentsMap { - if deployment.Spec.Template.Spec.ServiceAccountName == serviceAccountName || deployment.Spec.Template.Spec.DeprecatedServiceAccount == serviceAccountName { - - relatedDeployments = append(relatedDeployments, deploymentName) - } + for _, daemonset := range GetDaemonSets() { + daemonsets = append(daemonsets, DaemonSet(daemonset)) } - - return relatedDeployments -} -func getServiceAccountRelatedStatefulSets(serviceAccountName string) (relatedStatefulSets []string) { - - // List StatefulSets - statefulSets := GetStatefulSets() - // Create a map of StatefulSet names to StatefulSet objects. 
- statefulSetsMap := map[string]appsv1.StatefulSet{} - for _, statefulSet := range statefulSets { - if reflect.ValueOf(statefulSet).IsValid() { - - statefulSetsMap[statefulSet.Name] = statefulSet - } + for _, deployment := range GetDeployments() { + deployments = append(deployments, Deployment(deployment)) } - - // Iterate through the StatefulSets and check for the config map. - for statefulSetName, statefulSet := range statefulSetsMap { - if statefulSet.Spec.Template.Spec.ServiceAccountName == serviceAccountName || statefulSet.Spec.Template.Spec.DeprecatedServiceAccount == serviceAccountName { - - relatedStatefulSets = append(relatedStatefulSets, statefulSetName) - } + for _, statefulset := range GetStatefulSets() { + statefulsets = append(statefulsets, StatefulSet(statefulset)) } - - return relatedStatefulSets -} -func GetServiceAccountRelatedWorkloads(serviceAccountName string) (relatedWorkloads common.RelatedClusterServices) { - relatedDeployments := getServiceAccountRelatedDeployments(serviceAccountName) - relatedDaemonsets := getServiceAccountRelatedDaemonSets(serviceAccountName) - relatedStatefulSets := getServiceAccountRelatedStatefulSets(serviceAccountName) - relatedPods := getServiceAccountRelatedPods(serviceAccountName) + relatedPods := GetServiceAccountRelatedWorkloads(serviceAccountName, pods) + relatedDaemonsets := GetServiceAccountRelatedWorkloads(serviceAccountName, daemonsets) + relatedDeployments := GetServiceAccountRelatedWorkloads(serviceAccountName, deployments) + relatedStatefulSets := GetServiceAccountRelatedWorkloads(serviceAccountName, statefulsets) relatedWorkloads = common.RelatedClusterServices{Deployments: relatedDeployments, DaemonSets: relatedDaemonsets, StatefulSets: relatedStatefulSets, Pods: relatedPods} @@ -413,7 +150,7 @@ func GetServiceAccountRelatedWorkloads(serviceAccountName string) (relatedWorklo // ClusterRoleBinding Kind -func GetClusterRoleBindingRelatedWorkloads(clusterRoleBindingName string) (relatedWorkloads 
common.RelatedClusterServices) { +func ClusterRoleBindingRelatedWorkloads(clusterRoleBindingName string) (relatedWorkloads common.RelatedClusterServices) { // clusterRoleBinding := GetClusterRoleBinding(clusterRoleBindingName) @@ -422,7 +159,7 @@ func GetClusterRoleBindingRelatedWorkloads(clusterRoleBindingName string) (relat for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { if clusterRoleBindingSubject.Kind == "ServiceAccount" { serviceAccountName := clusterRoleBindingSubject.Name - relatedWorkloads = GetServiceAccountRelatedWorkloads(serviceAccountName) + relatedWorkloads = ServiceAccountRelatedWorkloads(serviceAccountName) } } @@ -433,7 +170,7 @@ func GetClusterRoleBindingRelatedWorkloads(clusterRoleBindingName string) (relat // ClusterRole Kind -func GetClusterRoleRelatedWorkloads(clusterRoleName string) (relatedWorkloads common.RelatedClusterServices) { +func ClusterRoleRelatedWorkloads(clusterRoleName string) (relatedWorkloads common.RelatedClusterServices) { clusterRoleBindings := GetClusterRoleBindings() @@ -443,7 +180,7 @@ func GetClusterRoleRelatedWorkloads(clusterRoleName string) (relatedWorkloads co for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { if clusterRoleBindingSubject.Kind == "ServiceAccount" { serviceAccountName := clusterRoleBindingSubject.Name - relatedWorkloads = GetServiceAccountRelatedWorkloads(serviceAccountName) + relatedWorkloads = ServiceAccountRelatedWorkloads(serviceAccountName) } } diff --git a/resources/resourceServices_test.go b/resources/resourceServices_test.go new file mode 100644 index 0000000..bd792f3 --- /dev/null +++ b/resources/resourceServices_test.go @@ -0,0 +1,167 @@ +package resources + +import ( + "main.go/common" + "reflect" + "testing" +) + +func TestSecretRelatedWorkloads(t *testing.T) { + var pods []Workload + var daemonsets []Workload + var deployments []Workload + var statefulsets []Workload + for _, pod := range GetTestPods() { + pods = append(pods, Pod(pod)) + } + 
for _, daemonset := range GetTestDaemonSets() { + daemonsets = append(daemonsets, DaemonSet(daemonset)) + } + for _, deployment := range GetTestDeployments() { + deployments = append(deployments, Deployment(deployment)) + } + for _, statefulset := range GetTestStatefulSets() { + statefulsets = append(statefulsets, StatefulSet(statefulset)) + } + + secretName := "test-secret" + relatedPods := GetSecretRelatedWorkloads(secretName, pods) + relatedDaemonsets := GetSecretRelatedWorkloads(secretName, daemonsets) + relatedDeployments := GetSecretRelatedWorkloads(secretName, deployments) + relatedStatefulSets := GetSecretRelatedWorkloads(secretName, statefulsets) + // Similarly, call getRelatedWorkloads for other workload types... + + relatedWorkloads := common.RelatedClusterServices{Deployments: relatedDeployments, DaemonSets: relatedDaemonsets, StatefulSets: relatedStatefulSets, Pods: relatedPods} + + if reflect.ValueOf(relatedWorkloads).IsZero() { + t.Errorf("Expected related workloads for secret: %s, got zero", secretName) + } else { + t.Logf("Secret: %s related workloads:\n%v", secretName, relatedWorkloads) + } +} +func TestConfigMapRelatedWorkloads(t *testing.T) { + + var pods []Workload + var daemonsets []Workload + var deployments []Workload + var statefulsets []Workload + for _, pod := range GetTestPods() { + pods = append(pods, Pod(pod)) + } + for _, daemonset := range GetTestDaemonSets() { + daemonsets = append(daemonsets, DaemonSet(daemonset)) + } + for _, deployment := range GetTestDeployments() { + deployments = append(deployments, Deployment(deployment)) + } + for _, statefulset := range GetTestStatefulSets() { + statefulsets = append(statefulsets, StatefulSet(statefulset)) + } + + configMapName := "test-configmap" + relatedPods := GetConfigMapRelatedWorkloads(configMapName, pods) + relatedDaemonsets := GetConfigMapRelatedWorkloads(configMapName, daemonsets) + relatedDeployments := GetConfigMapRelatedWorkloads(configMapName, deployments) + 
relatedStatefulSets := GetConfigMapRelatedWorkloads(configMapName, statefulsets) + + relatedWorkloads := common.RelatedClusterServices{Deployments: relatedDeployments, DaemonSets: relatedDaemonsets, StatefulSets: relatedStatefulSets, Pods: relatedPods} + + if reflect.ValueOf(relatedWorkloads).IsZero() { + t.Errorf("Expected related workloads for configmap: %s, got zero", configMapName) + } else { + t.Logf("ConfigMap: %s related workloads:\n%v", configMapName, relatedWorkloads) + } +} + +// ServiceAccount Kind +func TestServiceAccountRelatedWorkloads(t *testing.T) { + serviceAccountName := "test-serviceaccount" + + relatedWorkloads := ServiceAccountTestRelatedWorkloads(serviceAccountName) + if reflect.ValueOf(relatedWorkloads).IsZero() { + t.Errorf("Expected related workloads for serviceaccount: %s, got zero", serviceAccountName) + } else { + t.Logf("Service account: %s related workloads:\n%v", serviceAccountName, relatedWorkloads) + } +} +func ServiceAccountTestRelatedWorkloads(serviceAccountName string) (relatedWorkloads common.RelatedClusterServices) { + var pods []Workload + var daemonsets []Workload + var deployments []Workload + var statefulsets []Workload + for _, pod := range GetTestPods() { + pods = append(pods, Pod(pod)) + } + for _, daemonset := range GetTestDaemonSets() { + daemonsets = append(daemonsets, DaemonSet(daemonset)) + } + for _, deployment := range GetTestDeployments() { + deployments = append(deployments, Deployment(deployment)) + } + for _, statefulset := range GetTestStatefulSets() { + statefulsets = append(statefulsets, StatefulSet(statefulset)) + } + + relatedPods := GetServiceAccountRelatedWorkloads(serviceAccountName, pods) + relatedDaemonsets := GetServiceAccountRelatedWorkloads(serviceAccountName, daemonsets) + relatedDeployments := GetServiceAccountRelatedWorkloads(serviceAccountName, deployments) + relatedStatefulSets := GetServiceAccountRelatedWorkloads(serviceAccountName, statefulsets) + + relatedWorkloads = 
common.RelatedClusterServices{Deployments: relatedDeployments, DaemonSets: relatedDaemonsets, StatefulSets: relatedStatefulSets, Pods: relatedPods} + + return relatedWorkloads +} + +// ClusterRoleBinding Kind + +func TestClusterRoleBindingRelatedWorkloads(t *testing.T) { + // + var relatedWorkloads common.RelatedClusterServices + clusterRoleBindingName := "test-clusterrolebinding" + clusterRoleBinding := GetTestClusterRoleBinding(clusterRoleBindingName) + + // Iterate through the StatefulSets and check for the config map. + if reflect.ValueOf(clusterRoleBinding).IsValid() { + for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { + if clusterRoleBindingSubject.Kind == "ServiceAccount" { + serviceAccountName := clusterRoleBindingSubject.Name + relatedWorkloads = ServiceAccountTestRelatedWorkloads(serviceAccountName) + } + } + + } + + if reflect.ValueOf(relatedWorkloads).IsZero() { + t.Errorf("Expected related workloads for cluster role binding: %s, got zero", clusterRoleBindingName) + } else { + t.Logf("Cluster role binding: %s related workloads:\n%v", clusterRoleBindingName, relatedWorkloads) + } +} + +// ClusterRole Kind + +func TestClusterRoleRelatedWorkloads(t *testing.T) { + var relatedWorkloads common.RelatedClusterServices + clusterRoleName := "test-clusterrole" + clusterRoleBindings := GetTestClusterRoleBindings() + + for _, clusterRoleBinding := range clusterRoleBindings { + clusterRoleRef := clusterRoleBinding.RoleRef.Name + if clusterRoleRef == clusterRoleName && reflect.ValueOf(clusterRoleBinding).IsValid() { + for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { + if clusterRoleBindingSubject.Kind == "ServiceAccount" { + serviceAccountName := clusterRoleBindingSubject.Name + relatedWorkloads = ServiceAccountTestRelatedWorkloads(serviceAccountName) + } + } + + } + + } + + if reflect.ValueOf(relatedWorkloads).IsZero() { + t.Errorf("Expected related workloads for clusterrole: %s, got zero", clusterRoleName) + } else { + 
t.Logf("Cluster role: %s related workloads:\n%v", clusterRoleName, relatedWorkloads) + } +} diff --git a/resources/workloadServices.go b/resources/workloadServices.go index 7fba591..cadd69e 100644 --- a/resources/workloadServices.go +++ b/resources/workloadServices.go @@ -1,73 +1,54 @@ package resources import ( - appsv1 "k8s.io/api/apps/v1" "k8s.io/utils/strings/slices" "main.go/common" "reflect" ) -// Deployment Kind - -func getDeploymentRelatedConfigMaps(deployment appsv1.Deployment) (relatedConfigMaps []string) { - - for _, container := range deployment.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" && !slices.Contains(relatedConfigMaps, env.ValueFrom.ConfigMapKeyRef.Name) { - configMapName := env.ValueFrom.ConfigMapKeyRef.Name - relatedConfigMaps = append(relatedConfigMaps, configMapName) - } +func GetWorkloadRelatedConfigMaps(workload Workload) (relatedConfigMaps []string) { + for _, container := range workload.GetContainers() { + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" && !slices.Contains(relatedConfigMaps, env.ValueFrom.ConfigMapKeyRef.Name) { + relatedConfigMaps = append(relatedConfigMaps, env.ValueFrom.ConfigMapKeyRef.Name) } } } - for _, volume := range deployment.Spec.Template.Spec.Volumes { - - if reflect.ValueOf(volume).IsValid() && volume.ConfigMap != nil && volume.ConfigMap.Name != "" && !slices.Contains(relatedConfigMaps, volume.ConfigMap.Name) { - configMapName := volume.ConfigMap.Name - relatedConfigMaps = append(relatedConfigMaps, configMapName) + for _, volume := range workload.GetVolumes() { + if volume.ConfigMap != nil && volume.ConfigMap.Name != "" && !slices.Contains(relatedConfigMaps, volume.ConfigMap.Name) { + 
relatedConfigMaps = append(relatedConfigMaps, volume.ConfigMap.Name) } } - return relatedConfigMaps } - -func getDeploymentRelatedSecrets(deployment appsv1.Deployment) (relatedSecrets []string) { - - for _, container := range deployment.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" && !slices.Contains(relatedSecrets, env.ValueFrom.SecretKeyRef.Name) { - secretName := env.ValueFrom.SecretKeyRef.Name - relatedSecrets = append(relatedSecrets, secretName) - } +func GetWorkloadRelatedSecrets(workload Workload) (relatedSecrets []string) { + for _, container := range workload.GetContainers() { + for _, env := range container.Env { + if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" && !slices.Contains(relatedSecrets, env.ValueFrom.SecretKeyRef.Name) { + relatedSecrets = append(relatedSecrets, env.ValueFrom.SecretKeyRef.Name) } } } - for _, volume := range deployment.Spec.Template.Spec.Volumes { + for _, volume := range workload.GetVolumes() { if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName != "" && !slices.Contains(relatedSecrets, volume.Secret.SecretName) { secretName := volume.Secret.SecretName relatedSecrets = append(relatedSecrets, secretName) } } - return relatedSecrets } -func getDeploymentRelatedServiceAccounts(deployment appsv1.Deployment) (relatedServiceAccounts []string) { +func GetWorkloadRelatedServiceAccounts(workload Workload) (relatedServiceAccounts []string) { - if deployment.Spec.Template.Spec.ServiceAccountName != "" && !slices.Contains(relatedServiceAccounts, deployment.Spec.Template.Spec.ServiceAccountName) { - serviceAccountName := deployment.Spec.Template.Spec.ServiceAccountName - relatedServiceAccounts = 
append(relatedServiceAccounts, serviceAccountName) + if workload.GetServiceAccountName() != "" && !slices.Contains(relatedServiceAccounts, workload.GetServiceAccountName()) { + relatedServiceAccounts = append(relatedServiceAccounts, workload.GetServiceAccountName()) } return relatedServiceAccounts } -func getDeploymentRelatedClusterRoleBindings(deployment appsv1.Deployment) (relatedClusterRoleBindings []string) { +func GetWorkloadRelatedClusterRoleBindings(workload Workload) (relatedClusterRoleBindings []string) { - if serviceAccountName := deployment.Spec.Template.Spec.ServiceAccountName; serviceAccountName != "" { + if serviceAccountName := workload.GetServiceAccountName(); serviceAccountName != "" { clusterRoleBindings := GetClusterRoleBindings() for _, clusterRoleBinding := range clusterRoleBindings { if reflect.ValueOf(clusterRoleBinding).IsValid() { @@ -83,9 +64,9 @@ func getDeploymentRelatedClusterRoleBindings(deployment appsv1.Deployment) (rela return relatedClusterRoleBindings } -func getDeploymentRelatedClusterRoles(deployment appsv1.Deployment) (relatedClusterRoles []string) { +func GetWorkloadRelatedClusterRoles(workload Workload) (relatedClusterRoles []string) { - if serviceAccountName := deployment.Spec.Template.Spec.ServiceAccountName; serviceAccountName != "" { + if serviceAccountName := workload.GetServiceAccountName(); serviceAccountName != "" { clusterRoleBindings := GetClusterRoleBindings() for _, clusterRoleBinding := range clusterRoleBindings { if reflect.ValueOf(clusterRoleBinding).IsValid() { @@ -102,130 +83,35 @@ func getDeploymentRelatedClusterRoles(deployment appsv1.Deployment) (relatedClus return relatedClusterRoles } -func GetDeploymentRelatedResources(deploymentName string, namespace string) (relatedResources common.RelatedClusterServices) { +// Deployment Kind +func DeploymentRelatedResources(deploymentName string, namespace string) (relatedResources common.RelatedClusterServices) { // deployment := GetDeployment(deploymentName, 
namespace) + deploymentWorkload := Deployment(deployment) if reflect.ValueOf(deployment).IsValid() { - relatedConfigMaps := getDeploymentRelatedConfigMaps(deployment) - relatedSecrets := getDeploymentRelatedSecrets(deployment) - relatedServiceAccounts := getDeploymentRelatedServiceAccounts(deployment) - relatedClusterRoleBindings := getDeploymentRelatedClusterRoleBindings(deployment) - relatedClusterRoles := getDeploymentRelatedClusterRoles(deployment) + relatedConfigMaps := GetWorkloadRelatedConfigMaps(deploymentWorkload) + relatedSecrets := GetWorkloadRelatedSecrets(deploymentWorkload) + relatedServiceAccounts := GetWorkloadRelatedServiceAccounts(deploymentWorkload) + relatedClusterRoleBindings := GetWorkloadRelatedClusterRoleBindings(deploymentWorkload) + relatedClusterRoles := GetWorkloadRelatedClusterRoles(deploymentWorkload) relatedResources = common.RelatedClusterServices{ConfigMaps: relatedConfigMaps, Secrets: relatedSecrets, ServiceAccounts: relatedServiceAccounts, ClusterRoleBindings: relatedClusterRoleBindings, ClusterRoles: relatedClusterRoles} } - return relatedResources } // DaemonSet Kind -func getDaemonSetRelatedConfigMaps(daemonSet appsv1.DaemonSet) (relatedConfigMaps []string) { - // - - for _, container := range daemonSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" && !slices.Contains(relatedConfigMaps, env.ValueFrom.ConfigMapKeyRef.Name) { - configMapName := env.ValueFrom.ConfigMapKeyRef.Name - relatedConfigMaps = append(relatedConfigMaps, configMapName) - } - } - } - } - for _, volume := range daemonSet.Spec.Template.Spec.Volumes { - - if reflect.ValueOf(volume).IsValid() && volume.ConfigMap != nil && volume.ConfigMap.Name != "" && !slices.Contains(relatedConfigMaps, volume.ConfigMap.Name) { - configMapName := 
volume.ConfigMap.Name - relatedConfigMaps = append(relatedConfigMaps, configMapName) - } - } - - return relatedConfigMaps -} - -func getDaemonSetRelatedSecrets(daemonSet appsv1.DaemonSet) (relatedSecrets []string) { - // - - for _, container := range daemonSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" && !slices.Contains(relatedSecrets, env.ValueFrom.SecretKeyRef.Name) { - secretName := env.ValueFrom.SecretKeyRef.Name - relatedSecrets = append(relatedSecrets, secretName) - } - } - } - } - for _, volume := range daemonSet.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName != "" && !slices.Contains(relatedSecrets, volume.Secret.SecretName) { - secretName := volume.Secret.SecretName - relatedSecrets = append(relatedSecrets, secretName) - } - } - - return relatedSecrets -} - -func getDaemonSetRelatedServiceAccounts(daemonSet appsv1.DaemonSet) (relatedServiceAccounts []string) { - // - - if daemonSet.Spec.Template.Spec.ServiceAccountName != "" && !slices.Contains(relatedServiceAccounts, daemonSet.Spec.Template.Spec.ServiceAccountName) { - serviceAccountName := daemonSet.Spec.Template.Spec.ServiceAccountName - relatedServiceAccounts = append(relatedServiceAccounts, serviceAccountName) - } - return relatedServiceAccounts -} -func getDaemonSetRelatedClusterRoleBindings(daemonSet appsv1.DaemonSet) (relatedClusterRoleBindings []string) { - // - - if serviceAccountName := daemonSet.Spec.Template.Spec.ServiceAccountName; serviceAccountName != "" { - clusterRoleBindings := GetClusterRoleBindings() - for _, clusterRoleBinding := range clusterRoleBindings { - if reflect.ValueOf(clusterRoleBinding).IsValid() { - for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { - if 
clusterRoleBindingSubject.Kind == "ServiceAccount" && clusterRoleBindingSubject.Name == serviceAccountName { - clusterRoleBindingName := clusterRoleBinding.Name - relatedClusterRoleBindings = append(relatedClusterRoleBindings, clusterRoleBindingName) - } - } - } - } - } - return relatedClusterRoleBindings -} - -func getDaemonSetRelatedClusterRoles(daemonSet appsv1.DaemonSet) (relatedClusterRoles []string) { - // - - if serviceAccountName := daemonSet.Spec.Template.Spec.ServiceAccountName; serviceAccountName != "" { - clusterRoleBindings := GetClusterRoleBindings() - for _, clusterRoleBinding := range clusterRoleBindings { - if reflect.ValueOf(clusterRoleBinding).IsValid() { - for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { - if clusterRoleBindingSubject.Kind == "ServiceAccount" && clusterRoleBindingSubject.Name == serviceAccountName { - clusterRoleName := clusterRoleBinding.RoleRef.Name - relatedClusterRoles = append(relatedClusterRoles, clusterRoleName) - } - } - } - } - } - - return relatedClusterRoles -} - -func GetDaemonSetRelatedResources(daemonSetName string, namespace string) (relatedResources common.RelatedClusterServices) { +func DaemonSetRelatedResources(daemonSetName string, namespace string) (relatedResources common.RelatedClusterServices) { // daemonSet := GetDaemonSet(daemonSetName, namespace) + daemonSetWorkload := DaemonSet(daemonSet) if reflect.ValueOf(daemonSet).IsValid() { - relatedConfigMaps := getDaemonSetRelatedConfigMaps(daemonSet) - relatedSecrets := getDaemonSetRelatedSecrets(daemonSet) - relatedServiceAccounts := getDaemonSetRelatedServiceAccounts(daemonSet) - relatedClusterRoleBindings := getDaemonSetRelatedClusterRoleBindings(daemonSet) - relatedClusterRoles := getDaemonSetRelatedClusterRoles(daemonSet) + relatedConfigMaps := GetWorkloadRelatedConfigMaps(daemonSetWorkload) + relatedSecrets := GetWorkloadRelatedSecrets(daemonSetWorkload) + relatedServiceAccounts := 
GetWorkloadRelatedServiceAccounts(daemonSetWorkload) + relatedClusterRoleBindings := GetWorkloadRelatedClusterRoleBindings(daemonSetWorkload) + relatedClusterRoles := GetWorkloadRelatedClusterRoles(daemonSetWorkload) relatedResources = common.RelatedClusterServices{ConfigMaps: relatedConfigMaps, Secrets: relatedSecrets, ServiceAccounts: relatedServiceAccounts, ClusterRoleBindings: relatedClusterRoleBindings, ClusterRoles: relatedClusterRoles} } @@ -233,112 +119,17 @@ func GetDaemonSetRelatedResources(daemonSetName string, namespace string) (relat } // StatefulSet Kind -func getStatefulSetRelatedConfigMaps(statefulSet appsv1.StatefulSet) (relatedConfigMaps []string) { - // - for _, container := range statefulSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" && !slices.Contains(relatedConfigMaps, env.ValueFrom.ConfigMapKeyRef.Name) { - configMapName := env.ValueFrom.ConfigMapKeyRef.Name - relatedConfigMaps = append(relatedConfigMaps, configMapName) - } - } - } - } - for _, volume := range statefulSet.Spec.Template.Spec.Volumes { - - if reflect.ValueOf(volume).IsValid() && volume.ConfigMap != nil && volume.ConfigMap.Name != "" && !slices.Contains(relatedConfigMaps, volume.ConfigMap.Name) { - configMapName := volume.ConfigMap.Name - relatedConfigMaps = append(relatedConfigMaps, configMapName) - } - } - - return relatedConfigMaps -} - -func getStatefulSetRelatedSecrets(statefulSet appsv1.StatefulSet) (relatedSecrets []string) { - // - - for _, container := range statefulSet.Spec.Template.Spec.Containers { - containerEnv := container.Env - if containerEnv != nil { - for _, env := range containerEnv { - if reflect.ValueOf(env).IsValid() && env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" && 
!slices.Contains(relatedSecrets, env.ValueFrom.SecretKeyRef.Name) { - secretName := env.ValueFrom.SecretKeyRef.Name - relatedSecrets = append(relatedSecrets, secretName) - } - } - } - } - for _, volume := range statefulSet.Spec.Template.Spec.Volumes { - if reflect.ValueOf(volume).IsValid() && volume.Secret != nil && volume.Secret.SecretName != "" && !slices.Contains(relatedSecrets, volume.Secret.SecretName) { - secretName := volume.Secret.SecretName - relatedSecrets = append(relatedSecrets, secretName) - } - } - - return relatedSecrets -} - -func getStatefulSetRelatedServiceAccounts(statefulSet appsv1.StatefulSet) (relatedServiceAccounts []string) { - // - if statefulSet.Spec.Template.Spec.ServiceAccountName != "" && !slices.Contains(relatedServiceAccounts, statefulSet.Spec.Template.Spec.ServiceAccountName) { - serviceAccountName := statefulSet.Spec.Template.Spec.ServiceAccountName - relatedServiceAccounts = append(relatedServiceAccounts, serviceAccountName) - } - return relatedServiceAccounts -} - -func getStatefulSetRelatedClusterRoleBindings(statefulSet appsv1.StatefulSet) (relatedClusterRoleBindings []string) { - // - - if serviceAccountName := statefulSet.Spec.Template.Spec.ServiceAccountName; serviceAccountName != "" { - clusterRoleBindings := GetClusterRoleBindings() - for _, clusterRoleBinding := range clusterRoleBindings { - if reflect.ValueOf(clusterRoleBinding).IsValid() { - for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { - if clusterRoleBindingSubject.Kind == "ServiceAccount" && clusterRoleBindingSubject.Name == serviceAccountName { - clusterRoleBindingName := clusterRoleBinding.Name - relatedClusterRoleBindings = append(relatedClusterRoleBindings, clusterRoleBindingName) - } - } - } - } - } - return relatedClusterRoleBindings -} - -func getStatefulSetRelatedClusterRoles(statefulSet appsv1.StatefulSet) (relatedClusterRoles []string) { - // - - if serviceAccountName := statefulSet.Spec.Template.Spec.ServiceAccountName; 
serviceAccountName != "" { - clusterRoleBindings := GetClusterRoleBindings() - for _, clusterRoleBinding := range clusterRoleBindings { - if reflect.ValueOf(clusterRoleBinding).IsValid() { - for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { - if clusterRoleBindingSubject.Kind == "ServiceAccount" && clusterRoleBindingSubject.Name == serviceAccountName { - clusterRoleName := clusterRoleBinding.RoleRef.Name - relatedClusterRoles = append(relatedClusterRoles, clusterRoleName) - } - } - } - } - } - - return relatedClusterRoles -} - -func GetStatefulSetRelatedResources(statefulSetName string, namespace string) (relatedResources common.RelatedClusterServices) { +func StatefulSetRelatedResources(statefulSetName string, namespace string) (relatedResources common.RelatedClusterServices) { // statefulSet := GetStatefulSet(statefulSetName, namespace) + statefulSetWorkload := StatefulSet(statefulSet) if reflect.ValueOf(statefulSet).IsValid() { - relatedConfigMaps := getStatefulSetRelatedConfigMaps(statefulSet) - relatedSecrets := getStatefulSetRelatedSecrets(statefulSet) - relatedServiceAccounts := getStatefulSetRelatedServiceAccounts(statefulSet) - relatedClusterRoleBindings := getStatefulSetRelatedClusterRoleBindings(statefulSet) - relatedClusterRoles := getStatefulSetRelatedClusterRoles(statefulSet) + relatedConfigMaps := GetWorkloadRelatedConfigMaps(statefulSetWorkload) + relatedSecrets := GetWorkloadRelatedSecrets(statefulSetWorkload) + relatedServiceAccounts := GetWorkloadRelatedServiceAccounts(statefulSetWorkload) + relatedClusterRoleBindings := GetWorkloadRelatedClusterRoleBindings(statefulSetWorkload) + relatedClusterRoles := GetWorkloadRelatedClusterRoles(statefulSetWorkload) relatedResources = common.RelatedClusterServices{ConfigMaps: relatedConfigMaps, Secrets: relatedSecrets, ServiceAccounts: relatedServiceAccounts, ClusterRoleBindings: relatedClusterRoleBindings, ClusterRoles: relatedClusterRoles} } diff --git 
a/resources/workloadServices_test.go b/resources/workloadServices_test.go new file mode 100644 index 0000000..8f26c83 --- /dev/null +++ b/resources/workloadServices_test.go @@ -0,0 +1,109 @@ +package resources + +import ( + "main.go/common" + "reflect" + "testing" +) + +func getTestWorkloadRelatedClusterRoleBindings(workload Workload) (relatedClusterRoleBindings []string) { + + if serviceAccountName := workload.GetServiceAccountName(); serviceAccountName != "" { + clusterRoleBindings := GetTestClusterRoleBindings() + for _, clusterRoleBinding := range clusterRoleBindings { + if reflect.ValueOf(clusterRoleBinding).IsValid() { + for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { + if clusterRoleBindingSubject.Kind == "ServiceAccount" && clusterRoleBindingSubject.Name == serviceAccountName { + clusterRoleBindingName := clusterRoleBinding.Name + relatedClusterRoleBindings = append(relatedClusterRoleBindings, clusterRoleBindingName) + } + } + } + } + } + return relatedClusterRoleBindings +} + +func getTestWorkloadRelatedClusterRoles(workload Workload) (relatedClusterRoles []string) { + + if serviceAccountName := workload.GetServiceAccountName(); serviceAccountName != "" { + clusterRoleBindings := GetTestClusterRoleBindings() + for _, clusterRoleBinding := range clusterRoleBindings { + if reflect.ValueOf(clusterRoleBinding).IsValid() { + for _, clusterRoleBindingSubject := range clusterRoleBinding.Subjects { + if clusterRoleBindingSubject.Kind == "ServiceAccount" && clusterRoleBindingSubject.Name == serviceAccountName { + clusterRoleName := clusterRoleBinding.RoleRef.Name + relatedClusterRoles = append(relatedClusterRoles, clusterRoleName) + } + } + } + } + } + + return relatedClusterRoles +} + +// Deployment Kind +func TestDeploymentRelatedResources(t *testing.T) { + // + var relatedResources common.RelatedClusterServices + deployment := GetTestDeployment() + deploymentWorkload := Deployment(deployment) + if reflect.ValueOf(deployment).IsValid() { + 
relatedConfigMaps := GetWorkloadRelatedConfigMaps(deploymentWorkload) + relatedSecrets := GetWorkloadRelatedSecrets(deploymentWorkload) + relatedServiceAccounts := GetWorkloadRelatedServiceAccounts(deploymentWorkload) + relatedClusterRoleBindings := getTestWorkloadRelatedClusterRoleBindings(deploymentWorkload) + relatedClusterRoles := getTestWorkloadRelatedClusterRoles(deploymentWorkload) + relatedResources = common.RelatedClusterServices{ConfigMaps: relatedConfigMaps, Secrets: relatedSecrets, ServiceAccounts: relatedServiceAccounts, ClusterRoleBindings: relatedClusterRoleBindings, ClusterRoles: relatedClusterRoles} + } + + if reflect.ValueOf(relatedResources).IsZero() { + t.Errorf("Expected related resources for deployment: %s, got zero", deployment.Name) + } else { + t.Logf("Deployment: %s related resources:\n %v", deployment.Name, relatedResources) + } + +} + +// DaemonSet Kind +func TestDaemonSetRelatedResources(t *testing.T) { + var relatedResources common.RelatedClusterServices + daemonSet := GetTestDaemonSet() + daemonSetWorkload := DaemonSet(daemonSet) + if reflect.ValueOf(daemonSet).IsValid() { + relatedConfigMaps := GetWorkloadRelatedConfigMaps(daemonSetWorkload) + relatedSecrets := GetWorkloadRelatedSecrets(daemonSetWorkload) + relatedServiceAccounts := GetWorkloadRelatedServiceAccounts(daemonSetWorkload) + relatedClusterRoleBindings := getTestWorkloadRelatedClusterRoleBindings(daemonSetWorkload) + relatedClusterRoles := getTestWorkloadRelatedClusterRoles(daemonSetWorkload) + relatedResources = common.RelatedClusterServices{ConfigMaps: relatedConfigMaps, Secrets: relatedSecrets, ServiceAccounts: relatedServiceAccounts, ClusterRoleBindings: relatedClusterRoleBindings, ClusterRoles: relatedClusterRoles} + } + if reflect.ValueOf(relatedResources).IsZero() { + t.Errorf("Expected related resources for daemonset: %s, got zero", daemonSet.Name) + } else { + t.Logf("Daemonset: %s related resources:\n %v", daemonSet.Name, relatedResources) + } + +} + +// 
StatefulSet Kind +func TestStatefulSetRelatedResources(t *testing.T) { + // + var relatedResources common.RelatedClusterServices + statefulSet := GetTestStatefulSet() + statefulSetWorkload := StatefulSet(statefulSet) + if reflect.ValueOf(statefulSet).IsValid() { + relatedConfigMaps := GetWorkloadRelatedConfigMaps(statefulSetWorkload) + relatedSecrets := GetWorkloadRelatedSecrets(statefulSetWorkload) + relatedServiceAccounts := GetWorkloadRelatedServiceAccounts(statefulSetWorkload) + relatedClusterRoleBindings := getTestWorkloadRelatedClusterRoleBindings(statefulSetWorkload) + relatedClusterRoles := getTestWorkloadRelatedClusterRoles(statefulSetWorkload) + relatedResources = common.RelatedClusterServices{ConfigMaps: relatedConfigMaps, Secrets: relatedSecrets, ServiceAccounts: relatedServiceAccounts, ClusterRoleBindings: relatedClusterRoleBindings, ClusterRoles: relatedClusterRoles} + } + if reflect.ValueOf(relatedResources).IsZero() { + t.Errorf("Expected related resources for statefulset: %s, got zero", statefulSet.Name) + } else { + t.Logf("Statefulset: %s related resources:\n %v", statefulSet.Name, relatedResources) + } +} From 1509f85fec8763df89bde9103f205ab883e90f7d Mon Sep 17 00:00:00 2001 From: ralongit Date: Sun, 24 Sep 2023 13:49:53 +0300 Subject: [PATCH 2/2] Add comments & modify tests - Added more comments - Updated some tests --- common/logger.go | 12 ++-- main_test.go | 9 --- mockLogzioListener/listener.go | 1 - resources/resourceInformer.go | 85 ++++++++++++++++------ resources/resourceInformer_test.go | 109 ++++++++++++++++------------- resources/resourceServices.go | 8 +-- 6 files changed, 132 insertions(+), 92 deletions(-) diff --git a/common/logger.go b/common/logger.go index 89dc5a3..a43af41 100644 --- a/common/logger.go +++ b/common/logger.go @@ -80,9 +80,9 @@ func ParseEventLog(msg string, extraFields ...interface{}) (eventLog string) { // If there are extra fields, convert them to a JSON string and unmarshal into logEvent extra := 
fmt.Sprintf("%s", extraFields...) - if err = json.Unmarshal([]byte(extra), &logEvent); err != nil && extra != "" { + if err = json.Unmarshal([]byte(extra), &logEvent); err != nil && extra != "" && extra != "[]" { // If there is an error in parsing the extra fields, log the error - log.Printf("\n[ERROR] Failed to parse log extra data(%T): %s\tlog(%T):\n%v to Logz.io.\nRelated error:\n%v", extra, logEvent, extra, logEvent, err) + log.Printf("\n[ERROR] Failed to parse log extra data(%T): %s\tlog(%T):\n%v to Logz.io.\nRelated error:\n%v", extra, extra, logEvent, logEvent, err) } } @@ -102,9 +102,9 @@ func ParseEventLog(msg string, extraFields ...interface{}) (eventLog string) { } // Convert the parsed event log byte slice to a string - eventLog = fmt.Sprintf("%s", string(parsedEventLog)) + //eventLog = fmt.Sprintf("%s", string(parsedEventLog)) - return eventLog + return string(parsedEventLog) } func SendLog(msg string, extraFields ...interface{}) { @@ -124,9 +124,5 @@ func SendLog(msg string, extraFields ...interface{}) { wg.Add(1) wg.Wait() } - } else { - // If the logz.io logger is not configured, log a message and do not send the log - log.Printf("Logz.io logger isn't configured.\nLog won't be sent:\n%s", msg) } - } diff --git a/main_test.go b/main_test.go index b49b04c..397b472 100644 --- a/main_test.go +++ b/main_test.go @@ -20,13 +20,4 @@ func TestDeployEvents(t *testing.T) { } } - - //// - // - //common.DynamicClient = common.ConfigureClusterDynamicClient() - //if common.DynamicClient != nil { - // resources.AddEventHandlers() - //} - // - //common.LogzioLogger.Stop() } diff --git a/mockLogzioListener/listener.go b/mockLogzioListener/listener.go index f377d41..31b92b3 100644 --- a/mockLogzioListener/listener.go +++ b/mockLogzioListener/listener.go @@ -40,7 +40,6 @@ func (h *ListenerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Define the structure of the expected request body type RequestBody struct { - Message string `json:"message"` } // Read 
the request body diff --git a/resources/resourceInformer.go b/resources/resourceInformer.go index 60fc518..cb66a21 100644 --- a/resources/resourceInformer.go +++ b/resources/resourceInformer.go @@ -45,6 +45,7 @@ func createResourceInformer(resourceGVR schema.GroupVersionResource, clusterClie // AddInformerEventHandler adds a new event handler to a given resource informer. // It logs events when a resource is added, updated, or deleted. func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced bool) { + var parsedEventLog []byte // Check if the resource informer is nil if resourceInformer == nil { log.Println("[ERROR] Resource informer is nil") @@ -61,7 +62,7 @@ func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced mux.RLock() defer mux.RUnlock() if synced { - StructResourceLog(map[string]interface{}{ + _, parsedEventLog = StructResourceLog(map[string]interface{}{ "eventType": "ADDED", "newObject": obj, }) @@ -72,7 +73,7 @@ func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced mux.RLock() defer mux.RUnlock() if synced { - StructResourceLog(map[string]interface{}{ + _, parsedEventLog = StructResourceLog(map[string]interface{}{ "eventType": "MODIFIED", "newObject": newObj, "oldObject": oldObj, @@ -84,7 +85,7 @@ func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced mux.RLock() defer mux.RUnlock() if synced { - StructResourceLog(map[string]interface{}{ + _, parsedEventLog = StructResourceLog(map[string]interface{}{ "eventType": "DELETED", "newObject": obj, }) @@ -97,7 +98,7 @@ func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced common.SendLog(fmt.Sprintf("[ERROR] Failed to add event handler for informer.\nERROR:\n%v", err)) return } - + common.SendLog(string(parsedEventLog)) // Create a new context that will get cancelled when an interrupt signal is received ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer 
cancel() @@ -131,6 +132,7 @@ func AddInformerEventHandler(resourceInformer cache.SharedIndexInformer) (synced return synced } + func AddEventHandlers() { // Creates informer for each cluster API and events handler for each informer resourceAPIList := map[string]string{ @@ -179,14 +181,32 @@ func AddEventHandlers() { } // EventObject transforms a raw object into a KubernetesEvent object. -// It takes a map representing the raw object and a boolean indicating whether the object is new or not. +// It takes a map representing the raw object and a boolean indicating whether the object is new or n +// ot. func EventObject(rawObj map[string]interface{}, isNew bool) (resourceObject common.KubernetesEvent) { + // Check if the raw object or its "newObject" and "oldObject" fields are nil - if rawObj == nil || rawObj["newObject"] == nil || rawObj["oldObject"] == nil { - log.Println("[ERROR] rawObj is nil or does not have required fields: newObject/oldObject.") + // Check if the raw object is nil + if rawObj == nil { + log.Println("[ERROR] rawObj is nil.") + // Return an empty KubernetesEvent object if the raw object is invalid + return resourceObject + } + + // Check if the newObject field of rawObj is nil, if it is required + if isNew && rawObj["newObject"] == nil { + log.Println("[ERROR] rawObj does not have required field: newObject.") + // Return an empty KubernetesEvent object if the raw object is invalid + return resourceObject + } + + // Check if the oldObject field of rawObj is nil, if it is required + if !isNew && rawObj["oldObject"] == nil { + log.Println("[ERROR] rawObj does not have required field: oldObject.") // Return an empty KubernetesEvent object if the raw object is invalid return resourceObject } + // Initialize an empty unstructured object rawUnstructuredObj := unstructured.Unstructured{} // Initialize a buffer to store the JSON-encoded raw object @@ -197,9 +217,7 @@ func EventObject(rawObj map[string]interface{}, isNew bool) (resourceObject comm // 
Unmarshal the JSON-encoded raw object into a KubernetesEvent object err := json.Unmarshal(buffer.Bytes(), &resourceObject) if err != nil { - // Log the error if unmarshalling fails - log.Printf("Failed to unmarshal resource object: %v", err) - // handle error as necessary + log.Printf("Failed to unmarshal resource object:\n%v\nError:\n%v", rawObj, err) } else { // If unmarshalling is successful, determine whether to set the unstructured object's content based on the "isNew" flag if isNew { @@ -214,45 +232,72 @@ func EventObject(rawObj map[string]interface{}, isNew bool) (resourceObject comm return resourceObject } -func StructResourceLog(event map[string]interface{}) (isStructured bool) { - var msg string +// StructResourceLog receives an event and logs it in a structured format. +func StructResourceLog(event map[string]interface{}) (isStructured bool, marshaledEvent []byte) { + + // Check if event is nil if event == nil { log.Println("[ERROR] Event is nil") - return false + return false, nil } + + // Assert that event["eventType"] is a string eventType, ok := event["eventType"].(string) if !ok { log.Println("[ERROR] eventType is not a string") - return false + return false, nil } + + // Initialize an empty LogEvent logEvent := &common.LogEvent{} + + // Marshal the event to a string eventStr, _ := json.Marshal(event) - json.Unmarshal(eventStr, logEvent) + + // Unmarshal the string back to a logEvent + err := json.Unmarshal(eventStr, logEvent) + if err != nil { + + return false, nil + } + + // Get the new event object newEventObj := EventObject(logEvent.NewObject, true) + + // Get the resource details from the new event object resourceKind := newEventObj.Kind resourceName := newEventObj.KubernetesMetadata.Name resourceNamespace := newEventObj.KubernetesMetadata.Namespace newResourceVersion := newEventObj.ResourceVersion + + var msg string + // If event is a modification event, get the old event object and parse the event message accordingly if eventType == 
"MODIFIED" { oldEventObj := EventObject(logEvent.OldObject, false) oldResourceName := oldEventObj.KubernetesMetadata.Name oldResourceNamespace := oldEventObj.KubernetesMetadata.Namespace oldResourceVersion := oldEventObj.KubernetesMetadata.ResourceVersion msg = common.ParseEventMessage(eventType, oldResourceName, resourceKind, oldResourceNamespace, newResourceVersion, oldResourceVersion) - } else { + // If event is not a modification event, parse the event message with only the new event object msg = common.ParseEventMessage(eventType, resourceName, resourceKind, resourceNamespace, newResourceVersion) } + + // Get the related cluster services for the resource event["relatedClusterServices"] = GetClusterRelatedResources(resourceKind, resourceName, resourceNamespace) + event["message"] = msg - marshaledEvent, err := json.Marshal(event) + // Marshal the event to a string + marshaledEvent, err = json.Marshal(event) if err != nil { log.Printf("[ERROR] Failed to marshel resource event logs.\nERROR:\n%v", err) } - common.SendLog(msg, marshaledEvent) + + // Mark the goroutine as done defer wg.Done() - isStructured = true - return isStructured + // Return true indicating the log is structured + isStructured = true + return isStructured, marshaledEvent } diff --git a/resources/resourceInformer_test.go b/resources/resourceInformer_test.go index b6b2420..d593f5d 100644 --- a/resources/resourceInformer_test.go +++ b/resources/resourceInformer_test.go @@ -1,7 +1,8 @@ package resources import ( - "github.com/stretchr/testify/mock" + "encoding/json" + "fmt" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -9,15 +10,15 @@ import ( fakeDynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/tools/cache" "log" + "sigs.k8s.io/yaml" "testing" ) -func createFakeResourceInformer(gvr schema.GroupVersionResource) cache.SharedIndexInformer { - fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) +func 
createFakeResourceInformer(gvr schema.GroupVersionResource, fakeDynamicClient *fakeDynamic.FakeDynamicClient) (fakeResourceInformer cache.SharedIndexInformer) { factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(fakeDynamicClient, 0, corev1.NamespaceAll, nil) - fakeResourceInformer := factory.ForResource(gvr).Informer() + fakeResourceInformer = factory.ForResource(gvr).Informer() if fakeResourceInformer == nil { - log.Printf("[ERROR] Resource Informer was not created") + log.Fatalf("[ERROR] Resource Informer was not created") // program will exit if this happens } else { log.Printf("Resource Informer created successfully") } @@ -25,60 +26,70 @@ func createFakeResourceInformer(gvr schema.GroupVersionResource) cache.SharedInd } func TestCreateResourceInformer(t *testing.T) { + fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) resourceGVR := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} - informer := createFakeResourceInformer(resourceGVR) + informer := createFakeResourceInformer(resourceGVR, fakeDynamicClient) if informer == nil { - t.Errorf("Failed to create resource informer") + t.Fatalf("Failed to create resource informer") // test will fail if this happens } } -// Define an interface that includes the function you want to mock -type InformerCreator interface { - createFakeResourceInformer(gvr schema.GroupVersionResource, dynamicClient *fakeDynamic.FakeDynamicClient) cache.SharedIndexInformer -} +func TestEventObject(t *testing.T) { + testDeployment := GetTestDeployment() + // Marshal the struct to JSON + jsonData, err := yaml.Marshal(testDeployment) + if err != nil { + fmt.Printf("error: %s", err) + return + } + + // Unmarshal the JSON to a map + var deploymentMap map[string]interface{} + err = yaml.Unmarshal(jsonData, &deploymentMap) + if err != nil { + fmt.Printf("error: %s", err) + return + } + deploymentMap["eventType"] = "ADDED" + deploymentMap["kind"] = "Deployment" + 
deploymentMap["newObject"] = &deploymentMap + eventObject := EventObject(deploymentMap, true) + + if eventObject.Kind != "Deployment" { + t.Errorf("Failed to create event object, expected kind Deployment, got %s", eventObject.Kind) + } + + if eventObject.KubernetesMetadata.Name != "test-deployment" { + t.Errorf("Failed to create event object, expected name test-deployment, got %s", eventObject.KubernetesMetadata.Name) + } -// Have your mock type implement the interface -type MockInformerCreator struct { - mock.Mock + if eventObject.KubernetesMetadata.Namespace != "default" { + t.Errorf("Failed to create event object, expected namespace default, got %s", eventObject.KubernetesMetadata.Namespace) + } } -// Replace createResourceInformer with an instance of the interface +func TestStructResourceLog(t *testing.T) { + var deploymentMap map[string]interface{} + testDeployment := GetTestDeployment() + jsonDeployment, err := json.Marshal(testDeployment) + if err != nil { + t.Errorf("Failed to marshal test deployment.\nError:\n %v", err) + } -func TestAddEventHandlers(t *testing.T) { - // Create a new mock informer creator - fakeDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) - mockInformerCreator := new(MockInformerCreator) - mockInformer := createFakeResourceInformer(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "deployments"}) - // Define what should be returned when the mock is called - mockInformerCreator.On("CreateFakeResourceInformer", mock.Anything, mock.Anything).Return(mockInformer) + err = json.Unmarshal(jsonDeployment, &deploymentMap) + if err != nil { + t.Errorf("Failed to unmarshal test deployment.\nError:\n %v", err) + } + deploymentEventMap := map[string]interface{}{ - // Run the function that you're testing - AddEventHandlers() + "eventType": "ADDED", + "kind": "Deployment", + "newObject": deploymentMap, + } + isStructured, _ := StructResourceLog(deploymentEventMap) - // Check that the mock was called with the 
expected parameters - mockInformerCreator.AssertCalled(t, "CreateFakeResourceInformer", schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, fakeDynamicClient) - mockInformerCreator.AssertCalled(t, "CreateFakeResourceInformer", schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, fakeDynamicClient) - // ... add more assertions here ... -} -func (m *MockInformerCreator) CreateFakeResourceInformer(gvr schema.GroupVersionResource, dynamicClient *fakeDynamic.FakeDynamicClient) cache.SharedIndexInformer { - args := m.Called(gvr, dynamicClient) - return args.Get(0).(cache.SharedIndexInformer) + if !isStructured { + t.Errorf("Failed to structure resource log") + } } - -//func TestAddInformerEventHandler(t *testing.T) { -// resourceGVR := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} -// mockDynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) -// -// informer := CreateFakeResourceInformer(resourceGVR, mockDynamicClient) -// -// if informer == nil { -// t.Errorf("Failed to create resource informer") -// } -// -// synced := AddInformerEventHandler(informer) -// -// if !synced { -// t.Errorf("Failed to add event handler for informer") -// } -//} diff --git a/resources/resourceServices.go b/resources/resourceServices.go index 5c3a136..f44ca0f 100644 --- a/resources/resourceServices.go +++ b/resources/resourceServices.go @@ -6,8 +6,6 @@ import ( "reflect" ) -// Similarly, define methods for other workload types... - func GetSecretRelatedWorkloads(secretName string, workloads []Workload) (relatedWorkloads []string) { // Create a map of workload names to workloads. workloadsMap := map[string]Workload{} @@ -149,7 +147,7 @@ func ServiceAccountRelatedWorkloads(serviceAccountName string) (relatedWorkloads } // ClusterRoleBinding Kind - +// ClusterRoleBindingRelatedWorkloads gets all the workloads in the cluster related to a specific ClusterRoleBinding. 
func ClusterRoleBindingRelatedWorkloads(clusterRoleBindingName string) (relatedWorkloads common.RelatedClusterServices) { // clusterRoleBinding := GetClusterRoleBinding(clusterRoleBindingName) @@ -168,8 +166,8 @@ func ClusterRoleBindingRelatedWorkloads(clusterRoleBindingName string) (relatedW return relatedWorkloads } -// ClusterRole Kind - +// ClusterRoleRelatedWorkloads ClusterRole Kind +// ClusterRoleRelatedWorkloads gets all the workloads in the cluster related to a specific ClusterRole. func ClusterRoleRelatedWorkloads(clusterRoleName string) (relatedWorkloads common.RelatedClusterServices) { clusterRoleBindings := GetClusterRoleBindings()