diff --git a/.golangci.yml b/.golangci.yml index 321c2871..f0faae0a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,13 +1,15 @@ run: - timeout: 10m + timeout: 5m allow-parallel-runners: true issues: # don't skip warning about doc comments # don't exclude the default set of lint exclude-use-default: false + # restore some of the defaults + # (fill in the rest as needed) exclude-dirs: - - test + - tests exclude-files: - ".*_test\\.go" @@ -16,6 +18,7 @@ linters: enable: - dupl - errcheck + - copyloopvar - ginkgolinter - goconst - gocyclo @@ -33,3 +36,8 @@ linters: - unconvert - unparam - unused + +linters-settings: + revive: + rules: + - name: comment-spacings \ No newline at end of file diff --git a/api/v1alpha1/validationresult_types.go b/api/v1alpha1/validationresult_types.go index 44683b6b..12e562b7 100644 --- a/api/v1alpha1/validationresult_types.go +++ b/api/v1alpha1/validationresult_types.go @@ -118,11 +118,11 @@ func DefaultValidationCondition() ValidationCondition { } } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age" -//+kubebuilder:printcolumn:name="Plugin",type="string",JSONPath=".spec.plugin",description="Plugin" -//+kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="State" +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age" +// +kubebuilder:printcolumn:name="Plugin",type="string",JSONPath=".spec.plugin",description="Plugin" +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="State" // ValidationResult is the Schema for the validationresults API. type ValidationResult struct { @@ -151,7 +151,7 @@ func (r *ValidationResult) Hash() string { return base64.StdEncoding.EncodeToString(hash) } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // ValidationResultList contains a list of ValidationResult. type ValidationResultList struct { diff --git a/api/v1alpha1/validatorconfig_types.go b/api/v1alpha1/validatorconfig_types.go index 9eb6277d..66befaed 100644 --- a/api/v1alpha1/validatorconfig_types.go +++ b/api/v1alpha1/validatorconfig_types.go @@ -121,8 +121,8 @@ type ConditionType string // HelmChartDeployedCondition defines whether the helm chart was installed/pulled/upgraded correctly. const HelmChartDeployedCondition ConditionType = "HelmChartDeployed" -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status // ValidatorConfig is the Schema for the validatorconfigs API. type ValidatorConfig struct { @@ -133,7 +133,7 @@ type ValidatorConfig struct { Status ValidatorConfigStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // ValidatorConfigList contains a list of ValidatorConfig. 
type ValidatorConfigList struct { diff --git a/build b/build index d0ef938e..c8473518 160000 --- a/build +++ b/build @@ -1 +1 @@ -Subproject commit d0ef938e9dc11f1a938d1d45646fd56e1d891086 +Subproject commit c84735189816c1bafc7569af23dca9002993f6ed diff --git a/chart/validator/README.md b/chart/validator/README.md index cbbde27f..6a0a12c5 100644 --- a/chart/validator/README.md +++ b/chart/validator/README.md @@ -11,16 +11,7 @@ The following table lists the configurable parameters of the Validator chart and | Parameter | Description | Default | | ------------------------ | ----------------------- | -------------- | -| `controllerManager.kubeRbacProxy.args` | | `["--secure-listen-address=0.0.0.0:8443", "--upstream=http://127.0.0.1:8080/", "--logtostderr=true", "--v=0"]` | -| `controllerManager.kubeRbacProxy.containerSecurityContext.allowPrivilegeEscalation` | | `false` | -| `controllerManager.kubeRbacProxy.containerSecurityContext.capabilities.drop` | | `["ALL"]` | -| `controllerManager.kubeRbacProxy.image.repository` | | `"gcr.io/kubebuilder/kube-rbac-proxy"` | -| `controllerManager.kubeRbacProxy.image.tag` | | `"v0.16.0"` | -| `controllerManager.kubeRbacProxy.resources.limits.cpu` | | `"500m"` | -| `controllerManager.kubeRbacProxy.resources.limits.memory` | | `"128Mi"` | -| `controllerManager.kubeRbacProxy.resources.requests.cpu` | | `"5m"` | -| `controllerManager.kubeRbacProxy.resources.requests.memory` | | `"64Mi"` | -| `controllerManager.manager.args` | | `["--health-probe-bind-address=:8081", "--leader-elect"]` | +| `controllerManager.manager.args` | | `["--health-probe-bind-address=:8081", "--metrics-bind-address=:8443", "--leader-elect"]` | | `controllerManager.manager.containerSecurityContext.allowPrivilegeEscalation` | | `false` | | `controllerManager.manager.containerSecurityContext.capabilities.drop` | | `["ALL"]` | | `controllerManager.manager.image.repository` | | `"quay.io/validator-labs/validator"` | @@ -54,7 +45,7 @@ The following table lists the configurable parameters of the Validator chart and | `pluginSecrets.oci.pubKeys` | Don't forget to delete these square brackets if you're specifying public keys here! | `[]` | | `pluginSecrets.vSphere` | Don't forget to delete these curly braces if you're specifying credentials here! 
| `{}` | | `helmConfig.registry` | | `"https://validator-labs.github.io"` | -| `plugins` | | `[{"chart": {"name": "validator-plugin-azure", "repository": "validator-plugin-azure", "version": "v0.0.24"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-azure\n tag: v0.0.24\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Override the service account used by Azure validator (optional, could be used for WorkloadIdentityCredentials on AKS)\n # WARNING: the chosen service account must include all RBAC privileges found in templates/manager-rbac.yaml\n serviceAccountName: \"\"\n# Optionally specify the Azure environment to use. 
Defaults to \"AzureCloud\" for public Azure cloud.\n# Other acceptable values are \"AzureUSGovernment\" and \"AzureChinaCloud\".\nazureEnvironment: \"AzureCloud\""}, {"chart": {"name": "validator-plugin-oci", "repository": "validator-plugin-oci", "version": "v0.3.3"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-oci\n tag: v0.3.3\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-kubescape", "repository": "validator-plugin-kubescape", "version": "v0.0.4"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-kubescape\n tag: v0.0.4\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-aws", "repository": "validator-plugin-aws", "version": "v0.1.10"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-aws\n tag: v0.1.10\n resources:\n limits:\n cpu: 500m\n memory: 
128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Override the service account used by AWS validator (optional, could be used for IAM roles for Service Accounts)\n # WARNING: the chosen service account must have the same RBAC privileges as seen in templates/manager-rbac.yaml\n serviceAccountName: \"\""}, {"chart": {"name": "validator-plugin-network", "repository": "validator-plugin-network", "version": "v0.1.0"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: true\n capabilities:\n add:\n - NET_RAW\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-network\n tag: v0.1.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-maas", "repository": "validator-plugin-maas", "version": "v0.0.12"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-maas\n tag: v0.0.12\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-vsphere", "repository": "validator-plugin-vsphere", "version": "v0.1.4"}, "values": "controllerManager:\n kubeRbacProxy:\n args:\n - --secure-listen-address=0.0.0.0:8443\n - --upstream=http://127.0.0.1:8080/\n - --logtostderr=true\n - --v=0\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: gcr.io/kubebuilder/kube-rbac-proxy\n tag: v0.16.0\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 5m\n memory: 64Mi\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=127.0.0.1:8080\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-vsphere\n tag: v0.1.4\n resources:\n limits:\n cpu: 500m\n memory: 
128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}]` | +| `plugins` | | `[{"chart": {"name": "validator-plugin-azure", "repository": "validator-plugin-azure", "version": "v0.0.25"}, "values": "controllerManager:\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=:8443\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-azure\n tag: v0.0.25\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Override the service account used by Azure validator (optional, could be used for WorkloadIdentityCredentials on AKS)\n # WARNING: the chosen service account must include all RBAC privileges found in templates/manager-rbac.yaml\n serviceAccountName: \"\"\n# Optionally specify the Azure environment to use. Defaults to \"AzureCloud\" for public Azure cloud.\n# Other acceptable values are \"AzureUSGovernment\" and \"AzureChinaCloud\".\nazureEnvironment: \"AzureCloud\""}, {"chart": {"name": "validator-plugin-oci", "repository": "validator-plugin-oci", "version": "v0.3.4"}, "values": "controllerManager:\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=:8443\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-oci\n tag: v0.3.4\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-kubescape", "repository": "validator-plugin-kubescape", "version": "v0.0.5"}, "values": "controllerManager:\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=:8443\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-kubescape\n tag: v0.0.5\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n # Optionally specify a volumeMount to mount a volume containing a private key\n # to leverage Azure Service principal with certificate authentication.\n volumeMounts: []\n replicas: 1\n serviceAccount:\n annotations: {}\n # Optionally specify a volume containing a private key to leverage Azure Service\n # principal with certificate authentication.\n volumes: []\n # Optionally specify additional labels to use for the controller-manager 
Pods.\n podLabels: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-aws", "repository": "validator-plugin-aws", "version": "v0.1.11"}, "values": "controllerManager:\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=:8443\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-aws\n tag: v0.1.11\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP\nauth:\n # Override the service account used by AWS validator (optional, could be used for IAM roles for Service Accounts)\n # WARNING: the chosen service account must have the same RBAC privileges as seen in templates/manager-rbac.yaml\n serviceAccountName: \"\""}, {"chart": {"name": "validator-plugin-network", "repository": "validator-plugin-network", "version": "v0.1.1"}, "values": "controllerManager:\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=:8443\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: true\n capabilities:\n add:\n - NET_RAW\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-network\n tag: v0.1.1\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-maas", "repository": "validator-plugin-maas", "version": "v0.0.13"}, "values": "controllerManager:\n manager:\n args:\n - --metrics-bind-address=:8443\n - --health-probe-bind-address=:8081\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-maas\n tag: v0.0.13\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: 8443\n type: ClusterIP"}, {"chart": {"name": "validator-plugin-vsphere", "repository": "validator-plugin-vsphere", "version": "v0.1.5"}, "values": "controllerManager:\n manager:\n args:\n - --health-probe-bind-address=:8081\n - --metrics-bind-address=:8443\n - --leader-elect\n containerSecurityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n image:\n repository: quay.io/validator-labs/validator-plugin-vsphere\n tag: v0.1.5\n resources:\n limits:\n cpu: 500m\n memory: 128Mi\n requests:\n cpu: 10m\n memory: 64Mi\n replicas: 1\n serviceAccount:\n annotations: {}\nkubernetesClusterDomain: cluster.local\nmetricsService:\n ports:\n - name: https\n port: 8443\n protocol: TCP\n targetPort: https\n type: ClusterIP"}]` | diff --git a/chart/validator/templates/deployment.yaml b/chart/validator/templates/deployment.yaml index f62510c3..13b5e5bd 100644 --- a/chart/validator/templates/deployment.yaml +++ b/chart/validator/templates/deployment.yaml @@ 
-39,18 +39,6 @@ spec: runAsNonRoot: false {{- end }} containers: - - args: {{- toYaml .Values.controllerManager.kubeRbacProxy.args | nindent 8 }} - env: - - name: KUBERNETES_CLUSTER_DOMAIN - value: {{ quote .Values.kubernetesClusterDomain }} - image: {{ .Values.controllerManager.kubeRbacProxy.image.repository }}:{{ .Values.controllerManager.kubeRbacProxy.image.tag | default .Chart.AppVersion }} - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: {{- toYaml .Values.controllerManager.kubeRbacProxy.resources | nindent 10 }} - securityContext: {{- toYaml .Values.controllerManager.kubeRbacProxy.containerSecurityContext | nindent 10 }} - args: {{- toYaml .Values.controllerManager.manager.args | nindent 8 }} command: - /manager diff --git a/chart/validator/templates/proxy-rbac.yaml b/chart/validator/templates/metrics-auth-rbac.yaml similarity index 75% rename from chart/validator/templates/proxy-rbac.yaml rename to chart/validator/templates/metrics-auth-rbac.yaml index 2765387d..ec603c44 100644 --- a/chart/validator/templates/proxy-rbac.yaml +++ b/chart/validator/templates/metrics-auth-rbac.yaml @@ -1,9 +1,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "chart.fullname" . }}-proxy-role + name: {{ include "chart.fullname" . }}-metrics-auth-role labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator {{- include "chart.labels" . | nindent 4 }} @@ -24,16 +23,15 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ include "chart.fullname" . }}-proxy-rolebinding + name: {{ include "chart.fullname" . }}-metrics-auth-rolebinding labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator {{- include "chart.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: '{{ include "chart.fullname" . }}-proxy-role' + name: '{{ include "chart.fullname" . }}-metrics-auth-role' subjects: - kind: ServiceAccount name: '{{ include "chart.fullname" . }}-controller-manager' diff --git a/chart/validator/templates/metrics-reader-rbac.yaml b/chart/validator/templates/metrics-reader-rbac.yaml index a99d7eb7..c1def927 100644 --- a/chart/validator/templates/metrics-reader-rbac.yaml +++ b/chart/validator/templates/metrics-reader-rbac.yaml @@ -3,7 +3,6 @@ kind: ClusterRole metadata: name: {{ include "chart.fullname" . }}-metrics-reader labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator {{- include "chart.labels" . | nindent 4 }} diff --git a/chart/validator/templates/metrics-service.yaml b/chart/validator/templates/metrics-service.yaml index ce0618db..efc5bea5 100644 --- a/chart/validator/templates/metrics-service.yaml +++ b/chart/validator/templates/metrics-service.yaml @@ -3,7 +3,6 @@ kind: Service metadata: name: {{ include "chart.fullname" . 
}}-controller-manager-metrics-service labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator control-plane: controller-manager diff --git a/chart/validator/values.yaml b/chart/validator/values.yaml index 544024c1..90448bd7 100644 --- a/chart/validator/values.yaml +++ b/chart/validator/values.yaml @@ -1,28 +1,8 @@ controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false @@ -216,33 +196,13 @@ plugins: - chart: name: validator-plugin-azure repository: validator-plugin-azure - version: v0.0.24 + version: v0.0.25 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false @@ -251,7 +211,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-azure - tag: v0.0.24 + tag: v0.0.25 resources: limits: cpu: 500m @@ -288,33 +248,13 @@ plugins: - chart: name: validator-plugin-oci repository: validator-plugin-oci - version: v0.3.3 + version: v0.3.4 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false @@ -323,7 +263,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-oci - tag: v0.3.3 + tag: v0.3.4 resources: limits: cpu: 500m @@ -345,33 +285,13 @@ plugins: - chart: name: validator-plugin-kubescape repository: validator-plugin-kubescape - version: v0.0.4 + version: v0.0.5 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false @@ -380,7 +300,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-kubescape - 
tag: v0.0.4 + tag: v0.0.5 resources: limits: cpu: 500m @@ -410,33 +330,13 @@ plugins: - chart: name: validator-plugin-aws repository: validator-plugin-aws - version: v0.1.10 + version: v0.1.11 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false @@ -445,7 +345,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-aws - tag: v0.1.10 + tag: v0.1.11 resources: limits: cpu: 500m @@ -471,33 +371,13 @@ plugins: - chart: name: validator-plugin-network repository: validator-plugin-network - version: v0.1.0 + version: v0.1.1 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: true @@ -508,7 +388,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-network - tag: v0.1.0 + tag: v0.1.1 resources: limits: cpu: 500m @@ -530,32 +410,12 @@ plugins: - chart: name: validator-plugin-maas repository: validator-plugin-maas - version: v0.0.12 + version: v0.0.13 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: + - --metrics-bind-address=:8443 - --health-probe-bind-address=:8081 - --leader-elect containerSecurityContext: @@ -565,7 +425,7 @@ plugins: - ALL image: repository: quay.io/validator-labs/validator-plugin-maas - tag: v0.0.12 + tag: v0.0.13 resources: limits: cpu: 500m @@ -582,39 +442,18 @@ plugins: - name: https port: 8443 protocol: TCP - targetPort: https + targetPort: 8443 type: ClusterIP - chart: name: validator-plugin-vsphere repository: validator-plugin-vsphere - version: v0.1.4 + version: v0.1.5 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false @@ -623,7 +462,7 @@ plugins: - 
ALL image: repository: quay.io/validator-labs/validator-plugin-vsphere - tag: v0.1.4 + tag: v0.1.5 resources: limits: cpu: 500m
diff --git a/cmd/main.go b/cmd/main.go
index c8354595..1ffcefb6 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2023.
+Copyright 2024.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,24 +14,27 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package main initializes the ValidationConfig and ValidationResult controllers.
 package main
 
 import (
+	"crypto/tls"
 	"flag"
 	"os"
 	"time"
 
 	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
 	// to ensure that exec-entrypoint and run can make use of them.
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
 
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
 
 	validationv1alpha1 "github.com/validator-labs/validator/api/v1alpha1"
 	"github.com/validator-labs/validator/internal/controller"
@@ -39,7 +42,7 @@ import (
 	"github.com/validator-labs/validator/pkg/helm/release"
 	"github.com/validator-labs/validator/pkg/kube"
 	"github.com/validator-labs/validator/pkg/sinks"
-	//+kubebuilder:scaffold:imports
+	// +kubebuilder:scaffold:imports
 )
 
 var (
@@ -51,15 +54,26 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 
 	utilruntime.Must(validationv1alpha1.AddToScheme(scheme))
-	//+kubebuilder:scaffold:scheme
+	// +kubebuilder:scaffold:scheme
 }
 
 func main() {
+	var metricsAddr string
 	var enableLeaderElection bool
 	var probeAddr string
+	var secureMetrics bool
+	var enableHTTP2 bool
+	var tlsOpts []func(*tls.Config)
+	flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
+		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
 	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
-	flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+
-		"Enabling this will ensure there is only one active controller manager.")
+	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
+		"Enable leader election for controller manager. "+
+			"Enabling this will ensure there is only one active controller manager.")
+	flag.BoolVar(&secureMetrics, "metrics-secure", true,
+		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
+	flag.BoolVar(&enableHTTP2, "enable-http2", false,
+		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
 	opts := zap.Options{
 		Development: true,
 	}
@@ -70,11 +84,61 @@ func main() {
 
 	ns := os.Getenv("NAMESPACE")
 
+	// If the enable-http2 flag is false (the default), http/2 should be disabled
+	// due to its vulnerabilities. More specifically, disabling http/2 will
+	// prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and
+	// Rapid Reset CVEs. For more information see:
+	//  - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
+	//  - https://github.com/advisories/GHSA-4374-p667-p6c8
+	disableHTTP2 := func(c *tls.Config) {
+		setupLog.Info("disabling http/2")
+		c.NextProtos = []string{"http/1.1"}
+	}
+
+	if !enableHTTP2 {
+		tlsOpts = append(tlsOpts, disableHTTP2)
+	}
+
+	webhookServer := webhook.NewServer(webhook.Options{
+		TLSOpts: tlsOpts,
+	})
+
+	// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
+	// More info:
+	// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
+	// - https://book.kubebuilder.io/reference/metrics.html
+	metricsServerOptions := metricsserver.Options{
+		BindAddress:   metricsAddr,
+		SecureServing: secureMetrics,
+		TLSOpts:       tlsOpts,
+	}
+
+	if secureMetrics {
+		// FilterProvider is used to protect the metrics endpoint with authn/authz.
+		// These configurations ensure that only authorized users and service accounts
+		// can access the metrics endpoint. The RBAC rules are configured in 'config/rbac/kustomization.yaml'. More info:
+		// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
+		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
+
+		// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
+		// generate self-signed certificates for the metrics server. While convenient for development and testing,
+		// this setup is not recommended for production.
+
+		// TODO(user): If cert-manager is enabled in config/default/kustomization.yaml,
+		// you can uncomment the following lines to use the certificate managed by cert-manager.
+
+		// metricsServerOptions.CertDir = "/tmp/k8s-metrics-server/metrics-certs"
+		// metricsServerOptions.CertName = "tls.crt"
+		// metricsServerOptions.KeyName = "tls.key"
+	}
+
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
 		Scheme:                 scheme,
+		Metrics:                metricsServerOptions,
+		WebhookServer:          webhookServer,
 		HealthProbeBindAddress: probeAddr,
 		LeaderElection:         enableLeaderElection,
-		LeaderElectionID:       "0d82a3fe.spectrocloud.labs",
+		LeaderElectionID:       "486a6c36.spectrocloud.labs",
 		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
 		// when the Manager ends. This requires the binary to immediately end when the
 		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -86,13 +150,6 @@ func main() {
 		// if you are doing or is intended to do any operation such as perform cleanups
 		// after the manager stops then its usage might be unsafe.
 		// LeaderElectionReleaseOnCancel: true,
-
-		// NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
-		// 	opts.DefaultNamespaces = map[string]cache.Config{
-		// 		ns: {},
-		// 	}
-		// 	return cache.New(config, opts)
-		// },
 	})
 	if err != nil {
 		setupLog.Error(err, "unable to start manager")
@@ -137,8 +194,7 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "ValidatorConfig")
 		os.Exit(1)
 	}
-
-	//+kubebuilder:scaffold:builder
+	// +kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
 		setupLog.Error(err, "unable to set up health check")
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 7379bc69..a1ab1bda 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -1,5 +1,5 @@
 # Adds namespace to all resources.
-namespace: validator
+namespace: validator-system
 
 # Value of this field is prepended to the
 # names of all resources, e.g. a deployment named
@@ -20,125 +20,158 @@ resources:
 - ../manager
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
-# - ../webhook
 # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
-# - ../certmanager
 # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
 #- ../prometheus
+# [METRICS] Expose the controller manager metrics service.
+- metrics_service.yaml
+# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
+# Only Pod(s) running in a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
+# Only CR(s) which require webhooks and are applied in namespaces labeled with 'webhooks: enabled' will
+# be able to communicate with the Webhook Server.
+#- ../network-policy
 
-patchesStrategicMerge:
-# Protect the /metrics endpoint by putting it behind auth.
-# If you want your controller-manager to expose the /metrics
-# endpoint w/o any authn/z, please comment the following line.
-- manager_auth_proxy_patch.yaml
-
-
+# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager
+patches:
+# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
+# More info: https://book.kubebuilder.io/reference/metrics
+- path: manager_metrics_patch.yaml
+  target:
+    kind: Deployment
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
-# - manager_webhook_patch.yaml
-
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
 # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
-# 'CERTMANAGER' needs to be enabled to use ca injection
-# - webhookcainjection_patch.yaml
+#- path: manager_webhook_patch.yaml
 
 # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
# Uncomment the following replacements to add the cert-manager CA injection annotations -# replacements: -# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldPath: .metadata.namespace # namespace of the certificate CR -# targets: -# - select: -# kind: ValidatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - select: -# kind: MutatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - select: -# kind: CustomResourceDefinition -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - source: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldPath: .metadata.name -# targets: -# - select: -# kind: ValidatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - select: -# kind: MutatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - select: -# kind: CustomResourceDefinition -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - source: # Add cert-manager annotation to the webhook Service -# kind: Service -# version: v1 -# name: webhook-service -# fieldPath: .metadata.name # namespace of the service -# targets: -# - select: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# fieldPaths: -# - .spec.dnsNames.0 -# - .spec.dnsNames.1 -# options: -# delimiter: '.' -# index: 0 -# create: true -# - source: -# kind: Service -# version: v1 -# name: webhook-service -# fieldPath: .metadata.namespace # namespace of the service -# targets: -# - select: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# fieldPaths: -# - .spec.dnsNames.0 -# - .spec.dnsNames.1 -# options: -# delimiter: '.' -# index: 1 -# create: true \ No newline at end of file +#replacements: +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index 73fad2a6..00000000 --- a/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the -# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: kube-rbac-proxy
-        securityContext:
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-              - "ALL"
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.14.1
-        args:
-        - "--secure-listen-address=0.0.0.0:8443"
-        - "--upstream=http://127.0.0.1:8080/"
-        - "--logtostderr=true"
-        - "--v=0"
-        ports:
-        - containerPort: 8443
-          protocol: TCP
-          name: https
-        resources:
-          limits:
-            cpu: 500m
-            memory: 128Mi
-          requests:
-            cpu: 5m
-            memory: 64Mi
-      - name: manager
-        args:
-        - "--health-probe-bind-address=:8081"
-        - "--metrics-bind-address=127.0.0.1:8080"
-        - "--leader-elect"
diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml
deleted file mode 100644
index f6f58916..00000000
--- a/config/default/manager_config_patch.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: manager
diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml
new file mode 100644
index 00000000..2aaef653
--- /dev/null
+++ b/config/default/manager_metrics_patch.yaml
@@ -0,0 +1,4 @@
+# This patch adds the args to allow exposing the metrics endpoint using HTTPS
+- op: add
+  path: /spec/template/spec/containers/0/args/0
+  value: --metrics-bind-address=:8443
diff --git a/config/rbac/auth_proxy_service.yaml b/config/default/metrics_service.yaml
similarity index 54%
rename from config/rbac/auth_proxy_service.yaml
rename to config/default/metrics_service.yaml
index d8891bc6..c038c84f 100644
--- a/config/rbac/auth_proxy_service.yaml
+++ b/config/default/metrics_service.yaml
@@ -3,11 +3,7 @@ kind: Service
 metadata:
   labels:
     control-plane: controller-manager
-    app.kubernetes.io/name: service
-    app.kubernetes.io/instance: controller-manager-metrics-service
-    app.kubernetes.io/component: kube-rbac-proxy
-    app.kubernetes.io/created-by: validator
-    app.kubernetes.io/part-of: validator
+    app.kubernetes.io/name: validator
     app.kubernetes.io/managed-by: kustomize
   name: controller-manager-metrics-service
   namespace: system
@@ -16,6 +12,6 @@ spec:
   - name: https
     port: 8443
     protocol: TCP
-    targetPort: https
+    targetPort: 8443
   selector:
     control-plane: controller-manager
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 1799029d..80a7057a 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -70,7 +70,9 @@ spec:
       - command:
         - /manager
        args:
        - --leader-elect
+        - --health-probe-bind-address=:8081
        image: controller:latest
+        imagePullPolicy: IfNotPresent
        name: manager
        securityContext:
          allowPrivilegeEscalation: false
diff --git a/config/network-policy/allow-metrics-traffic.yaml b/config/network-policy/allow-metrics-traffic.yaml
new file mode 100644
index 00000000..195b4521
--- /dev/null
+++ b/config/network-policy/allow-metrics-traffic.yaml
@@ -0,0 +1,26 @@
+# This NetworkPolicy allows ingress traffic
+# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those
+# namespaces are able to gather data from the metrics endpoint.
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  labels:
+    app.kubernetes.io/name: validator
+    app.kubernetes.io/managed-by: kustomize
+  name: allow-metrics-traffic
+  namespace: system
+spec:
+  podSelector:
+    matchLabels:
+      control-plane: controller-manager
+  policyTypes:
+    - Ingress
+  ingress:
+    # This allows ingress traffic from any namespace with the label metrics: enabled
+    - from:
+      - namespaceSelector:
+          matchLabels:
+            metrics: enabled # Only from namespaces with this label
+      ports:
+        - port: 8443
+          protocol: TCP
diff --git a/config/network-policy/kustomization.yaml b/config/network-policy/kustomization.yaml
new file mode 100644
index 00000000..ec0fb5e5
--- /dev/null
+++ b/config/network-policy/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- allow-metrics-traffic.yaml
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
index c428666e..5116f0a9 100644
--- a/config/rbac/kustomization.yaml
+++ b/config/rbac/kustomization.yaml
@@ -5,14 +5,25 @@ resources:
 # runtime. Be sure to update RoleBinding and ClusterRoleBinding
 # subjects if changing service account names.
 - service_account.yaml
-- custom-role.yaml
+- role.yaml
 - role_binding.yaml
 - leader_election_role.yaml
 - leader_election_role_binding.yaml
-# Comment the following 4 lines if you want to disable
-# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
-# which protects your /metrics endpoint.
-- auth_proxy_service.yaml
-- auth_proxy_role.yaml
-- auth_proxy_role_binding.yaml
-- auth_proxy_client_clusterrole.yaml
+# The following RBAC configurations are used to protect
+# the metrics endpoint with authn/authz. These configurations
+# ensure that only authorized users and service accounts
+# can access the metrics endpoint. Comment the following
+# permissions if you want to disable this protection.
+# More info: https://book.kubebuilder.io/reference/metrics.html
+- metrics_auth_role.yaml
+- metrics_auth_role_binding.yaml
+- metrics_reader_role.yaml
+# For each CRD, "Editor" and "Viewer" roles are scaffolded by
+# default, aiding admins in cluster management. Those roles are
+# not used by the Project itself. You can comment the following lines
+# if you do not want those helpers to be installed with your Project.
+# - validatorconfig_editor_role.yaml +# - validatorconfig_viewer_role.yaml +# - validationresult_editor_role.yaml +# - validationresult_viewer_role.yaml + diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/metrics_auth_role.yaml similarity index 71% rename from config/rbac/auth_proxy_role.yaml rename to config/rbac/metrics_auth_role.yaml index 2f59d1d4..7c231a58 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/metrics_auth_role.yaml @@ -2,13 +2,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: proxy-role - app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/name: metrics-auth-role + app.kubernetes.io/instance: metrics-auth-role + app.kubernetes.io/component: rbac app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator app.kubernetes.io/managed-by: kustomize - name: proxy-role + name: metrics-auth-role rules: - apiGroups: - authentication.k8s.io diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/metrics_auth_role_binding.yaml similarity index 63% rename from config/rbac/auth_proxy_role_binding.yaml rename to config/rbac/metrics_auth_role_binding.yaml index 3e4a57d6..68de87ae 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/metrics_auth_role_binding.yaml @@ -2,17 +2,17 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/instance: proxy-rolebinding - app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/name: metrics-auth-rolebinding + app.kubernetes.io/instance: metrics-auth-rolebinding + app.kubernetes.io/component: rbac app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator app.kubernetes.io/managed-by: kustomize - name: proxy-rolebinding + name: metrics-auth-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: metrics-auth-role subjects: - kind: ServiceAccount name: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/metrics_reader_role.yaml similarity index 79% rename from config/rbac/auth_proxy_client_clusterrole.yaml rename to config/rbac/metrics_reader_role.yaml index 1d5c0a9a..77ba6262 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/metrics_reader_role.yaml @@ -2,9 +2,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - app.kubernetes.io/name: clusterrole + app.kubernetes.io/name: metrics-reader app.kubernetes.io/instance: metrics-reader - app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/component: rbac app.kubernetes.io/created-by: validator app.kubernetes.io/part-of: validator app.kubernetes.io/managed-by: kustomize diff --git a/go.mod b/go.mod index 1b9954e5..390243c0 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( github.com/alibabacloud-go/tea-utils v1.4.5 // indirect github.com/alibabacloud-go/tea-xml v1.1.3 // indirect github.com/aliyun/credentials-go v1.3.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aws/aws-sdk-go-v2 v1.30.4 // indirect github.com/aws/aws-sdk-go-v2/config v1.27.29 // indirect @@ -67,6 +68,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect 
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect @@ -84,6 +86,7 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect @@ -107,6 +110,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/cel-go v0.20.1 // indirect github.com/google/certificate-transparency-go v1.2.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect @@ -117,6 +121,7 @@ require ( github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect @@ -165,6 +170,7 @@ require ( github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.19.0 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/thales-e-security/pool v0.0.2 // indirect @@ -176,9 +182,14 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/go-gitlab v0.107.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.29.0 // indirect @@ -193,6 +204,8 @@ require ( golang.org/x/tools v0.27.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.35.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -200,8 +213,11 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.1.0 // indirect k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/apiserver v0.31.0 // indirect + k8s.io/component-base v0.31.0 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect 
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/release-utils v0.8.4 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect diff --git a/go.sum b/go.sum index 3b888e12..cf6c3612 100644 --- a/go.sum +++ b/go.sum @@ -216,6 +216,7 @@ github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.23 h1:Fp4FETmk8sT/IRgnKX2xstC2dL7+QdcU+BL5AYIN3Jw= github.com/coredns/corefile-migration v1.0.23/go.mod h1:8HyMhuyzx9RLZp8cRc9Uf3ECpEAafHOFxQWUPqktMQI= +github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -392,7 +393,6 @@ github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWS github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= diff --git a/hack/chart/values-base.yaml b/hack/chart/values-base.yaml index 8ac9b492..8f450576 100644 --- a/hack/chart/values-base.yaml +++ b/hack/chart/values-base.yaml @@ -1,28 +1,8 @@ controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.16.0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 - --leader-elect containerSecurityContext: allowPrivilegeEscalation: false diff --git a/hack/hauler-manifest-template.yaml b/hack/hauler-manifest-template.yaml index 86779099..deebfc36 100644 --- a/hack/hauler-manifest-template.yaml +++ b/hack/hauler-manifest-template.yaml @@ -15,9 +15,6 @@ spec: - name: quay.io/validator-labs/validator-certs-init:1.0.0 - name: gcr.io/spectro-images-public/release/spectro-cleanup:1.2.0 - name: kindest/node:v1.30.2 - # TODO: align on a single kube-rbac-proxy image - - name: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 - - name: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 --- apiVersion: content.hauler.cattle.io/v1alpha1 kind: Charts diff --git a/hauler-manifest.yaml b/hauler-manifest.yaml index fc219193..9aab8f4d 100644 --- a/hauler-manifest.yaml +++ b/hauler-manifest.yaml @@ -5,19 +5,16 @@ metadata: spec: images: - name: quay.io/validator-labs/validator:v0.1.14 # x-release-please-version - - name: quay.io/validator-labs/validator-plugin-aws:v0.1.10 - - name: quay.io/validator-labs/validator-plugin-azure:v0.0.24 - - name: 
quay.io/validator-labs/validator-plugin-kubescape:v0.0.4 - - name: quay.io/validator-labs/validator-plugin-maas:v0.0.12 - - name: quay.io/validator-labs/validator-plugin-network:v0.1.0 - - name: quay.io/validator-labs/validator-plugin-oci:v0.3.3 - - name: quay.io/validator-labs/validator-plugin-vsphere:v0.1.4 + - name: quay.io/validator-labs/validator-plugin-aws:v0.1.11 + - name: quay.io/validator-labs/validator-plugin-azure:v0.0.25 + - name: quay.io/validator-labs/validator-plugin-kubescape:v0.0.5 + - name: quay.io/validator-labs/validator-plugin-maas:v0.0.13 + - name: quay.io/validator-labs/validator-plugin-network:v0.1.1 + - name: quay.io/validator-labs/validator-plugin-oci:v0.3.4 + - name: quay.io/validator-labs/validator-plugin-vsphere:v0.1.5 - name: quay.io/validator-labs/validator-certs-init:1.0.0 - name: gcr.io/spectro-images-public/release/spectro-cleanup:1.2.0 - name: kindest/node:v1.30.2 - # TODO: align on a single kube-rbac-proxy image - - name: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 - - name: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 --- apiVersion: content.hauler.cattle.io/v1alpha1 kind: Charts @@ -30,25 +27,25 @@ spec: version: 0.1.14 # x-release-please-version - name: validator-plugin-aws repoURL: https://validator-labs.github.io/validator-plugin-aws - version: 0.1.10 + version: 0.1.11 - name: validator-plugin-azure repoURL: https://validator-labs.github.io/validator-plugin-azure - version: 0.0.24 + version: 0.0.25 - name: validator-plugin-kubescape repoURL: https://validator-labs.github.io/validator-plugin-kubescape - version: 0.0.4 + version: 0.0.5 - name: validator-plugin-maas repoURL: https://validator-labs.github.io/validator-plugin-maas - version: 0.0.12 + version: 0.0.13 - name: validator-plugin-network repoURL: https://validator-labs.github.io/validator-plugin-network - version: 0.1.0 + version: 0.1.1 - name: validator-plugin-oci repoURL: https://validator-labs.github.io/validator-plugin-oci - version: 0.3.3 + version: 0.3.4 - name: validator-plugin-vsphere repoURL: https://validator-labs.github.io/validator-plugin-vsphere - version: 0.1.4 + version: 0.1.5 --- apiVersion: content.hauler.cattle.io/v1alpha1 kind: Files @@ -57,4 +54,4 @@ metadata: spec: files: - name: validatorctl - path: https://github.com/validator-labs/validatorctl/releases/download/v0.2.5/validator-linux-ARCH \ No newline at end of file + path: https://github.com/validator-labs/validatorctl/releases/download/v0.2.6/validator-linux-ARCH \ No newline at end of file diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 77c20768..2b91c696 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -47,7 +47,7 @@ import ( "github.com/validator-labs/validator/pkg/kube" "github.com/validator-labs/validator/pkg/sinks" "github.com/validator-labs/validator/pkg/util" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -114,7 +114,7 @@ var _ = BeforeSuite(func() { err = v1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/controller/testdata/vc-network.yaml b/internal/controller/testdata/vc-network.yaml index 9af08059..4769f88d 100644 --- a/internal/controller/testdata/vc-network.yaml +++ b/internal/controller/testdata/vc-network.yaml @@ -19,27 +19,6 @@ spec: version: v0.0.15 values: |- controllerManager: - kubeRbacProxy: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.14.1 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi manager: args: - --health-probe-bind-address=:8081 diff --git a/internal/controller/validationresult_controller.go b/internal/controller/validationresult_controller.go index 258fae0f..e287b062 100644 --- a/internal/controller/validationresult_controller.go +++ b/internal/controller/validationresult_controller.go @@ -50,9 +50,9 @@ type ValidationResultReconciler struct { SinkClient *sinks.Client } -//+kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validationresults,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validationresults/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validationresults/finalizers,verbs=update +// +kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validationresults,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validationresults/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validationresults/finalizers,verbs=update // Reconcile reconciles a ValidationResult. 
func (r *ValidationResultReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { diff --git a/internal/controller/validationresult_controller_test.go b/internal/controller/validationresult_controller_test.go index f35a5e6b..2d9eb360 100644 --- a/internal/controller/validationresult_controller_test.go +++ b/internal/controller/validationresult_controller_test.go @@ -14,7 +14,7 @@ import ( "github.com/validator-labs/validator/api/v1alpha1" "github.com/validator-labs/validator/pkg/constants" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) const validationResultName = "validator-plugin-aws-service-quota" diff --git a/internal/controller/validatorconfig_controller.go b/internal/controller/validatorconfig_controller.go index 2b902c64..bdc32681 100644 --- a/internal/controller/validatorconfig_controller.go +++ b/internal/controller/validatorconfig_controller.go @@ -68,9 +68,9 @@ type ValidatorConfigReconciler struct { Scheme *runtime.Scheme } -//+kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validatorconfigs,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validatorconfigs/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validatorconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validatorconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validatorconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=validation.spectrocloud.labs,resources=validatorconfigs/finalizers,verbs=update // Reconcile reconciles a ValidatorConfig. func (r *ValidatorConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { diff --git a/internal/controller/validatorconfig_controller_test.go b/internal/controller/validatorconfig_controller_test.go index 00844331..da3255b3 100644 --- a/internal/controller/validatorconfig_controller_test.go +++ b/internal/controller/validatorconfig_controller_test.go @@ -23,7 +23,7 @@ import ( "github.com/validator-labs/validator/api/v1alpha1" "github.com/validator-labs/validator/pkg/helm" "github.com/validator-labs/validator/pkg/test" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) const ( diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go new file mode 100644 index 00000000..789aeb2c --- /dev/null +++ b/tests/e2e/e2e_suite_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "os/exec" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/validator-labs/validator/tests/utils" +) + +var ( + // Optional Environment Variables: + // - PROMETHEUS_INSTALL_SKIP=true: Skips Prometheus Operator installation during test setup. 
+	// - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
+	// These variables are useful if Prometheus or CertManager is already installed, avoiding
+	// re-installation and conflicts.
+	skipPrometheusInstall = os.Getenv("PROMETHEUS_INSTALL_SKIP") == "true"
+	skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
+	// isPrometheusOperatorAlreadyInstalled will be set to true when Prometheus Operator CRDs are found on the cluster
+	isPrometheusOperatorAlreadyInstalled = false
+	// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
+	isCertManagerAlreadyInstalled = false
+
+	// projectImage is the name of the image which will be built and loaded
+	// with the source code changes to be tested.
+	projectImage = "quay.io/validator-labs/validator:latest"
+)
+
+// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
+// temporary environment to validate project changes and are intended to be run in CI jobs.
+// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
+// CertManager and Prometheus.
+func TestE2E(t *testing.T) {
+	RegisterFailHandler(Fail)
+	_, _ = fmt.Fprintf(GinkgoWriter, "Starting validator integration test suite\n")
+	RunSpecs(t, "e2e suite")
+}
+
+var _ = BeforeSuite(func() {
+	By("Ensure that Prometheus is enabled")
+	_ = utils.UncommentCode("config/default/kustomization.yaml", "#- ../prometheus", "#")
+
+	By("generating files")
+	cmd := exec.Command("make", "generate")
+	_, err := utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make generate")
+
+	By("generating manifests")
+	cmd = exec.Command("make", "manifests")
+	_, err = utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to run make manifests")
+
+	By("building the manager (Operator) image")
+	cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
+	_, err = utils.Run(cmd)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager (Operator) image")
+
+	// TODO(user): If you want to change the e2e test cluster provider from Kind, ensure the image is
+	// built and available before running the tests. Also, remove the following block.
+	By("loading the manager (Operator) image on Kind")
+	err = utils.LoadImageToKindClusterWithName(projectImage)
+	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager (Operator) image into Kind")
+
+	// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
+	// To prevent errors when tests run in environments with Prometheus or CertManager already installed,
+	// we check for their presence before execution.
+	// Set up Prometheus and CertManager before the suite if not skipped and if not already installed
+	if !skipPrometheusInstall {
+		By("checking if prometheus is installed already")
+		isPrometheusOperatorAlreadyInstalled = utils.IsPrometheusCRDsInstalled()
+		if !isPrometheusOperatorAlreadyInstalled {
+			_, _ = fmt.Fprintf(GinkgoWriter, "Installing Prometheus Operator...\n")
+			Expect(utils.InstallPrometheusOperator()).To(Succeed(), "Failed to install Prometheus Operator")
+		} else {
+			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: Prometheus Operator is already installed. 
Skipping installation...\n")
+		}
+	}
+	if !skipCertManagerInstall {
+		By("checking if cert manager is installed already")
+		isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
+		if !isCertManagerAlreadyInstalled {
+			_, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
+			Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
+		} else {
+			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
+		}
+	}
+})
+
+var _ = AfterSuite(func() {
+	// Teardown Prometheus and CertManager after the suite if not skipped and if they were not already installed
+	if !skipPrometheusInstall && !isPrometheusOperatorAlreadyInstalled {
+		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling Prometheus Operator...\n")
+		utils.UninstallPrometheusOperator()
+	}
+	if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
+		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
+		utils.UninstallCertManager()
+	}
+})
diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go
new file mode 100644
index 00000000..d03cf8e7
--- /dev/null
+++ b/tests/e2e/e2e_test.go
@@ -0,0 +1,307 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/validator-labs/validator/tests/utils"
+)
+
+// namespace where the project is deployed
+const namespace = "validator-system"
+
+// serviceAccountName created for the project
+const serviceAccountName = "validator-controller-manager"
+
+// metricsServiceName is the name of the metrics service of the project
+const metricsServiceName = "validator-controller-manager-metrics-service"
+
+// metricsRoleBindingName is the name of the ClusterRoleBinding that will be created to allow access to the metrics data
+const metricsRoleBindingName = "validator-metrics-binding"
+
+var _ = Describe("Manager", Ordered, func() {
+	var controllerPodName string
+
+	// Before running the tests, set up the environment by creating the namespace,
+	// installing CRDs, and deploying the controller.
+	BeforeAll(func() {
+		By("creating manager namespace")
+		cmd := exec.Command("kubectl", "create", "ns", namespace)
+		_, err := utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to create namespace")
+
+		By("installing CRDs")
+		cmd = exec.Command("make", "install")
+		_, err = utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")
+
+		By("deploying the controller-manager")
+		cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
+		_, err = utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
+	})
+
+	// After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs,
+	// and deleting the namespace.
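+	// Cleanup below is deliberately best-effort: errors from each command are
+	// ignored so that one failed teardown step does not prevent the remaining
+	// resources from being removed.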
+	AfterAll(func() {
+		By("cleaning up the curl pod for metrics")
+		cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace)
+		_, _ = utils.Run(cmd)
+
+		By("undeploying the controller-manager")
+		cmd = exec.Command("make", "undeploy")
+		_, _ = utils.Run(cmd)
+
+		By("uninstalling CRDs")
+		cmd = exec.Command("make", "uninstall")
+		_, _ = utils.Run(cmd)
+
+		By("removing manager namespace")
+		cmd = exec.Command("kubectl", "delete", "ns", namespace)
+		_, _ = utils.Run(cmd)
+	})
+
+	// After each test, check for failures and collect logs, events,
+	// and pod descriptions for debugging.
+	AfterEach(func() {
+		specReport := CurrentSpecReport()
+		if specReport.Failed() {
+			By("Fetching controller manager pod logs")
+			cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
+			controllerLogs, err := utils.Run(cmd)
+			if err == nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs)
+			} else {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err)
+			}
+
+			By("Fetching Kubernetes events")
+			cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp")
+			eventsOutput, err := utils.Run(cmd)
+			if err == nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput)
+			} else {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err)
+			}
+
+			By("Fetching curl-metrics logs")
+			cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
+			metricsOutput, err := utils.Run(cmd)
+			if err == nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput)
+			} else {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err)
+			}
+
+			By("Fetching controller manager pod description")
+			cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace)
+			podDescription, err := utils.Run(cmd)
+			if err == nil {
+				fmt.Println("Pod description:\n", podDescription)
+			} else {
+				fmt.Println("Failed to describe controller pod")
+			}
+		}
+	})
+
+	SetDefaultEventuallyTimeout(2 * time.Minute)
+	SetDefaultEventuallyPollingInterval(time.Second)
+
+	Context("Manager", func() {
+		It("should run successfully", func() {
+			By("validating that the controller-manager pod is running as expected")
+			verifyControllerUp := func(g Gomega) {
+				// Get the name of the controller-manager pod
+				cmd := exec.Command("kubectl", "get",
+					"pods", "-l", "control-plane=controller-manager",
+					"-o", "go-template={{ range .items }}"+
+						"{{ if not .metadata.deletionTimestamp }}"+
+						"{{ .metadata.name }}"+
+						"{{ \"\\n\" }}{{ end }}{{ end }}",
+					"-n", namespace,
+				)
+
+				podOutput, err := utils.Run(cmd)
+				g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information")
+				podNames := utils.GetNonEmptyLines(podOutput)
+				g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running")
+				controllerPodName = podNames[0]
+				g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))
+
+				// Validate the pod's status
+				cmd = exec.Command("kubectl", "get",
+					"pods", controllerPodName, "-o", "jsonpath={.status.phase}",
+					"-n", namespace,
+				)
+				output, err := utils.Run(cmd)
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status")
+			}
+			Eventually(verifyControllerUp).Should(Succeed())
+		})
+
+		It("should ensure the metrics endpoint is serving metrics", func() {
+			
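// This test exercises the new secure metrics path end to end: it binds the
+			// metrics-reader ClusterRole to the controller ServiceAccount, requests a
+			// ServiceAccount token via the TokenRequest API, and curls the /metrics
+			// endpoint on port 8443 with that bearer token.
+			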
By("creating a ClusterRoleBinding for the service account to allow access to metrics") + cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, + "--clusterrole=validator-metrics-reader", + fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), + ) + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") + + By("validating that the metrics service is available") + cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("validating that the ServiceMonitor for Prometheus is applied in the namespace") + cmd = exec.Command("kubectl", "get", "ServiceMonitor", "-n", namespace) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "ServiceMonitor should exist") + + By("getting the service account token") + token, err := serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("waiting for the metrics endpoint to be ready") + verifyMetricsEndpointReady := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready") + } + Eventually(verifyMetricsEndpointReady).Should(Succeed()) + + By("verifying that the controller manager is serving the metrics server") + verifyMetricsServerStarted := func(g Gomega) { + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"), + "Metrics server not yet started") + } + Eventually(verifyMetricsServerStarted).Should(Succeed()) + + By("creating the curl-metrics pod to access the metrics endpoint") + cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", + "--namespace", namespace, + "--image=curlimages/curl:7.78.0", + "--", "/bin/sh", "-c", fmt.Sprintf( + "curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics", + token, metricsServiceName, namespace)) + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", + "-o", "jsonpath={.status.phase}", + "-n", namespace) + output, err := utils.Run(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + metricsOutput := getMetricsOutput() + Expect(metricsOutput).To(ContainSubstring( + "controller_runtime_reconcile_total", + )) + }) + + // +kubebuilder:scaffold:e2e-webhooks-checks + + // TODO: Customize the e2e test suite with scenarios specific to your project. 
+	// Consider applying sample/CR(s) and check their status and/or verifying
+	// the reconciliation by using the metrics, i.e.:
+	// metricsOutput := getMetricsOutput()
+	// Expect(metricsOutput).To(ContainSubstring(
+	//    fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`,
+	//    strings.ToLower(),
+	// ))
+	})
+})
+
+// serviceAccountToken returns a token for the specified service account in the given namespace.
+// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
+// and parsing the resulting token from the API response.
+func serviceAccountToken() (string, error) {
+	const tokenRequestRawString = `{
+		"apiVersion": "authentication.k8s.io/v1",
+		"kind": "TokenRequest"
+	}`
+
+	// Temporary file to store the token request
+	secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
+	tokenRequestFile := filepath.Join("/tmp", secretName)
+	err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
+	if err != nil {
+		return "", err
+	}
+
+	var out string
+	verifyTokenCreation := func(g Gomega) {
+		// Execute kubectl command to create the token
+		cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
+			"/api/v1/namespaces/%s/serviceaccounts/%s/token",
+			namespace,
+			serviceAccountName,
+		), "-f", tokenRequestFile)
+
+		output, err := cmd.CombinedOutput()
+		g.Expect(err).NotTo(HaveOccurred())
+
+		// Parse the JSON output to extract the token
+		var token tokenRequest
+		err = json.Unmarshal(output, &token)
+		g.Expect(err).NotTo(HaveOccurred())
+
+		out = token.Status.Token
+	}
+	Eventually(verifyTokenCreation).Should(Succeed())
+
+	return out, nil
+}
+
+// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
+func getMetricsOutput() string {
+	By("getting the curl-metrics logs")
+	cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
+	metricsOutput, err := utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
+	Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK"))
+	return metricsOutput
+}
+
+// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
+// containing only the token field that we need to extract.
+type tokenRequest struct {
+	Status struct {
+		Token string `json:"token"`
+	} `json:"status"`
+}
diff --git a/tests/utils/utils.go b/tests/utils/utils.go
new file mode 100644
index 00000000..9155b227
--- /dev/null
+++ b/tests/utils/utils.go
@@ -0,0 +1,258 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+
+	. 
"github.com/onsi/ginkgo/v2" //nolint:golint,revive +) + +const ( + prometheusOperatorVersion = "v0.77.1" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.16.0" + certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml" +) + +func warnError(err error) { + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) (string, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + output, err := cmd.CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) + } + + return string(output), nil +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. +func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + // #nosec G204 + cmd := exec.Command("kubectl", "create", "-f", url) + _, err := Run(cmd) + return err +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + // #nosec G204 + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed +// by verifying the existence of key CRDs related to Prometheus. +func IsPrometheusCRDsInstalled() bool { + // List of common Prometheus CRDs + prometheusCRDs := []string{ + "prometheuses.monitoring.coreos.com", + "prometheusrules.monitoring.coreos.com", + "prometheusagents.monitoring.coreos.com", + } + + cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name") + output, err := Run(cmd) + if err != nil { + return false + } + crdList := GetNonEmptyLines(string(output)) + for _, crd := range prometheusCRDs { + for _, line := range crdList { + if strings.Contains(line, crd) { + return true + } + } + } + + return false +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + // #nosec G204 + cmd := exec.Command("kubectl", "delete", "-f", url) + if _, err := Run(cmd); err != nil { + warnError(err) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + // #nosec G204 + cmd := exec.Command("kubectl", "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. + cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed +// by verifying the existence of key CRDs related to Cert Manager. 
+func IsCertManagerCRDsInstalled() bool {
+	// List of common Cert Manager CRDs
+	certManagerCRDs := []string{
+		"certificates.cert-manager.io",
+		"issuers.cert-manager.io",
+		"clusterissuers.cert-manager.io",
+		"certificaterequests.cert-manager.io",
+		"orders.acme.cert-manager.io",
+		"challenges.acme.cert-manager.io",
+	}
+
+	// Execute the kubectl command to get all CRDs
+	cmd := exec.Command("kubectl", "get", "crds")
+	output, err := Run(cmd)
+	if err != nil {
+		return false
+	}
+
+	// Check if any of the Cert Manager CRDs are present
+	crdList := GetNonEmptyLines(output)
+	for _, crd := range certManagerCRDs {
+		for _, line := range crdList {
+			if strings.Contains(line, crd) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// LoadImageToKindClusterWithName loads a local docker image into the kind cluster
+func LoadImageToKindClusterWithName(name string) error {
+	cluster := "kind"
+	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+		cluster = v
+	}
+	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+	// #nosec G204
+	cmd := exec.Command("kind", kindOptions...)
+	_, err := Run(cmd)
+	return err
+}
+
+// GetNonEmptyLines splits the given command output into individual lines,
+// ignoring any empty lines.
+func GetNonEmptyLines(output string) []string {
+	var res []string
+	elements := strings.Split(output, "\n")
+	for _, element := range elements {
+		if element != "" {
+			res = append(res, element)
+		}
+	}
+
+	return res
+}
+
+// GetProjectDir returns the root directory of the project
+func GetProjectDir() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return wd, err
+	}
+	wd = strings.ReplaceAll(wd, "/tests/e2e", "")
+	return wd, nil
+}
+
+// UncommentCode searches for target in the file and removes the comment prefix
+// of the target content. The target content may span multiple lines.
+func UncommentCode(filename, target, prefix string) error {
+	// false positive
+	// nolint:gosec
+	// #nosec G304
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return err
+	}
+	strContent := string(content)
+
+	idx := strings.Index(strContent, target)
+	if idx < 0 {
+		return fmt.Errorf("unable to find the code %s to be uncommented", target)
+	}
+
+	out := new(bytes.Buffer)
+	_, err = out.Write(content[:idx])
+	if err != nil {
+		return err
+	}
+
+	scanner := bufio.NewScanner(bytes.NewBufferString(target))
+	if !scanner.Scan() {
+		return nil
+	}
+	for {
+		_, err := out.WriteString(strings.TrimPrefix(scanner.Text(), prefix))
+		if err != nil {
+			return err
+		}
+		// Avoid writing a newline in case the previous line was the last in target.
+		if !scanner.Scan() {
+			break
+		}
+		if _, err := out.WriteString("\n"); err != nil {
+			return err
+		}
+	}
+
+	_, err = out.Write(content[idx+len(target):])
+	if err != nil {
+		return err
+	}
+	// false positive
+	// nolint:gosec
+	// #nosec G306
+	return os.WriteFile(filename, out.Bytes(), 0644)
+}
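
Note: the hunks above remove the kube-rbac-proxy sidecar and rename the auth-proxy RBAC to
metrics-auth RBAC, but the manager wiring that replaces the proxy lives in cmd/main.go, which is
not shown in this diff. As a rough sketch only, assuming a recent controller-runtime (v0.16+,
where the WithAuthenticationAndAuthorization metrics filter is available), the new
--metrics-bind-address=:8443 flag in the chart maps to something like:

	package main

	import (
		ctrl "sigs.k8s.io/controller-runtime"
		"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
		metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	)

	func main() {
		// Sketch, not the repository's actual main.go. SecureServing plus the
		// authn/authz filter is what lets the metrics-auth-role RBAC above
		// (TokenReviews + SubjectAccessReviews) replace the kube-rbac-proxy
		// sidecar: requests to /metrics are authenticated and authorized
		// against the kube-apiserver instead of by the proxy.
		mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
			Metrics: metricsserver.Options{
				BindAddress:    ":8443", // matches --metrics-bind-address=:8443
				SecureServing:  true,
				FilterProvider: filters.WithAuthenticationAndAuthorization,
			},
		})
		if err != nil {
			panic(err)
		}
		if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
			panic(err)
		}
	}

The e2e test's curl-metrics pod exercises exactly this path: the ServiceAccount token it sends is
validated via a TokenReview, and access to /metrics is authorized via a SubjectAccessReview
against the metrics-reader ClusterRole bound by the test.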