diff --git a/internal/helm-project-operator/charts/example-chart/Chart.yaml b/internal/helm-project-operator/charts/example-chart/Chart.yaml new file mode 100644 index 00000000..82ffe693 --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: example-chart +description: Example Helm Project Operator chart +version: 0.0.0 +appVersion: 0.0.0 +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/release-name: example-chart + catalog.cattle.io/os: linux,windows + catalog.cattle.io/permits-os: linux,windows diff --git a/internal/helm-project-operator/charts/example-chart/README.md b/internal/helm-project-operator/charts/example-chart/README.md new file mode 100644 index 00000000..c25c95ca --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/README.md @@ -0,0 +1,5 @@ +# example-chart + +This chart is a dummy chart that is deployed on behalf of the default Helm Project Operator. + +This chart is primarily intended for testing purposes. diff --git a/internal/helm-project-operator/charts/example-chart/questions.yaml b/internal/helm-project-operator/charts/example-chart/questions.yaml new file mode 100644 index 00000000..2464e356 --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/questions.yaml @@ -0,0 +1,7 @@ +questions: + - variable: data + label: mydata + description: My Data + type: string + required: true + group: Data diff --git a/internal/helm-project-operator/charts/example-chart/templates/configmaps.yaml b/internal/helm-project-operator/charts/example-chart/templates/configmaps.yaml new file mode 100644 index 00000000..55336dbc --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/templates/configmaps.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-config-map + namespace: {{ .Release.Namespace }} +data: + config: |- +{{ .Values.data | toYaml | indent 4 }} + project-namespaces: |- +{{ .Values.global.cattle.projectNamespaces | toYaml | indent 4 }} + project-id: |- +{{ .Values.global.cattle.projectID | toYaml | indent 4 }} + release-project-id: |- +{{ .Values.global.cattle.releaseProjectID | toYaml | indent 4 }} + project-namespace-selector: |- +{{ .Values.global.cattle.projectNamespaceSelector | toYaml | indent 4 }} + system-default-registry: |- +{{ .Values.global.cattle.systemDefaultRegistry | toYaml | indent 4 }} + cattle-url: |- +{{ .Values.global.cattle.url | toYaml | indent 4 }} + cluster-id: |- +{{ .Values.global.cattle.clusterId | toYaml | indent 4 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-config-map-2 + namespace: {{ .Release.Namespace }} +data: + config: |- +{{ .Values.data | toYaml | indent 4 }} + project-namespaces: |- +{{ .Values.global.cattle.projectNamespaces | toYaml | indent 4 }} + project-id: |- +{{ .Values.global.cattle.projectID | toYaml | indent 4 }} + release-project-id: |- +{{ .Values.global.cattle.releaseProjectID | toYaml | indent 4 }} + project-namespace-selector: |- +{{ .Values.global.cattle.projectNamespaceSelector | toYaml | indent 4 }} + system-default-registry: |- +{{ .Values.global.cattle.systemDefaultRegistry | toYaml | indent 4 }} + cattle-url: |- +{{ .Values.global.cattle.url | toYaml | indent 4 }} + cluster-id: |- +{{ .Values.global.cattle.clusterId | toYaml | indent 4 }} diff --git a/internal/helm-project-operator/charts/example-chart/templates/dashboard-roles.yaml 
b/internal/helm-project-operator/charts/example-chart/templates/dashboard-roles.yaml new file mode 100755 index 00000000..4fb0f004 --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/templates/dashboard-roles.yaml @@ -0,0 +1,61 @@ +{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-admin + namespace: {{ .Release.Namespace }} + labels: + helm.cattle.io/project-helm-chart-role: {{ .Release.Name }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + helm.cattle.io/project-helm-chart-role-aggregate-from: admin + {{- end }} +rules: +- apiGroups: + - "test.cattle.io" + resources: + - test + resourceNames: + - test + verbs: + - 'test' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-edit + namespace: {{ .Release.Namespace }} + labels: + helm.cattle.io/project-helm-chart-role: {{ .Release.Name }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + helm.cattle.io/project-helm-chart-role-aggregate-from: edit + {{- end }} +rules: +- apiGroups: + - "test.cattle.io" + resources: + - test + resourceNames: + - test + verbs: + - 'test' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-view + namespace: {{ .Release.Namespace }} + labels: + helm.cattle.io/project-helm-chart-role: {{ .Release.Name }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + helm.cattle.io/project-helm-chart-role-aggregate-from: view + {{- end }} +rules: +- apiGroups: + - "test.cattle.io" + resources: + - test + resourceNames: + - test + verbs: + - 'test' +{{- end }} diff --git a/internal/helm-project-operator/charts/example-chart/templates/dashboard-values-configmap.yaml b/internal/helm-project-operator/charts/example-chart/templates/dashboard-values-configmap.yaml new file mode 100644 index 00000000..a117a47f --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/templates/dashboard-values-configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-dashboard-values + namespace: {{ .Release.Namespace }} + labels: + helm.cattle.io/dashboard-values-configmap: {{ .Release.Name }} +data: + data.json: |- +{{ .Values.data | toJson | indent 4 }} diff --git a/internal/helm-project-operator/charts/example-chart/values.yaml b/internal/helm-project-operator/charts/example-chart/values.yaml new file mode 100644 index 00000000..c4c2d53a --- /dev/null +++ b/internal/helm-project-operator/charts/example-chart/values.yaml @@ -0,0 +1,34 @@ +global: + cattle: + clusterId: "" + projectID: "" + projectNamespaces: [] + projectNamespaceSelector: {} + releaseProjectID: "" + systemDefaultRegistry: "" + url: "" + rbac: + ## Create RBAC resources for ServiceAccounts and users + ## + create: true + + userRoles: + ## Create default user Roles that the Helm Project Operator will automatically create RoleBindings for + ## + ## How does this work? + ## + ## The operator will watch for all subjects bound to each Kubernetes default ClusterRole in the project registration namespace + ## where the ProjectHelmChart that deployed this chart belongs to; if it observes a subject bound to a particular role in + ## the project registration namespace (e.g.
edit) and if a Role exists that is deployed by this chart with the label + ## 'helm.cattle.io/project-helm-chart-role-aggregate-from': '<admin|edit|view>', it will automatically create a RoleBinding + ## in the release namespace binding all such subjects to that Role. + ## + ## Note: while the default behavior is to use the Kubernetes default ClusterRoles, the operator deployment can be configured + ## to use a different set of ClusterRoles as the source of truth for admin, edit, and view permissions. + ## + create: true + ## Add labels to Roles to have the operator pick them up + aggregateToDefaultRoles: true + +data: + hello: world diff --git a/internal/helm-project-operator/charts/helm-project-operator/Chart.yaml b/internal/helm-project-operator/charts/helm-project-operator/Chart.yaml new file mode 100644 index 00000000..d0845d56 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: helm-project-operator +description: Helm Project Operator +version: 0.2.1 +appVersion: 0.2.1 +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Helm Project Operator + catalog.cattle.io/kube-version: '>=1.16.0-0' + catalog.cattle.io/namespace: cattle-helm-system + catalog.cattle.io/permits-os: linux,windows + catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1 + catalog.cattle.io/rancher-version: '>= 2.6.0-0' + catalog.cattle.io/release-name: helm-project-operator + catalog.cattle.io/os: linux,windows diff --git a/internal/helm-project-operator/charts/helm-project-operator/README.md b/internal/helm-project-operator/charts/helm-project-operator/README.md new file mode 100644 index 00000000..fc1d39e8 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/README.md @@ -0,0 +1,77 @@ +# Helm Project Operator + +## How does the operator work? + +1. On deploying a Helm Project Operator, users can create ProjectHelmChart CRs with `spec.helmApiVersion` set to `dummy.cattle.io/v1alpha1` in a **Project Registration Namespace (`cattle-project-<projectID>`)**. +2. On seeing each ProjectHelmChart CR, the operator will automatically deploy the embedded Helm chart on the Project Owner's behalf in the **Project Release Namespace (`cattle-project-<projectID>-dummy`)** based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**. +3. RBAC will automatically be assigned in the Project Release Namespace to allow users to interact with the deployed chart, based on Roles created in the Project Release Namespace with a given set of labels; this will be based on RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) (see below for more information about configuring RBAC). + +### What is a Project? + +In Helm Project Operator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`; by default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given [Rancher](https://rancher.com/) Project. + +### What is a ProjectHelmChart? + +A ProjectHelmChart is an instance of a (project-scoped) Helm chart deployed on behalf of a user who has permissions to create ProjectHelmChart resources in a Project Registration namespace. + +Generally, the best way to think about the ProjectHelmChart model is by comparing it to two other models: +1.
Managed Kubernetes providers (EKS, GKE, AKS, etc.): in this model, a user has the ability to say "I want a Kubernetes cluster" but the underlying cloud provider is responsible for provisioning the infrastructure and offering **limited view and access** to the underlying resources created on their behalf; similarly, Helm Project Operator allows a Project Owner to say "I want this Helm chart deployed", but the underlying Operator is responsible for "provisioning" (deploying) the Helm chart and offering **limited view and access** to the underlying Kubernetes resources created on their behalf (based on configuring "least-privilege" Kubernetes RBAC for the Project Owners / Members in the newly created Project Release Namespace). +2. Dynamically-provisioned Persistent Volumes: in this model, a single resource (PersistentVolumeClaim) exists that allows you to specify a Storage Class that actually implements provisioning the underlying storage via a Storage Class Provisioner (e.g. Longhorn). Similarly, the ProjectHelmChart is a single resource that allows you to specify a `spec.helmApiVersion` ("storage class") that actually implements deploying the underlying Helm chart via a Helm Project Operator (e.g. [`rancher/prometheus-federator`](https://github.com/rancher/prometheus-federator)). + +### Configuring the Helm release created by a ProjectHelmChart + +The `spec.values` of the ProjectHelmChart resource will correspond to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf; to see the underlying chart's `values.yaml` spec, either: +- View the chart's definition located at [`rancher/helm-project-operator` under `charts/example-chart`](https://github.com/rancher/helm-project-operator/blob/main/charts/example-chart) (where the chart version will be tied to the version of this operator) +- Look for the ConfigMap named `dummy.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that were used to configure the chart (which were embedded directly into the `helm-project-operator` binary). + +### Namespaces + +All Helm Project Operators have three different classifications of namespaces that the operator looks out for: +1. **Operator / System Namespace**: this is the namespace that the operator is deployed into (e.g. `cattle-helm-system`). This namespace will contain all HelmCharts and HelmReleases for all ProjectHelmCharts watched by this operator. **Only Cluster Admins should have access to this namespace.** +2. **Project Registration Namespace (`cattle-project-<projectID>`)**: this is the set of namespaces that the operator watches for ProjectHelmCharts within. The RoleBindings and ClusterRoleBindings that apply to this namespace will also be the source of truth for the auto-assigned RBAC created in the Project Release Namespace (see more details below). **Project Owners (admin), Project Members (edit), and Read-Only Members (view) should have access to this namespace**. +> Note: Project Registration Namespaces will be auto-generated by the operator and imported into the Project they are tied to if `.Values.global.cattle.projectLabel` is provided (which is set to `field.cattle.io/projectId` by default); this indicates that a Project Registration Namespace should be created by the operator if at least one namespace is observed with that label. The operator will not let these namespaces be deleted unless either all namespaces with that label are gone (e.g.
this is the last namespace in that project, in which case the namespace will be marked with the label `"helm.cattle.io/helm-project-operator-orphaned": "true"`, which signals that it can be deleted) or it is no longer watching that project (because the project ID was provided under `.Values.helmProjectOperator.otherSystemProjectLabelValues`, which serves as a denylist for Projects). These namespaces will also never be auto-deleted to avoid destroying user data; it is recommended that users clean up these namespaces manually if desired on creating or deleting a project. +> Note: if `.Values.global.cattle.projectLabel` is not provided, the Operator / System Namespace will also be the Project Registration Namespace +3. **Project Release Namespace (`cattle-project-<projectID>-dummy`)**: this is the set of namespaces that the operator deploys Helm charts within on behalf of a ProjectHelmChart; the operator will also automatically assign RBAC to Roles created in this namespace by the Helm charts based on bindings found in the Project Registration Namespace. **Only Cluster Admins should have access to this namespace; Project Owners (admin), Project Members (edit), and Read-Only Members (view) will be assigned limited access to this namespace by the deployed Helm Chart and Helm Project Operator.** +> Note: Project Release Namespaces are automatically deployed and imported into the project whose ID is specified under `.Values.helmProjectOperator.projectReleaseNamespaces.labelValue` (which defaults to the value of `.Values.global.cattle.systemProjectId` if not specified) whenever a ProjectHelmChart is specified in a Project Registration Namespace +> Note: Project Release Namespaces follow the same orphaning conventions as Project Registration Namespaces (see note above) +> Note: if `.Values.projectReleaseNamespaces.enabled` is false, the Project Release Namespace will be the same as the Project Registration Namespace + +### Helm Resources (HelmChart, HelmRelease) + +On deploying a ProjectHelmChart, the Helm Project Operator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn: +- A HelmChart CR (managed via an embedded [k3s-io/helm-controller](https://github.com/k3s-io/helm-controller) in the operator): this custom resource automatically creates a Job in the same namespace that triggers a `helm install`, `helm upgrade`, or `helm uninstall` depending on the change applied to the HelmChart CR; this CR is automatically updated on changes to the ProjectHelmChart (e.g. modifying the values.yaml) or changes to the underlying Project definition (e.g. adding or removing namespaces from a project). +> **Important Note: If a ProjectHelmChart is not deploying or updating the underlying Helm chart for some reason, the Job created by this resource in the Operator / System namespace should be the first place you check to see if there's something wrong with the Helm operation; however, this is generally only accessible by a Cluster Admin.** +- A HelmRelease CR (managed via an embedded [rancher/helm-locker](https://github.com/rancher/helm-locker) in the operator): this custom resource automatically locks a deployed Helm release in place and automatically overwrites updates to underlying resources unless the change happens via a Helm operation (`helm install`, `helm upgrade`, or `helm uninstall` performed by the HelmChart CR). +> Note: HelmRelease CRs emit Kubernetes Events whenever an underlying Helm release is detected to have been modified and locked back into place; to view these events, you can use `kubectl describe helmrelease <release-name> -n <operator-namespace>`; you can also view the logs of this operator to see when changes are detected and which resources were attempted to be modified + +Both of these resources are created for all Helm charts in the Operator / System namespace to avoid escalation of privileges to underprivileged users.
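+
+For orientation, a minimal ProjectHelmChart that both of these resources would be generated from might look like the following sketch (the name, namespace, and values are illustrative and mirror this repository's example chart):
+
+```yaml
+apiVersion: helm.cattle.io/v1alpha1
+kind: ProjectHelmChart
+metadata:
+  name: project-example-chart
+  # a Project Registration Namespace for an example project with ID p-example
+  namespace: cattle-project-p-example
+spec:
+  helmApiVersion: dummy.cattle.io/v1alpha1
+  values:
+    data:
+      hello: world
+```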
+### RBAC + +As described in the section on namespaces above, Helm Project Operator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings: +- ClusterRoleBindings +- RoleBindings in the Project Registration Namespace + +On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the binding points to matches a ClusterRole with the name provided under `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin`, `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit`, or `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view`; by default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). + +> Note: for Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates. + +If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels: +- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}` +- `helm.cattle.io/project-helm-chart-role-aggregate-from: <admin|edit|view>` + +By default, the `example-chart` (the underlying chart deployed by Helm Project Operator) does not create any default roles; however, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control assigning those RBAC roles to users in their Project Registration namespaces. + +### Advanced Helm Project Operator Configuration + +|Value|Configuration| +|---|---------------------------| +|`valuesOverride`| Allows an Operator to override values that are set on each ProjectHelmChart deployment on an operator-level; user-provided options (specified on the `spec.values` of the ProjectHelmChart) are automatically overridden if operator-level values are provided.
For an example, see how [Prometheus Federator](https://github.com/rancher/prometheus-federator) overrides `federate.targets` by default (note: when overriding list values like `federate.targets`, user-provided list values will **not** be concatenated) | +|`projectReleaseNamespaces.labelValue`| The ID of the Project that all Project Release Namespaces should be auto-imported into (via label and annotation). Not recommended to be overridden on a Rancher setup. | +|`otherSystemProjectLabelValues`| Other project label values whose namespaces the operator should treat as system namespaces that should not be monitored. By default, all namespaces that match `global.cattle.systemProjectId` will not be watched. `kube-system` is explicitly marked as a system namespace as well, regardless of label or annotation. | +|`releaseRoleBindings.aggregate`| Whether to automatically create RBAC resources in Project Release namespaces | +|`releaseRoleBindings.clusterRoleRefs.<admin\|edit\|view>`| ClusterRoles to reference to discover subjects to create RoleBindings for in the Project Release Namespace for all corresponding Project Release Roles. See RBAC above for more information | +|`hardenedNamespaces.enabled`| Whether to automatically patch the default ServiceAccount with `automountServiceAccountToken: false` and create a default NetworkPolicy in all managed namespaces in the cluster; the default values ensure that the creation of the namespace does not break a CIS 1.16 hardened scan | +|`hardenedNamespaces.configuration`| The configuration to be supplied to the default ServiceAccount or auto-generated NetworkPolicy on managing a namespace | +|`helmController.enabled`| Whether to enable an embedded k3s-io/helm-controller instance within the Helm Project Operator. Should be disabled for RKE2 clusters since RKE2 clusters already run Helm Controller to manage internal Kubernetes components | +|`helmLocker.enabled`| Whether to enable an embedded rancher/helm-locker instance within the Helm Project Operator. | diff --git a/internal/helm-project-operator/charts/helm-project-operator/app-readme.md b/internal/helm-project-operator/charts/helm-project-operator/app-readme.md new file mode 100644 index 00000000..fd551467 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/app-readme.md @@ -0,0 +1,20 @@ +# Helm Project Operator + +This chart installs the example [Helm Project Operator](https://github.com/rancher/helm-project-operator) onto your cluster. + +## Upgrading to Kubernetes v1.25+ + +Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API. + +As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`. + +> **Note:** +> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`. + +> **Note:** +> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).** +> +> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
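+
+The values change itself is minimal. As a sketch, the override supplied on that `helm upgrade` is just:
+
+```yaml
+global:
+  cattle:
+    psp:
+      enabled: false
+```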
+Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. + +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. \ No newline at end of file diff --git a/internal/helm-project-operator/charts/helm-project-operator/questions.yaml b/internal/helm-project-operator/charts/helm-project-operator/questions.yaml new file mode 100644 index 00000000..054361a7 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/questions.yaml @@ -0,0 +1,43 @@ +questions: +- variable: global.cattle.psp.enabled + default: "false" + description: "Flag to enable or disable the installation of PodSecurityPolicies by this chart in the target cluster. If the cluster is running Kubernetes 1.25+, you must update this value to false." + label: "Enable PodSecurityPolicies" + type: boolean + group: "Security Settings" +- variable: helmController.enabled + label: Enable Embedded Helm Controller + description: 'Note: If you are running this chart in an RKE2 cluster, this should be disabled.' + type: boolean + group: Helm Controller +- variable: helmLocker.enabled + label: Enable Embedded Helm Locker + type: boolean + group: Helm Locker +- variable: projectReleaseNamespaces.labelValue + label: Project Release Namespace Project ID + description: By default, the System Project is selected. This can be overridden to a different Project (e.g. p-xxxxx) + type: string + required: false + group: Namespaces +- variable: releaseRoleBindings.clusterRoleRefs.admin + label: Admin ClusterRole + description: By default, admin selects Project Owners. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: admin + required: false + group: RBAC +- variable: releaseRoleBindings.clusterRoleRefs.edit + label: Edit ClusterRole + description: By default, edit selects Project Members. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: edit + required: false + group: RBAC +- variable: releaseRoleBindings.clusterRoleRefs.view + label: View ClusterRole + description: By default, view selects Read-Only users. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: view + required: false + group: RBAC diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/NOTES.txt b/internal/helm-project-operator/charts/helm-project-operator/templates/NOTES.txt new file mode 100644 index 00000000..32baeebc --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/NOTES.txt @@ -0,0 +1,2 @@ +{{ $.Chart.Name }} has been installed. Check its status by running: + kubectl --namespace {{ template "helm-project-operator.namespace" .
}} get pods -l "release={{ $.Release.Name }}" diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/_helpers.tpl b/internal/helm-project-operator/charts/helm-project-operator/templates/_helpers.tpl new file mode 100644 index 00000000..97dd6b36 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/_helpers.tpl @@ -0,0 +1,66 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows clusters add a default taint to Linux nodes, +so add the Linux tolerations below to workloads so that they can be scheduled onto those Linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# Helm Project Operator + +{{/* vim: set filetype=mustache: */}} +{{/* Expand the name of the chart. Truncated at 50 characters to leave room for suffixes appended to generated resource names (63-character limit). */}} +{{- define "helm-project-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "helm-project-operator.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* Create chart name and version as used by the chart label. */}} +{{- define "helm-project-operator.chartref" -}} +{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end }} + +{{/* Generate basic labels */}} +{{- define "helm-project-operator.labels" -}} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}" +app.kubernetes.io/part-of: {{ template "helm-project-operator.name" . }} +chart: {{ template "helm-project-operator.chartref" . }} +release: {{ $.Release.Name | quote }} +heritage: {{ $.Release.Service | quote }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/cleanup.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/cleanup.yaml new file mode 100644 index 00000000..98675642 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/cleanup.yaml @@ -0,0 +1,82 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "helm-project-operator.name" . }}-cleanup + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed +spec: + template: + metadata: + name: {{ template "helm-project-operator.name" . }}-cleanup + labels: {{ include "helm-project-operator.labels" . | nindent 8 }} + app: {{ template "helm-project-operator.name" .
}} + spec: + serviceAccountName: {{ template "helm-project-operator.name" . }} +{{- if .Values.cleanup.securityContext }} + securityContext: {{ toYaml .Values.cleanup.securityContext | nindent 8 }} +{{- end }} + initContainers: + - name: add-cleanup-annotations + image: {{ template "system_default_registry" . }}{{ .Values.cleanup.image.repository }}:{{ .Values.cleanup.image.tag }} + imagePullPolicy: "{{ .Values.cleanup.image.pullPolicy }}" + command: + - /bin/sh + - -c + - > + echo "Labeling all ProjectHelmCharts with helm.cattle.io/helm-project-operator-cleanup=true"; + EXPECTED_HELM_API_VERSION={{ .Values.helmApiVersion }}; + IFS=$'\n'; + for namespace in $(kubectl get namespaces -l helm.cattle.io/helm-project-operated=true --no-headers -o=custom-columns=NAME:.metadata.name); do + for projectHelmChartAndHelmApiVersion in $(kubectl get projecthelmcharts -n ${namespace} --no-headers -o=custom-columns=NAME:.metadata.name,HELMAPIVERSION:.spec.helmApiVersion); do + projectHelmChartAndHelmApiVersion=$(echo ${projectHelmChartAndHelmApiVersion} | xargs); + projectHelmChart=$(echo ${projectHelmChartAndHelmApiVersion} | cut -d' ' -f1); + helmApiVersion=$(echo ${projectHelmChartAndHelmApiVersion} | cut -d' ' -f2); + if [[ ${helmApiVersion} != ${EXPECTED_HELM_API_VERSION} ]]; then + echo "Skipping marking ${namespace}/${projectHelmChart} with cleanup label since spec.helmApiVersion: ${helmApiVersion} is not ${EXPECTED_HELM_API_VERSION}"; + continue; + fi; + kubectl label projecthelmcharts -n ${namespace} ${projectHelmChart} helm.cattle.io/helm-project-operator-cleanup=true --overwrite; + done; + done; +{{- if .Values.cleanup.resources }} + resources: {{ toYaml .Values.cleanup.resources | nindent 12 }} +{{- end }} +{{- if .Values.cleanup.containerSecurityContext }} + securityContext: {{ toYaml .Values.cleanup.containerSecurityContext | nindent 12 }} +{{- end }} + containers: + - name: ensure-subresources-deleted + image: {{ template "system_default_registry" . }}{{ .Values.cleanup.image.repository }}:{{ .Values.cleanup.image.tag }} + imagePullPolicy: "{{ .Values.cleanup.image.pullPolicy }}" + command: + - /bin/sh + - -c + - > + SYSTEM_NAMESPACE={{ .Release.Namespace }}; + EXPECTED_HELM_API_VERSION={{ .Values.helmApiVersion }}; + HELM_API_VERSION_TRUNCATED=$(echo ${EXPECTED_HELM_API_VERSION} | cut -d'/' -f1); + echo "Ensuring HelmCharts and HelmReleases are deleted from ${SYSTEM_NAMESPACE}..."; + while [[ "$(kubectl get helmcharts,helmreleases -l helm.cattle.io/helm-api-version=${HELM_API_VERSION_TRUNCATED} -n ${SYSTEM_NAMESPACE} 2>&1)" != "No resources found in ${SYSTEM_NAMESPACE} namespace." ]]; do + echo "waiting for HelmCharts and HelmReleases to be deleted from ${SYSTEM_NAMESPACE}... sleeping 3 seconds"; + sleep 3; + done; + echo "Successfully deleted all HelmCharts and HelmReleases in ${SYSTEM_NAMESPACE}!"; +{{- if .Values.cleanup.resources }} + resources: {{ toYaml .Values.cleanup.resources | nindent 12 }} +{{- end }} +{{- if .Values.cleanup.containerSecurityContext }} + securityContext: {{ toYaml .Values.cleanup.containerSecurityContext | nindent 12 }} +{{- end }} + restartPolicy: OnFailure + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + {{- if .Values.cleanup.nodeSelector }} + {{- toYaml .Values.cleanup.nodeSelector | nindent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" .
| nindent 8 }} + {{- if .Values.cleanup.tolerations }} + {{- toYaml .Values.cleanup.tolerations | nindent 8 }} + {{- end }} diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/clusterrole.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/clusterrole.yaml new file mode 100644 index 00000000..60ed263b --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/clusterrole.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "helm-project-operator.name" . }}-admin + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-admin: "true" + {{- end }} +rules: +- apiGroups: + - helm.cattle.io + resources: + - projecthelmcharts + - projecthelmcharts/finalizers + - projecthelmcharts/status + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "helm-project-operator.name" . }}-edit + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-edit: "true" + {{- end }} +rules: +- apiGroups: + - helm.cattle.io + resources: + - projecthelmcharts + - projecthelmcharts/status + verbs: + - 'get' + - 'list' + - 'watch' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "helm-project-operator.name" . }}-view + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + {{- end }} +rules: +- apiGroups: + - helm.cattle.io + resources: + - projecthelmcharts + - projecthelmcharts/status + verbs: + - 'get' + - 'list' + - 'watch' +{{- end }} diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/configmap.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/configmap.yaml new file mode 100644 index 00000000..d4def157 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/configmap.yaml @@ -0,0 +1,14 @@ +## Note: If you add another entry to this ConfigMap, make sure a corresponding env var is set +## in the deployment of the operator to ensure that a Helm upgrade will force the operator +## to reload the values in the ConfigMap and redeploy +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "helm-project-operator.name" . }}-config + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} +data: + hardened.yaml: |- +{{ .Values.hardenedNamespaces.configuration | toYaml | indent 4 }} + values.yaml: |- +{{ .Values.valuesOverride | toYaml | indent 4 }} diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/deployment.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/deployment.yaml new file mode 100644 index 00000000..33b81e72 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/deployment.yaml @@ -0,0 +1,126 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . 
}} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +spec: + {{- if .Values.replicas }} + replicas: {{ .Values.replicas }} + {{- end }} + selector: + matchLabels: + app: {{ template "helm-project-operator.name" . }} + release: {{ $.Release.Name | quote }} + template: + metadata: + labels: {{ include "helm-project-operator.labels" . | nindent 8 }} + app: {{ template "helm-project-operator.name" . }} + spec: + containers: + - name: {{ template "helm-project-operator.name" . }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + args: + - {{ template "helm-project-operator.name" . }} + - --namespace={{ template "helm-project-operator.namespace" . }} + - --controller-name={{ template "helm-project-operator.name" . }} + - --values-override-file=/etc/helmprojectoperator/config/values.yaml +{{- if .Values.global.cattle.systemDefaultRegistry }} + - --system-default-registry={{ .Values.global.cattle.systemDefaultRegistry }} +{{- end }} +{{- if .Values.global.cattle.url }} + - --cattle-url={{ .Values.global.cattle.url }} +{{- end }} +{{- if .Values.global.cattle.projectLabel }} + - --project-label={{ .Values.global.cattle.projectLabel }} +{{- end }} +{{- if not .Values.projectReleaseNamespaces.enabled }} + - --system-project-label-values={{ join "," (append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId) }} +{{- else if and (ne (len .Values.global.cattle.systemProjectId) 0) (ne (len .Values.projectReleaseNamespaces.labelValue) 0) (ne .Values.projectReleaseNamespaces.labelValue .Values.global.cattle.systemProjectId) }} + - --system-project-label-values={{ join "," (append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId) }} +{{- else if len .Values.otherSystemProjectLabelValues }} + - --system-project-label-values={{ join "," .Values.otherSystemProjectLabelValues }} +{{- end }} +{{- if .Values.projectReleaseNamespaces.enabled }} +{{- if .Values.projectReleaseNamespaces.labelValue }} + - --project-release-label-value={{ .Values.projectReleaseNamespaces.labelValue }} +{{- else if .Values.global.cattle.systemProjectId }} + - --project-release-label-value={{ .Values.global.cattle.systemProjectId }} +{{- end }} +{{- end }} +{{- if .Values.global.cattle.clusterId }} + - --cluster-id={{ .Values.global.cattle.clusterId }} +{{- end }} +{{- if .Values.releaseRoleBindings.aggregate }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs.admin }} + - --admin-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.admin }} +{{- end }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs.edit }} + - --edit-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.edit }} +{{- end }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs.view }} + - --view-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.view }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.hardenedNamespaces.enabled }} + - --hardening-options-file=/etc/helmprojectoperator/config/hardened.yaml +{{- else }} + - --disable-hardening +{{- end }} +{{- if .Values.debug }} + - --debug + - --debug-level={{ .Values.debugLevel }} +{{- end }} +{{- if not .Values.helmController.enabled }} + - --disable-embedded-helm-controller +{{- else }} + - --helm-job-image={{ template "system_default_registry" .
}}{{ .Values.helmController.job.image.repository }}:{{ .Values.helmController.job.image.tag }} +{{- end }} +{{- if not .Values.helmLocker.enabled }} + - --disable-embedded-helm-locker +{{- end }} +{{- if .Values.additionalArgs }} +{{- toYaml .Values.additionalArgs | nindent 10 }} +{{- end }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ## Note: The below two values only exist to force Helm to upgrade the deployment on + ## a change to the contents of the ConfigMap during an upgrade. Neither serves + ## any practical purpose and can be removed and replaced with a configmap reloader + ## in a future change if dynamic updates are required. + - name: HARDENING_OPTIONS_SHA_256_HASH + value: {{ .Values.hardenedNamespaces.configuration | toYaml | sha256sum }} + - name: VALUES_OVERRIDE_SHA_256_HASH + value: {{ .Values.valuesOverride | toYaml | sha256sum }} +{{- if .Values.resources }} + resources: {{ toYaml .Values.resources | nindent 12 }} +{{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: {{ toYaml .Values.containerSecurityContext | nindent 12 }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/helmprojectoperator/config" + serviceAccountName: {{ template "helm-project-operator.name" . }} +{{- if .Values.securityContext }} + securityContext: {{ toYaml .Values.securityContext | nindent 8 }} +{{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.nodeSelector }} +{{- toYaml .Values.nodeSelector | nindent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.tolerations }} +{{- toYaml .Values.tolerations | nindent 8 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "helm-project-operator.name" . }}-config diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/psp.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/psp.yaml new file mode 100644 index 00000000..73dcc456 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/psp.yaml @@ -0,0 +1,68 @@ +{{- if .Values.global.cattle.psp.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "helm-project-operator.name" . }}-psp + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +{{- if .Values.global.rbac.pspAnnotations }} + annotations: {{ toYaml .Values.global.rbac.pspAnnotations | nindent 4 }} +{{- end }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Allows all group IDs, including the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Allows all group IDs, including the root group. + - min: 0 + max: 65535 + readOnlyRootFilesystem: false +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "helm-project-operator.name" . }}-psp + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" .
}} +rules: +{{- if semverCompare "> 1.15.0-0" .Capabilities.KubeVersion.GitVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "helm-project-operator.name" . }}-psp +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "helm-project-operator.name" . }}-psp + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "helm-project-operator.name" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . }} +{{- end }} diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/rbac.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/rbac.yaml new file mode 100644 index 00000000..b1c40920 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/rbac.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "helm-project-operator.name" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "cluster-admin" # see note below +subjects: +- kind: ServiceAccount + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end }} +# --- +# NOTE: +# As of now, due to the fact that the k3s-io/helm-controller can only deploy jobs that are cluster-bound to the cluster-admin +# ClusterRole, the only way for this operator to be able to perform that binding is if it is also bound to the cluster-admin ClusterRole. +# +# As a result, this ClusterRoleBinding will be left as a work-in-progress until changes are made in k3s-io/helm-controller to allow us to grant +# only scoped down permissions to the Job that is deployed. diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/system-namespaces-configmap.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/system-namespaces-configmap.yaml new file mode 100644 index 00000000..f4c85254 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/system-namespaces-configmap.yaml @@ -0,0 +1,62 @@ +{{- if .Values.systemNamespacesConfigMap.create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "helm-project-operator.name" . }}-system-namespaces + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . 
| nindent 4 }} +data: + system-namespaces.json: |- + { +{{- if .Values.projectReleaseNamespaces.enabled }} +{{- if .Values.projectReleaseNamespaces.labelValue }} + "projectReleaseLabelValue": {{ .Values.projectReleaseNamespaces.labelValue | quote }}, +{{- else if .Values.global.cattle.systemProjectId }} + "projectReleaseLabelValue": {{ .Values.global.cattle.systemProjectId | quote }}, +{{- else }} + "projectReleaseLabelValue": "", +{{- end }} +{{- else }} + "projectReleaseLabelValue": "", +{{- end }} +{{- if not .Values.projectReleaseNamespaces.enabled }} + "systemProjectLabelValues": {{ append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId | toJson }} +{{- else if and (ne (len .Values.global.cattle.systemProjectId) 0) (ne (len .Values.projectReleaseNamespaces.labelValue) 0) (ne .Values.projectReleaseNamespaces.labelValue .Values.global.cattle.systemProjectId) }} + "systemProjectLabelValues": {{ append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId | toJson }} +{{- else if len .Values.otherSystemProjectLabelValues }} + "systemProjectLabelValues": {{ .Values.otherSystemProjectLabelValues | toJson }} +{{- else }} + "systemProjectLabelValues": [] +{{- end }} + } +--- +{{- if (and .Values.systemNamespacesConfigMap.rbac.enabled .Values.systemNamespacesConfigMap.rbac.subjects) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "helm-project-operator.name" . }}-system-namespaces + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - "{{ template "helm-project-operator.name" . }}-system-namespaces" + verbs: + - 'get' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "helm-project-operator.name" . }}-system-namespaces + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "helm-project-operator.name" . }}-system-namespaces +subjects: {{ .Values.systemNamespacesConfigMap.rbac.subjects | toYaml | nindent 2 }} +{{- end }} +{{- end }} diff --git a/internal/helm-project-operator/charts/helm-project-operator/templates/validate-psp-install.yaml b/internal/helm-project-operator/charts/helm-project-operator/templates/validate-psp-install.yaml new file mode 100644 index 00000000..a30c59d3 --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/templates/validate-psp-install.yaml @@ -0,0 +1,7 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +#{{- if .Values.global.cattle.psp.enabled }} +#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}} +#{{- end }} +#{{- end }} +#{{- end }} diff --git a/internal/helm-project-operator/charts/helm-project-operator/values.yaml b/internal/helm-project-operator/charts/helm-project-operator/values.yaml new file mode 100644 index 00000000..63fae45a --- /dev/null +++ b/internal/helm-project-operator/charts/helm-project-operator/values.yaml @@ -0,0 +1,228 @@ +# Default values for helm-project-operator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
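+
+## For orientation, an illustrative sketch (not consumed by the chart) of the `valuesOverride`
+## setting defined further below, assuming the bundled example-chart (which exposes `data.hello`):
+##
+##   valuesOverride:
+##     data:
+##       hello: operator-pinned
+##
+## An override like this would pin `data.hello` for every ProjectHelmChart deployment,
+## regardless of the user-supplied `spec.values`.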
+ +# Helm Project Operator Configuration + +global: + cattle: + clusterId: "" + psp: + enabled: false + projectLabel: field.cattle.io/projectId + systemDefaultRegistry: "" + systemProjectId: "" + url: "" + rbac: + ## Create RBAC resources for ServiceAccounts and users + ## + create: true + + userRoles: + ## Create default user ClusterRoles to allow users to interact with ProjectHelmCharts + create: true + ## Aggregate default user ClusterRoles into default k8s ClusterRoles + aggregateToDefaultRoles: true + + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + +helmApiVersion: dummy.cattle.io/v1alpha1 + +## valuesOverride overrides values that are set on each ProjectHelmChart deployment on an operator-level +## User-provided values will be overwritten based on the values provided here +valuesOverride: {} + +## projectReleaseNamespaces are auto-generated namespaces that are created to host Helm Releases +## managed by this operator on behalf of a ProjectHelmChart +projectReleaseNamespaces: + ## Enabled determines whether Project Release Namespaces should be created. If false, the underlying + ## Helm release will be deployed in the Project Registration Namespace + enabled: true + ## labelValue is the value of the Project that the projectReleaseNamespace should be created within + ## If empty, this will be set to the value of global.cattle.systemProjectId + ## If global.cattle.systemProjectId is also empty, project release namespaces will be disabled + labelValue: "" + +## otherSystemProjectLabelValues are project labels that identify namespaces as those that should be treated as system projects +## i.e. they will be entirely ignored by the operator +## By default, the global.cattle.systemProjectId will be in this list +otherSystemProjectLabelValues: [] + +## releaseRoleBindings configures RoleBindings automatically created by the Helm Project Operator +## in Project Release Namespaces where underlying Helm charts are deployed +releaseRoleBindings: + ## aggregate enables creating these RoleBindings off aggregating RoleBindings in the + ## Project Registration Namespace or ClusterRoleBindings that bind users to the ClusterRoles + ## specified under clusterRoleRefs + aggregate: true + + ## clusterRoleRefs are the ClusterRoles whose RoleBinding or ClusterRoleBindings should determine + ## the RoleBindings created in the Project Release Namespace + ## + ## By default, these are set to create RoleBindings based on the RoleBindings / ClusterRoleBindings + ## attached to the default K8s user-facing ClusterRoles of admin, edit, and view. 
+ ## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + ## + clusterRoleRefs: + admin: admin + edit: edit + view: view + +hardenedNamespaces: + # Whether to automatically manage the configuration of the default ServiceAccount and + # auto-create a NetworkPolicy for each namespace created by this operator + enabled: true + + configuration: + # Values to be applied to each default ServiceAccount created in a managed namespace + serviceAccountSpec: + secrets: [] + imagePullSecrets: [] + automountServiceAccountToken: false + # Values to be applied to each default generated NetworkPolicy created in a managed namespace + networkPolicySpec: + podSelector: {} + egress: [] + ingress: [] + policyTypes: ["Ingress", "Egress"] + +## systemNamespacesConfigMap is a ConfigMap created to allow users to see valid entries +## for registering a ProjectHelmChart for a given Project on the Rancher Dashboard UI. +## It does not need to be enabled for a non-Rancher use case. +systemNamespacesConfigMap: + ## Create indicates whether the system namespaces configmap should be created + ## This is a required value for integration with Rancher Dashboard + create: true + + ## RBAC provides options around the RBAC created to allow users to be able to view + ## the systemNamespacesConfigMap; if not specified, only users with the ability to + ## view ConfigMaps in the namespace where this chart is deployed will be able to + ## properly view the system namespaces on the Rancher Dashboard UI + rbac: + ## enabled indicates that we should deploy a RoleBinding and Role to view this ConfigMap + enabled: true + ## subjects are the subjects that should be bound to this default RoleBinding + ## By default, we allow anyone who is authenticated to the system to be able to view + ## this ConfigMap in the deployment namespace + subjects: + - kind: Group + name: system:authenticated + +nameOverride: "" + +namespaceOverride: "" + +replicas: 1 + +image: + repository: rancher/helm-project-operator + tag: v0.2.1 + pullPolicy: IfNotPresent + +helmController: + # Note: should be disabled for RKE2 clusters since they already run Helm Controller to manage internal Kubernetes components + enabled: true + + job: + image: + repository: rancher/klipper-helm + tag: v0.7.0-build20220315 + +helmLocker: + enabled: true + +# Additional arguments to be passed into the Helm Project Operator image +additionalArgs: [] + +## Define which Nodes the Pods are scheduled on. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for use with node taints +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" + +resources: {} + # limits: + # memory: 500Mi + # cpu: 1000m + # requests: + # memory: 100Mi + # cpu: 100m + +containerSecurityContext: {} + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # privileged: false + # readOnlyRootFilesystem: true + +securityContext: {} + # runAsGroup: 1000 + # runAsUser: 1000 + # supplementalGroups: + # - 1000 + +debug: false +debugLevel: 0 + +cleanup: + image: + repository: rancher/shell + tag: v0.1.19 + pullPolicy: IfNotPresent + + ## Define which Nodes the Pods are scheduled on. 
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + containerSecurityContext: {} + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # privileged: false + # readOnlyRootFilesystem: true + + securityContext: + runAsNonRoot: false + runAsUser: 0 + + resources: {} + # limits: + # memory: 500Mi + # cpu: 1000m + # requests: + # memory: 100Mi + # cpu: 100m diff --git a/internal/helm-project-operator/e2e_suite_test.go b/internal/helm-project-operator/e2e_suite_test.go new file mode 100644 index 00000000..1a376005 --- /dev/null +++ b/internal/helm-project-operator/e2e_suite_test.go @@ -0,0 +1,93 @@ +package main_test + +import ( + "context" + "errors" + "os" + "testing" + "time" + + k3shelmv1 "github.com/k3s-io/helm-controller/pkg/apis/helm.cattle.io/v1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + lockerv1alpha1 "github.com/rancher/helm-locker/pkg/apis/helm.cattle.io/v1alpha1" + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/wrangler/v3/pkg/kubeconfig" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + env "github.com/caarlos0/env/v11" + "github.com/kralicky/kmatch" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +func TestE2e(t *testing.T) { + SetDefaultEventuallyTimeout(60 * time.Second) + SetDefaultEventuallyPollingInterval(50 * time.Millisecond) + SetDefaultConsistentlyDuration(1 * time.Second) + SetDefaultConsistentlyPollingInterval(50 * time.Millisecond) + RegisterFailHandler(Fail) + RunSpecs(t, "E2e Suite") +} + +var ( + k8sClient client.Client + cfg *rest.Config + testCtx context.Context + clientSet *kubernetes.Clientset + + clientC clientcmd.ClientConfig +) + +type TestSpec struct { + Kubeconfig string `env:"KUBECONFIG,required"` + // HpoImage string `env:"IMAGE,required"` +} + +func (t *TestSpec) Validate() error { + var errs []error + // if _, err := dockerparser.Parse(t.HpoImage); err != nil { + // errs = append(errs, err) + // } + if _, err := os.Stat(t.Kubeconfig); err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return errors.Join(errs...) 
+ } + return nil +} + +var _ = BeforeSuite(func() { + ts := TestSpec{} + Expect(env.Parse(&ts)).To(Succeed(), "Could not parse test spec from environment variables") + Expect(ts.Validate()).To(Succeed(), "Invalid input e2e test spec") + + ctxCa, ca := context.WithCancel(context.Background()) + DeferCleanup(func() { + ca() + }) + + niCfg := kubeconfig.GetNonInteractiveClientConfig(ts.Kubeconfig) + restConfig, err := niCfg.ClientConfig() + Expect(err).To(Succeed()) + testCtx = ctxCa + newCfg, err := config.GetConfig() + cfg = newCfg + Expect(err).NotTo(HaveOccurred(), "Could not initialize kubernetes client config") + newClientset, err := kubernetes.NewForConfig(restConfig) + Expect(err).To(Succeed(), "Could not initialize kubernetes clientset") + clientSet = newClientset + + newK8sClient, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred(), "Could not initialize kubernetes client") + k8sClient = newK8sClient + v1alpha1.AddToScheme(k8sClient.Scheme()) + k3shelmv1.AddToScheme(k8sClient.Scheme()) + lockerv1alpha1.AddToScheme(k8sClient.Scheme()) + kmatch.SetDefaultObjectClient(k8sClient) +}) diff --git a/internal/helm-project-operator/e2e_test.go b/internal/helm-project-operator/e2e_test.go new file mode 100644 index 00000000..b29dd198 --- /dev/null +++ b/internal/helm-project-operator/e2e_test.go @@ -0,0 +1,450 @@ +package main_test + +import ( + "context" + "errors" + "fmt" + "os/exec" + "strings" + "time" + + . "github.com/kralicky/kmatch" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "golang.org/x/mod/semver" + + corev1 "k8s.io/api/core/v1" + // "sigs.k8s.io/controller-runtime/pkg/client" + k3shelmv1 "github.com/k3s-io/helm-controller/pkg/apis/helm.cattle.io/v1" + lockerv1alpha1 "github.com/rancher/helm-locker/pkg/apis/helm.cattle.io/v1alpha1" + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/helm-project-operator/pkg/operator" + "github.com/rancher/helm-project-operator/pkg/test" + appsv1 "k8s.io/api/apps/v1" + + // "github.com/rancher/helm-project-operator/pkg/controllers/common" + // "github.com/rancher/helm-project-operator/pkg/operator" + // "github.com/rancher/helm-project-operator/pkg/test" + batchv1 "k8s.io/api/batch/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" +) + +var ( + // could be improved to be read from the values.yaml possibly + cfgName = strings.ReplaceAll("dummy.cattle.io/v1alpha1", "/", ".") +) + +// hardcoded labels / annotations / values +const ( + // TODO : this will be subject to change as I update the code + + labelProjectId = "field.cattle.io/projectId" + annoProjectId = "field.cattle.io/projectId" + + labelHelmProj = "helm.cattle.io/projectId" + labelOperatedByHelmProj = "helm.cattle.io/helm-project-operated" +) + +// test constants +const ( + // opaque project name + testProjectName = "p-example" + // opaque name give to our project helm chart CR + testPHCName = "project-example-chart" + // install namespace of the chart + chartNs = "cattle-helm-system" + // comes from dummy.go common.OperatorOptions + releaseName = "dummy" +) + +const ( + // DummyHelmAPIVersion is the spec.helmApiVersion corresponding to the dummy example-chart + DummyHelmAPIVersion = "dummy.cattle.io/v1alpha1" + + // DummyReleaseName is the release name corresponding to the operator that deploys the dummy example-chart + DummyReleaseName = 
"dummy" +) + +func projectNamespace(project string) string { + return fmt.Sprintf("cattle-project-%s", project) +} + +type helmInstaller struct { + helmInstallOptions +} + +func (h *helmInstaller) build() (*exec.Cmd, error) { + if h.releaseName == "" { + return nil, errors.New("helm release name must be set") + } + if h.chartRegistry == "" { + return nil, errors.New("helm chart registry must be set") + } + args := []string{ + "upgrade", + "--install", + } + if h.createNamespace { + args = append(args, "--create-namespace") + } + if h.namespace != "" { + args = append(args, "-n", h.namespace) + } + args = append(args, h.releaseName) + for k, v := range h.values { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + args = append(args, h.chartRegistry) + GinkgoWriter.Print(strings.Join(append([]string{"helm"}, append(args, "\n")...), " ")) + cmd := exec.CommandContext(h.ctx, "helm", args...) + return cmd, nil +} + +func newHelmInstaller(opts ...helmInstallerOption) *helmInstaller { + h := &helmInstaller{ + helmInstallOptions: helmInstallerDefaultOptions(), + } + for _, opt := range opts { + opt(&h.helmInstallOptions) + } + return h + +} + +func helmInstallerDefaultOptions() helmInstallOptions { + return helmInstallOptions{ + ctx: context.Background(), + createNamespace: false, + namespace: "default", + releaseName: "helm-project-operator", + chartRegistry: "https://charts.helm.sh/stable", + values: make(map[string]string), + } +} + +type helmInstallOptions struct { + ctx context.Context + createNamespace bool + namespace string + releaseName string + chartRegistry string + values map[string]string +} + +type helmInstallerOption func(*helmInstallOptions) + +func WithContext(ctx context.Context) helmInstallerOption { + return func(h *helmInstallOptions) { + h.ctx = ctx + } +} + +func WithCreateNamespace() helmInstallerOption { + return func(h *helmInstallOptions) { + h.createNamespace = true + } +} + +func WithNamespace(namespace string) helmInstallerOption { + return func(h *helmInstallOptions) { + h.namespace = namespace + } +} + +func WithReleaseName(releaseName string) helmInstallerOption { + return func(h *helmInstallOptions) { + h.releaseName = releaseName + } +} + +func WithChartRegistry(chartRegistry string) helmInstallerOption { + return func(h *helmInstallOptions) { + h.chartRegistry = chartRegistry + } +} + +func WithValue(key string, value string) helmInstallerOption { + return func(h *helmInstallOptions) { + if _, ok := h.values[key]; ok { + panic("duplicate helm value set, likely uninteded behaviour") + } + h.values[key] = value + } +} + +var _ = Describe("E2E helm project operator tests", Ordered, Label("integration"), func() { + BeforeAll(func() { + By("checking the cluster server version info") + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + Expect(err).To(Succeed(), "Failed to create discovery client") + serverVersion, err := discoveryClient.ServerVersion() + Expect(err).To(Succeed(), "Failed to get server version") + GinkgoWriter.Print( + fmt.Sprintf("Running e2e tests against Kubernetes distribution %s %s\n", + strings.TrimPrefix(semver.Build(serverVersion.GitVersion), "+"), + semver.MajorMinor(serverVersion.GitVersion), + ), + ) + }) + + When("We install the helm project operator", func() { + // TODO : we need to rework pkg/cli before refactoring the start of the operator. + // !! We need to be careful to rework them with parity with the way the older rancher/wrangler-cli works + // !! 
+
+var _ = Describe("E2E helm project operator tests", Ordered, Label("integration"), func() {
+	BeforeAll(func() {
+		By("checking the cluster server version info")
+		discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
+		Expect(err).To(Succeed(), "Failed to create discovery client")
+		serverVersion, err := discoveryClient.ServerVersion()
+		Expect(err).To(Succeed(), "Failed to get server version")
+		GinkgoWriter.Print(
+			fmt.Sprintf("Running e2e tests against Kubernetes distribution %s %s\n",
+				strings.TrimPrefix(semver.Build(serverVersion.GitVersion), "+"),
+				semver.MajorMinor(serverVersion.GitVersion),
+			),
+		)
+	})
+
+	When("We install the helm project operator", func() {
+		// TODO : we need to rework pkg/cli before refactoring the start of the operator.
+		// !! We need to be careful to rework it with parity with the older rancher/wrangler-cli,
+		// !! which is great but did a lot of implicit coercion between env variables, structs and other things...
+		// TODO : then we can run this "in-tree" instead of importing images and deploying a chart
+
+		It("should install from the latest charts", func() {
+			go func() {
+				defer func() {
+					// recover from RunOrDie, which will always panic on os.Exit
+					r := recover()
+					if r != nil {
+						GinkgoWriter.Write([]byte(fmt.Sprintf("Recovered from panic: %v", r)))
+					}
+				}()
+				if false {
+					err := operator.Init(testCtx, "cattle-helm-system", clientC, common.Options{
+						OperatorOptions: common.OperatorOptions{
+							HelmAPIVersion:   DummyHelmAPIVersion,
+							ReleaseName:      DummyReleaseName,
+							SystemNamespaces: []string{"kube-system"},
+							ChartContent:     string(test.TestData("example-chart/example-chart.tgz.base64")),
+							Singleton:        false,
+						},
+						RuntimeOptions: common.RuntimeOptions{
+							Namespace:                     "cattle-helm-system",
+							DisableEmbeddedHelmController: true,
+						},
+					})
+					if err != nil {
+						GinkgoWriter.Write([]byte(fmt.Sprintf("operator.Init failed: %v", err)))
+					}
+				}
+			}()
+			ctxT, ca := context.WithTimeout(testCtx, 5*time.Minute)
+			defer ca()
+			helmInstaller := newHelmInstaller(
+				WithContext(ctxT),
+				WithCreateNamespace(),
+				WithNamespace(chartNs),
+				WithReleaseName("helm-project-operator"),
+				WithChartRegistry("./charts/helm-project-operator"),
+				WithValue("image.repository", "rancher/helm-project-operator"),
+				WithValue("image.tag", "dev"),
+				WithValue("helmController.enabled", "true"),
+			)
+			cmd, err := helmInstaller.build()
+			Expect(err).To(Succeed())
+			session, err := StartCmd(cmd)
+			Expect(err).To(Succeed(), "helm install command failed")
+			err = session.Wait()
+			Expect(err).To(Succeed(), "helm install command failed to exit successfully")
+		})
+
+		It("Should create a helm project operator deployment", func() {
+			deploy := &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "helm-project-operator",
+					Namespace: "cattle-helm-system",
+				},
+			}
+			Eventually(Object(deploy)).Should(ExistAnd(
+				HaveMatchingContainer(And(
+					HaveName("helm-project-operator"),
+					HaveImage("rancher/helm-project-operator:dev"),
+				)),
+			))
+
+			Eventually(
+				Object(deploy),
+				time.Second*90, time.Millisecond*333,
+			).Should(HaveSuccessfulRollout())
+		})
+
+		When("a project registration namespace is created", func() {
+			It("Should create the project registration namespace", func() {
+				By("creating the project registration namespace")
+				ns := &corev1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "e2e-hpo",
+						Labels: map[string]string{
+							// Note : this will be rejected by the webhook if rancher/rancher is managing this cluster
+							labelProjectId: "p-example",
+						},
+						Annotations: map[string]string{
+							annoProjectId: fmt.Sprintf("local:%s", testProjectName),
+						},
+					},
+				}
+				err := k8sClient.Create(testCtx, ns)
+				exists := apierrors.IsAlreadyExists(err)
+				if !exists {
+					Expect(err).To(Succeed(), "Failed to create project registration namespace")
+				}
+				Eventually(Object(ns)).Should(Exist())
+
+				By("verifying the helm project namespace has been created by the controller")
+				projNs := &corev1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: projectNamespace(testProjectName),
+					},
+				}
+				Eventually(Object(projNs), 60*time.Second).Should(ExistAnd(
+					HaveLabels(
+						labelProjectId, testProjectName,
+						labelOperatedByHelmProj, "true",
+						labelHelmProj, testProjectName,
+					),
+					HaveAnnotations(
+						labelProjectId, testProjectName,
+					),
+				))
+
+				By("verifying the helm project operator has created the helm api configmap")
+				configMap := &corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      cfgName,
+						Namespace: projectNamespace(testProjectName),
+					},
+				}
+				Eventually(Object(configMap)).Should(Exist())
+			})
+		})
+
+		When("We create a ProjectHelmChart", func() {
+			It("should create the project-helm-chart object", func() {
+				projH := v1alpha1.ProjectHelmChart{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      testPHCName,
+						Namespace: projectNamespace(testProjectName),
+					},
+					Spec: v1alpha1.ProjectHelmChartSpec{
+						HelmAPIVersion: "dummy.cattle.io/v1alpha1",
+						Values: v1alpha1.GenericMap{
+							"data": map[string]interface{}{
+								"hello": "e2e-ci",
+							},
+						},
+					},
+				}
+				Expect(k8sClient.Create(testCtx, &projH)).To(Succeed())
+			})
+
+			It("should create the CRs associated with this ProjectHelmChart", func() {
+				By("verifying the k3s-io helm-controller has created the helm chart")
+				helmchart := &k3shelmv1.HelmChart{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("%s-%s", testPHCName, releaseName),
+						Namespace: chartNs,
+					},
+				}
+				Eventually(Object(helmchart), time.Second*15, time.Millisecond*50).Should(Exist())
+
+				By("verifying the helm locker has created the associated helm release")
+				helmchartRelease := &lockerv1alpha1.HelmRelease{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("%s-%s", testPHCName, releaseName),
+						Namespace: chartNs,
+					},
+				}
+				Eventually(Object(helmchartRelease), time.Second*15, time.Millisecond*50).Should(Exist())
+			})
+
+			It("should create the job which deploys the helm chart", func() {
+				job := &batchv1.Job{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("helm-install-%s-%s", testPHCName, releaseName),
+						Namespace: chartNs,
+					},
+				}
+				Eventually(Object(job)).Should(Exist())
+				// TODO : this works, but it would be better to mirror the condition used by `kubectl wait --for=condition=complete`
+				Eventually(func() error {
+					retJob, err := Object(job)()
+					if err != nil {
+						return err
+					}
+					if retJob.Status.Succeeded < 1 {
+						return fmt.Errorf("job has not yet succeeded")
+					}
+					return nil
+				}).Should(Succeed())
+			})
+
+			When("We delete a project helm chart", func() {
+				It("should delete the project helm chart CR", func() {
+					projH := &v1alpha1.ProjectHelmChart{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      testPHCName,
+							Namespace: projectNamespace(testProjectName),
+						},
+						Spec: v1alpha1.ProjectHelmChartSpec{
+							HelmAPIVersion: "dummy.cattle.io/v1alpha1",
+							Values: v1alpha1.GenericMap{
+								"data": map[string]interface{}{
+									"hello": "e2e-ci",
+								},
+							},
+						},
+					}
+					Expect(k8sClient.Delete(testCtx, projH)).To(Succeed())
+				})
+				// FIXME: this spec could be flaky
+				It("should have created the matching delete job", func() {
+					deleteJob := &batchv1.Job{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      fmt.Sprintf("helm-delete-%s-%s", testPHCName, releaseName),
+							Namespace: chartNs,
+						},
+					}
+					Eventually(Object(deleteJob)).Should(Exist())
+
+					Eventually(func() error {
+						retJob, err := Object(deleteJob)()
+						if err != nil {
+							return err
+						}
+						if retJob.Status.Succeeded < 1 {
+							return fmt.Errorf("delete job has not yet succeeded")
+						}
+						return nil
+					}).Should(Succeed())
+				})
+
+				It("should make sure that resources that should be absent stay absent", func() {
+					By("verifying the project helm chart has been deleted")
+					projH := &v1alpha1.ProjectHelmChart{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      testPHCName,
+							Namespace: projectNamespace(testProjectName),
+						},
+					}
+					Consistently(Object(projH)).ShouldNot(Exist())
+
+					By("verifying the helm chart CR has been deleted")
+					helmchart := &k3shelmv1.HelmChart{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: fmt.Sprintf("%s-%s", 
testPHCName, releaseName), + Namespace: chartNs, + }, + } + Consistently(Object(helmchart)).Should(Not(Exist())) + + By("verifying the helm locker release CR has been deleted") + helmchartRelease := &lockerv1alpha1.HelmRelease{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", testPHCName, releaseName), + Namespace: chartNs, + }, + } + Consistently(Object(helmchartRelease)).Should(Not(Exist())) + }) + }) + }) + }) +}) diff --git a/internal/helm-project-operator/generate.go b/internal/helm-project-operator/generate.go new file mode 100644 index 00000000..94e672b1 --- /dev/null +++ b/internal/helm-project-operator/generate.go @@ -0,0 +1,5 @@ +//go:generate go run pkg/codegen/cleanup/main.go +//go:generate go run pkg/codegen/main.go +//go:generate go run ./pkg/codegen crds ./crds ./crds + +package main diff --git a/internal/helm-project-operator/go.mod b/internal/helm-project-operator/go.mod new file mode 100644 index 00000000..e97e9ea9 --- /dev/null +++ b/internal/helm-project-operator/go.mod @@ -0,0 +1,105 @@ +module github.com/rancher/helm-project-operator + +go 1.22.3 + +toolchain go1.23.0 + +require ( + github.com/caarlos0/env/v11 v11.1.0 + github.com/k3s-io/helm-controller v0.16.6-0.20241210112214-b40937ee695b + github.com/kralicky/kmatch v0.0.0-20240603031752-4aaff7842056 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.34.2 + github.com/rancher/helm-locker v0.0.2-rc.1.0.20241217154720-4d292094619c + github.com/rancher/lasso v0.0.0-20240705194423-b2a060d103c1 + github.com/rancher/wrangler/v3 v3.0.0 + github.com/samber/lo v1.47.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.8.0 + golang.org/x/mod v0.21.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.30.3 + k8s.io/apimachinery v0.30.3 + k8s.io/client-go v0.30.3 + k8s.io/klog v1.0.0 + sigs.k8s.io/controller-runtime v0.18.4 +) + +require ( + emperror.dev/errors v0.8.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + 
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rubenv/sql-migrate v1.5.2 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.26.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.34.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + helm.sh/helm/v3 v3.15.3 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/code-generator v0.30.1 // indirect + k8s.io/gengo v0.0.0-20240228010128-51d4e06bde70 // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/internal/helm-project-operator/go.sum b/internal/helm-project-operator/go.sum new file mode 100644 index 00000000..fd7c6109 --- /dev/null +++ b/internal/helm-project-operator/go.sum @@ -0,0 +1,351 @@ +emperror.dev/errors v0.8.1 h1:UavXZ5cSX/4u9iyvH6aDcuGkVjeexUGJ7Ij7G4VfQT0= +emperror.dev/errors v0.8.1/go.mod h1:YcRvLPh626Ubn2xqtoprejnA5nFha+TJ+2vew48kWuE= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/asaskevich/govalidator 
v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/caarlos0/env/v11 v11.1.0 h1:a5qZqieE9ZfzdvbbdhTalRrHT5vu/4V1/ad1Ka6frhI= +github.com/caarlos0/env/v11 v11.1.0/go.mod h1:LwgkYk1kDvfGpHthrWWLof3Ny7PezzFwS4QrsJdHTMo= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= +github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= +github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= +github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod 
h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/k3s-io/helm-controller v0.16.6-0.20241210112214-b40937ee695b h1:arBvOvisrjJZbb5Tzmqm/U8BUUpq/4dQ5E4Lnr3bTso= +github.com/k3s-io/helm-controller v0.16.6-0.20241210112214-b40937ee695b/go.mod h1:AcSxEhOIUgeVvBTnJOAwcezBZXtYew/RhKwO5xp3RlM= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kralicky/kmatch v0.0.0-20240603031752-4aaff7842056 h1:qGTRMNJPF6TyOLklyqFvjd1zyZnyrlBmAdpcvtqW1iw= +github.com/kralicky/kmatch v0.0.0-20240603031752-4aaff7842056/go.mod h1:JRnTh8vZ0vEr8ljMxpSWPDD6b9LcTtdJ+ofrI6yCLiA= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod 
h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= +github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= +github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= +github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 
h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rancher/helm-locker v0.0.2-rc.1.0.20241217154720-4d292094619c h1:mSfBdpM7hudbeNG3+j8IMnuVldqcMIEkwdmBbfTEu+s= +github.com/rancher/helm-locker v0.0.2-rc.1.0.20241217154720-4d292094619c/go.mod h1:5Bfez3BMxFa/4njxwL4n3cBwItojNUM8/+GJ+Ze4FeM= +github.com/rancher/lasso v0.0.0-20240705194423-b2a060d103c1 h1:vv1jDlYbd4KhGbPNxmjs8CYgEHUrQm2bMtmULfXJ6iw= +github.com/rancher/lasso v0.0.0-20240705194423-b2a060d103c1/go.mod h1:A/y3BLQkxZXYD60MNDRwAG9WGxXfvd6Z6gWR/a8wPw8= +github.com/rancher/wrangler/v3 v3.0.0 h1:IHHCA+vrghJDPxjtLk4fmeSCFhNe9fFzLFj3m2B0YpA= +github.com/rancher/wrangler/v3 v3.0.0/go.mod h1:Dfckuuq7MJk2JWVBDywRlZXMxEyPxHy4XqGrPEzu5Eg= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= +github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +helm.sh/helm/v3 v3.15.3 h1:HcZDaVFe9uHa6hpsR54mJjYyRy4uz/pc6csg27nxFOc= +helm.sh/helm/v3 v3.15.3/go.mod h1:FzSIP8jDQaa6WAVg9F+OkKz7J0ZmAga4MABtTbsb9WQ= +k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= +k8s.io/api v0.30.3/go.mod 
h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= +k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/code-generator v0.30.1 h1:ZsG++q5Vt0ScmKCeLhynUuWgcwFGg1Hl1AGfatqPJBI= +k8s.io/code-generator v0.30.1/go.mod h1:hFgxRsvOUg79mbpbVKfjJvRhVz1qLoe40yZDJ/hwRH4= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/gengo v0.0.0-20240228010128-51d4e06bde70 h1:D9H6wq7PAmub2g4XUrekNWMFVI0JIz7s0F64HBPsPOw= +k8s.io/gengo v0.0.0-20240228010128-51d4e06bde70/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/helm-project-operator/helpers_test.go b/internal/helm-project-operator/helpers_test.go new file mode 100644 index 00000000..fa906441 --- /dev/null +++ b/internal/helm-project-operator/helpers_test.go @@ -0,0 +1,78 @@ +package main_test + +import ( + 
"context" + "errors" + "io" + "os/exec" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega/gexec" + "github.com/samber/lo" + corev1 "k8s.io/api/core/v1" +) + +type Session interface { + G() (*gexec.Session, bool) + Wait() error +} + +type sessionWrapper struct { + g *gexec.Session + cmd *exec.Cmd +} + +func (s *sessionWrapper) G() (*gexec.Session, bool) { + if s.g != nil { + return s.g, true + } + return nil, false +} + +func (s *sessionWrapper) Wait() error { + if s == nil { + return nil + } + if s.g != nil { + ws := s.g.Wait() + if ws.ExitCode() != 0 { + return errors.New(string(ws.Err.Contents())) + } + return nil + } + return s.cmd.Wait() +} + +func StartCmd(cmd *exec.Cmd) (Session, error) { + session, err := gexec.Start(cmd, ginkgo.GinkgoWriter, ginkgo.GinkgoWriter) + if err != nil { + return nil, err + } + return &sessionWrapper{ + g: session, + cmd: cmd, + }, nil +} + +// nolint:unused +func streamLogs(ctx context.Context, namespace string, podName string) { + logOptions := &corev1.PodLogOptions{ + Follow: true, + } + + req := clientSet.CoreV1().Pods(namespace).GetLogs(podName, logOptions) + lo.Async( + func() error { + stream, err := req.Stream(ctx) + if err != nil { + return err + } + defer stream.Close() + _, err = io.Copy(ginkgo.GinkgoWriter, stream) + if err != nil { + return err + } + return nil + }, + ) +} diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/doc.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/doc.go new file mode 100644 index 00000000..31a7ddd9 --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +// +k8s:deepcopy-gen=package +// +groupName=helm.cattle.io +package v1alpha1 diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/genericmap.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/genericmap.go new file mode 100644 index 00000000..07e0a1f7 --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/genericmap.go @@ -0,0 +1,28 @@ +package v1alpha1 + +import ( + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/runtime" +) + +// +kubebuilder:pruning:PreserveUnknownFields +// +kubebuilder:validation:EmbeddedResource + +// GenericMap is a wrapper on arbitrary JSON / YAML resources +type GenericMap map[string]interface{} + +func (in *GenericMap) DeepCopy() *GenericMap { + if in == nil { + return nil + } + out := new(GenericMap) + *out = runtime.DeepCopyJSON(*in) + return out +} + +func (in *GenericMap) ToYAML() ([]byte, error) { + if in == nil { + return []byte{}, nil + } + return yaml.Marshal(in) +} diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/project.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/project.go new file mode 100644 index 00000000..e7f35853 --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/project.go @@ -0,0 +1,64 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectHelmChart specifies a managed Helm chart that should be deployed for a "Project" (defined as any set +// of namespaces that can be targeted by a label selector) and be updated automatically on changing definitions +// of that project (e.g. namespaces added or removed). It is a parent object that creates HelmCharts and HelmReleases +// under the hood via wrangler.Apply and relatedresource.Watch +type ProjectHelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ProjectHelmChartSpec `json:"spec"` + Status ProjectHelmChartStatus `json:"status"` +} + +// ProjectHelmChartSpec defines the spec of a ProjectHelmChart +type ProjectHelmChartSpec struct { + // HelmAPIVersion identifies whether a particular rendition of the Helm Project Operator + // should watch ProjectHelmChart of this type. e.g. monitoring.cattle.io/v1alpha1 is watched by Prometheus Federator + HelmAPIVersion string `json:"helmApiVersion"` + + // ProjectNamespaceSelector is a namespaceSelector that identifies the project this underlying chart should be targeting + // If a project label is provided as part of the Operator's runtime options, this field will be ignored since ProjectHelmCharts + // will be created in dedicated project namespaces with a pre-defined project namespace selector + ProjectNamespaceSelector *metav1.LabelSelector `json:"projectNamespaceSelector"` + + // Values is a generic map (e.g. 
generic yaml) representing the values.yaml used to configure the underlying Helm chart that + // will be deployed for this + Values GenericMap `json:"values"` +} + +type ProjectHelmChartStatus struct { + // DashboardValues are values provided to the ProjectHelmChart from ConfigMaps in the Project Release namespace + // tagged with 'helm.cattle.io/dashboard-values-configmap': '{{ .Release.Name }}' + DashboardValues GenericMap `json:"dashboardValues"` + + // Status is the current status of this ProjectHelmChart + // Please see pkg/controllers/project/status.go for possible states + Status string `json:"status"` + + // StatusMessage is a detailed message explaining the current status of the ProjectHelmChart + // Please see pkg/controllers/project/status.go for possible state messages + StatusMessage string `json:"statusMessage"` + + // SystemNamespace is the namespace where HelmCharts and HelmReleases will be deployed + SystemNamespace string `json:"systemNamespace"` + + // ReleaseNamespace is the namespace where the underlying Helm chart will be deployed + // Also known as the Project Release Namespace + ReleaseNamespace string `json:"releaseNamespace"` + + // ReleaseName is the name of the Helm Release contained in the Project Release Namespace + ReleaseName string `json:"releaseName"` + + // TargetNamespaces are the current set of namespaces targeted by the namespaceSelector + // that this ProjectHelmChart was configured with. As noted above, this will correspond + // to the Project Registration Namespace's selector if project label is provided + TargetNamespaces []string `json:"targetNamespaces"` +} diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_deepcopy.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_deepcopy.go new file mode 100644 index 00000000..3c249e39 --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_deepcopy.go @@ -0,0 +1,142 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in GenericMap) DeepCopyInto(out *GenericMap) { + { + in := &in + clone := in.DeepCopy() + *out = *clone + return + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectHelmChart) DeepCopyInto(out *ProjectHelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChart. 
+func (in *ProjectHelmChart) DeepCopy() *ProjectHelmChart { + if in == nil { + return nil + } + out := new(ProjectHelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectHelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectHelmChartList) DeepCopyInto(out *ProjectHelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectHelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartList. +func (in *ProjectHelmChartList) DeepCopy() *ProjectHelmChartList { + if in == nil { + return nil + } + out := new(ProjectHelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectHelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectHelmChartSpec) DeepCopyInto(out *ProjectHelmChartSpec) { + *out = *in + if in.ProjectNamespaceSelector != nil { + in, out := &in.ProjectNamespaceSelector, &out.ProjectNamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Values.DeepCopyInto(&out.Values) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartSpec. +func (in *ProjectHelmChartSpec) DeepCopy() *ProjectHelmChartSpec { + if in == nil { + return nil + } + out := new(ProjectHelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectHelmChartStatus) DeepCopyInto(out *ProjectHelmChartStatus) { + *out = *in + in.DashboardValues.DeepCopyInto(&out.DashboardValues) + if in.TargetNamespaces != nil { + in, out := &in.TargetNamespaces, &out.TargetNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartStatus. +func (in *ProjectHelmChartStatus) DeepCopy() *ProjectHelmChartStatus { + if in == nil { + return nil + } + out := new(ProjectHelmChartStatus) + in.DeepCopyInto(out) + return out +} diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_list_types.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_list_types.go new file mode 100644 index 00000000..39c88b56 --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_list_types.go @@ -0,0 +1,42 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +// +k8s:deepcopy-gen=package +// +groupName=helm.cattle.io +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectHelmChartList is a list of ProjectHelmChart resources +type ProjectHelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ProjectHelmChart `json:"items"` +} + +func NewProjectHelmChart(namespace, name string, obj ProjectHelmChart) *ProjectHelmChart { + obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("ProjectHelmChart").ToAPIVersionAndKind() + obj.Name = name + obj.Namespace = namespace + return &obj +} diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_register.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_register.go new file mode 100644 index 00000000..7d5f1afd --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1/zz_generated_register.go @@ -0,0 +1,60 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +// +k8s:deepcopy-gen=package +// +groupName=helm.cattle.io +package v1alpha1 + +import ( + helm "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + ProjectHelmChartResourceName = "projecthelmcharts" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: helm.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ProjectHelmChart{}, + &ProjectHelmChartList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/internal/helm-project-operator/pkg/apis/helm.cattle.io/zz_generated_register.go b/internal/helm-project-operator/pkg/apis/helm.cattle.io/zz_generated_register.go new file mode 100644 index 00000000..89a2a024 --- /dev/null +++ b/internal/helm-project-operator/pkg/apis/helm.cattle.io/zz_generated_register.go @@ -0,0 +1,24 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package helm + +const ( + // Package-wide consts from generator "zz_generated_register". + GroupName = "helm.cattle.io" +) diff --git a/internal/helm-project-operator/pkg/applier/applyinator.go b/internal/helm-project-operator/pkg/applier/applyinator.go new file mode 100644 index 00000000..7b7a58ea --- /dev/null +++ b/internal/helm-project-operator/pkg/applier/applyinator.go @@ -0,0 +1,142 @@ +package applier + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" +) + +var ( + defaultRateLimiter = workqueue.NewMaxOfRateLimiter( + workqueue.NewItemFastSlowRateLimiter(time.Millisecond, 2*time.Minute, 30), + workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second), + ) +) + +// ApplyFunc is a func that needs to be applied on seeing a particular key be passed to an Applyinator +type ApplyFunc func(key string) error + +// Options are options that can be specified to configure a desired Applyinator +type Options struct { + RateLimiter workqueue.RateLimiter +} + +// Applyinator is an interface that eventually ensures that a requested action, identified by some key, +// is applied. Any object that implements Applyinator should provide the same guarantees as the +// k8s.io/client-go/util/workqueue implementation, namely: +// +// * Fair: items processed in the order in which they are added. +// * Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// * Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +type Applyinator interface { + Apply(key string) + Run(ctx context.Context, workers int) +} + +// NewApplyinator allows you to register a function that applies an action based on whether a particular +// key is enqueued via a call to Apply. It implements k8s.io/client-go/util/workqueue under the hood, which +// allows us to ensure that the apply function is called with the following guarantees (provided by workqueues): +// +// * Fair: items processed in the order in which they are added. 
+// * Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// * Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +func NewApplyinator(name string, applyFunc ApplyFunc, opts *Options) Applyinator { + opts = applyDefaultOptions(opts) + return &applyinator{ + workqueue: workqueue.NewNamedRateLimitingQueue(opts.RateLimiter, name), + apply: applyFunc, + } +} + +func applyDefaultOptions(opts *Options) *Options { + var newOpts Options + if opts != nil { + newOpts = *opts + } + if newOpts.RateLimiter == nil { + newOpts.RateLimiter = defaultRateLimiter + logrus.Debug("No rate limiter supplied, using default rate limiter.") + } + return &newOpts +} + +type applyinator struct { + workqueue workqueue.RateLimitingInterface + apply ApplyFunc +} + +// Apply triggers the Applyinator to run the provided apply func on the given key +// whenever the workqueue processes the next item +func (a *applyinator) Apply(key string) { + a.workqueue.Add(key) +} + +// Run allows the applyinator to start processing items added to its workqueue +func (a *applyinator) Run(ctx context.Context, workers int) { + + logrus.Debugf("Adding items to applyinator work queue. Workers: %d", workers) + go func() { + <-ctx.Done() + a.workqueue.ShutDown() + }() + for i := 0; i < workers; i++ { + go wait.Until(a.runWorker, time.Second, ctx.Done()) + } +} + +func (a *applyinator) runWorker() { + for a.processNextWorkItem() { + } +} + +func (a *applyinator) processNextWorkItem() bool { + obj, shutdown := a.workqueue.Get() + + if shutdown { + logrus.Debug("ProcessNextWorkItem called during shutdown. Exiting function.") + return false + } + + if err := a.processSingleItem(obj); err != nil { + if !strings.Contains(err.Error(), "please apply your changes to the latest version and try again") { + logrus.Errorf("%v", err) + } + return true + } + + return true +} + +func (a *applyinator) processSingleItem(obj interface{}) error { + var ( + key string + ok bool + ) + + defer a.workqueue.Done(obj) + + if key, ok = obj.(string); !ok { + a.workqueue.Forget(obj) + logrus.Errorf("expected string in workqueue but got %#v", obj) + return nil + } + if err := a.apply(key); err != nil { + a.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + + logrus.Debugf("Call to processSingleItem was successful for key: %s", key) + a.workqueue.Forget(obj) + return nil +} diff --git a/internal/helm-project-operator/pkg/cli/builder.go b/internal/helm-project-operator/pkg/cli/builder.go new file mode 100644 index 00000000..0e363fbe --- /dev/null +++ b/internal/helm-project-operator/pkg/cli/builder.go @@ -0,0 +1,276 @@ +package cli + +// https://github.com/rancher/wrangler-cli/blob/master/builder.go + +import ( + "os" + "reflect" + "regexp" + "strconv" + "strings" + "unsafe" + + "github.com/rancher/wrangler/v3/pkg/signals" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var ( + caseRegexp = regexp.MustCompile("([a-z])([A-Z])") +) + +type PersistentPreRunnable interface { + PersistentPre(cmd *cobra.Command, args []string) error +} + +type PreRunnable interface { + Pre(cmd *cobra.Command, args []string) error +} + +type Runnable interface { + Run(cmd *cobra.Command, args []string) error +} + +type customizer interface { + Customize(cmd *cobra.Command) +} + +type fieldInfo struct { + FieldType 
reflect.StructField
+	FieldValue reflect.Value
+}
+
+func fields(obj interface{}) []fieldInfo {
+	ptrValue := reflect.ValueOf(obj)
+	objValue := ptrValue.Elem()
+
+	var result []fieldInfo
+
+	for i := 0; i < objValue.NumField(); i++ {
+		fieldType := objValue.Type().Field(i)
+		if fieldType.Anonymous && fieldType.Type.Kind() == reflect.Struct {
+			result = append(result, fields(objValue.Field(i).Addr().Interface())...)
+		} else if !fieldType.Anonymous {
+			result = append(result, fieldInfo{
+				FieldValue: objValue.Field(i),
+				FieldType:  objValue.Type().Field(i),
+			})
+		}
+	}
+
+	return result
+}
+
+func Name(obj interface{}) string {
+	ptrValue := reflect.ValueOf(obj)
+	objValue := ptrValue.Elem()
+	commandName := strings.Replace(objValue.Type().Name(), "Command", "", 1)
+	commandName, _ = name(commandName, "", "")
+	return commandName
+}
+
+func Main(cmd *cobra.Command) {
+	ctx := signals.SetupSignalContext()
+	if err := cmd.ExecuteContext(ctx); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// Command populates a cobra.Command object by extracting args from struct tags of the
+// Runnable obj passed. Also the Run method is assigned to the RunE of the command.
+// The 'name' struct tag overrides the flag name derived from the struct field, and the
+// 'short' struct tag provides a one-letter flag alias.
+func Command(obj Runnable, cmd cobra.Command) *cobra.Command {
+	var (
+		envs     []func()
+		arrays   = map[string]reflect.Value{}
+		slices   = map[string]reflect.Value{}
+		maps     = map[string]reflect.Value{}
+		ptrValue = reflect.ValueOf(obj)
+		objValue = ptrValue.Elem()
+	)
+
+	c := cmd
+	if c.Use == "" {
+		c.Use = Name(obj)
+	}
+
+	for _, info := range fields(obj) {
+		fieldType := info.FieldType
+		v := info.FieldValue
+
+		name, alias := name(fieldType.Name, fieldType.Tag.Get("name"), fieldType.Tag.Get("short"))
+		usage := fieldType.Tag.Get("usage")
+		env := strings.Split(fieldType.Tag.Get("env"), ",")
+		defValue := fieldType.Tag.Get("default")
+		if len(env) == 1 && env[0] == "" {
+			env = nil
+		}
+		defInt, err := strconv.Atoi(defValue)
+		if err != nil {
+			defInt = 0
+		}
+
+		flags := c.PersistentFlags()
+		switch fieldType.Type.Kind() {
+		case reflect.Int:
+			flags.IntVarP((*int)(unsafe.Pointer(v.Addr().Pointer())), name, alias, defInt, usage)
+		case reflect.String:
+			flags.StringVarP((*string)(unsafe.Pointer(v.Addr().Pointer())), name, alias, defValue, usage)
+		case reflect.Slice:
+			switch fieldType.Tag.Get("split") {
+			case "false":
+				arrays[name] = v
+				flags.StringArrayP(name, alias, nil, usage)
+			default:
+				slices[name] = v
+				flags.StringSliceP(name, alias, nil, usage)
+			}
+		case reflect.Map:
+			maps[name] = v
+			flags.StringSliceP(name, alias, nil, usage)
+		case reflect.Bool:
+			flags.BoolVarP((*bool)(unsafe.Pointer(v.Addr().Pointer())), name, alias, false, usage)
+		default:
+			panic("Unknown kind on field " + fieldType.Name + " on " + objValue.Type().Name())
+		}
+
+		if env != nil {
+			for _, env := range env {
+				envs = append(envs, func() {
+					v := os.Getenv(env)
+					if v != "" {
+						fv, err := flags.GetString(name)
+						if err == nil && (fv == "" || fv == defValue) {
+							flags.Set(name, v)
+						}
+					}
+				})
+			}
+		}
+	}
+
+	if p, ok := obj.(PersistentPreRunnable); ok {
+		c.PersistentPreRunE = p.PersistentPre
+	}
+
+	if p, ok := obj.(PreRunnable); ok {
+		c.PreRunE = p.Pre
+	}
+
+	c.RunE = obj.Run
+	c.PersistentPreRunE = bind(c.PersistentPreRunE, arrays, slices, maps, envs)
+	c.PreRunE = bind(c.PreRunE, arrays, slices, maps, envs)
+	c.RunE = bind(c.RunE, arrays, slices, maps, envs)
+
+	cust, ok := obj.(customizer)
+	if ok {
+		cust.Customize(&c)
+	}
+
+	return &c
+}
+
+func assignMaps(app *cobra.Command, maps
map[string]reflect.Value) error { + for k, v := range maps { + k = contextKey(k) + s, err := app.Flags().GetStringSlice(k) + if err != nil { + return err + } + if s != nil { + values := map[string]string{} + for _, part := range s { + parts := strings.SplitN(part, "=", 2) + if len(parts) == 1 { + values[parts[0]] = "" + } else { + values[parts[0]] = parts[1] + } + } + v.Set(reflect.ValueOf(values)) + } + } + return nil +} + +func assignSlices(app *cobra.Command, slices map[string]reflect.Value) error { + for k, v := range slices { + k = contextKey(k) + s, err := app.Flags().GetStringSlice(k) + if err != nil { + return err + } + if s != nil { + v.Set(reflect.ValueOf(s[:])) + } + } + return nil +} + +func assignArrays(app *cobra.Command, arrays map[string]reflect.Value) error { + for k, v := range arrays { + k = contextKey(k) + s, err := app.Flags().GetStringArray(k) + if err != nil { + return err + } + if s != nil { + v.Set(reflect.ValueOf(s[:])) + } + } + return nil +} + +func contextKey(name string) string { + parts := strings.Split(name, ",") + return parts[len(parts)-1] +} + +func name(name, setName, short string) (string, string) { + if setName != "" { + return setName, short + } + parts := strings.Split(name, "_") + i := len(parts) - 1 + name = caseRegexp.ReplaceAllString(parts[i], "$1-$2") + name = strings.ToLower(name) + result := append([]string{name}, parts[0:i]...) + for i := 0; i < len(result); i++ { + result[i] = strings.ToLower(result[i]) + } + if short == "" && len(result) > 1 { + short = result[1] + } + return result[0], short +} + +func bind(next func(*cobra.Command, []string) error, + arrays map[string]reflect.Value, + slices map[string]reflect.Value, + maps map[string]reflect.Value, + envs []func()) func(*cobra.Command, []string) error { + if next == nil { + return nil + } + return func(cmd *cobra.Command, args []string) error { + for _, envCallback := range envs { + envCallback() + } + if err := assignArrays(cmd, arrays); err != nil { + return err + } + if err := assignSlices(cmd, slices); err != nil { + return err + } + if err := assignMaps(cmd, maps); err != nil { + return err + } + + if next != nil { + return next(cmd, args) + } + + return nil + } +} diff --git a/internal/helm-project-operator/pkg/cli/debug.go b/internal/helm-project-operator/pkg/cli/debug.go new file mode 100644 index 00000000..59d14275 --- /dev/null +++ b/internal/helm-project-operator/pkg/cli/debug.go @@ -0,0 +1,51 @@ +package cli + +// https://github.com/rancher/wrangler-cli/blob/master/debug.go + +import ( + "flag" + "fmt" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "k8s.io/klog" +) + +type DebugConfig struct { + Debug bool + DebugLevel int +} + +func (c *DebugConfig) MustSetupDebug() { + err := c.SetupDebug() + if err != nil { + panic("failed to setup debug logging: " + err.Error()) + } +} + +func (c *DebugConfig) SetupDebug() error { + logging := flag.NewFlagSet("", flag.PanicOnError) + klog.InitFlags(logging) + if c.Debug { + logrus.SetLevel(logrus.DebugLevel) + if err := logging.Parse([]string{ + fmt.Sprintf("-v=%d", c.DebugLevel), + }); err != nil { + return err + } + } else { + if err := logging.Parse([]string{ + "-v=0", + }); err != nil { + return err + } + } + + return nil +} + +func AddDebug(cmd *cobra.Command, config *DebugConfig) *cobra.Command { + cmd.Flags().BoolVar(&config.Debug, "debug", false, "Turn on debug logging") + cmd.Flags().IntVar(&config.DebugLevel, "debug-level", 0, "If debugging is enabled, set klog -v=X") + return cmd +} diff --git 
a/internal/helm-project-operator/pkg/cli/doc.go b/internal/helm-project-operator/pkg/cli/doc.go
new file mode 100644
index 00000000..803a971e
--- /dev/null
+++ b/internal/helm-project-operator/pkg/cli/doc.go
@@ -0,0 +1,5 @@
+package cli
+
+// FIXME: the abstractions in rancher/wrangler-cli are clunky and no longer used;
+// we should explicitly redefine the config structs to achieve parity, but this could
+// be a somewhat lengthy task
diff --git a/internal/helm-project-operator/pkg/codegen/cleanup/main.go b/internal/helm-project-operator/pkg/codegen/cleanup/main.go
new file mode 100644
index 00000000..7f338712
--- /dev/null
+++ b/internal/helm-project-operator/pkg/codegen/cleanup/main.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+	"os"
+
+	"github.com/rancher/wrangler/v3/pkg/cleanup"
+	"github.com/sirupsen/logrus"
+)
+
+func main() {
+	if err := cleanup.Cleanup("./pkg/apis"); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := os.RemoveAll("./pkg/generated"); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := os.RemoveAll("./crds"); err != nil {
+		logrus.Fatal(err)
+	}
+}
diff --git a/internal/helm-project-operator/pkg/codegen/main.go b/internal/helm-project-operator/pkg/codegen/main.go
new file mode 100644
index 00000000..296b16dc
--- /dev/null
+++ b/internal/helm-project-operator/pkg/codegen/main.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+	"os"
+
+	v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1"
+	"github.com/rancher/helm-project-operator/pkg/crd"
+	"github.com/sirupsen/logrus"
+
+	controllergen "github.com/rancher/wrangler/v3/pkg/controller-gen"
+	"github.com/rancher/wrangler/v3/pkg/controller-gen/args"
+)
+
+func main() {
+	if len(os.Args) > 3 && os.Args[1] == "crds" {
+		if len(os.Args) != 4 {
+			logrus.Fatal("usage: ./codegen crds <crd-directory> <yaml-directory>")
+		}
+		logrus.Infof("Writing CRDs to %s and %s", os.Args[2], os.Args[3])
+		if err := crd.WriteFiles(os.Args[2], os.Args[3]); err != nil {
+			panic(err)
+		}
+		return
+	}
+
+	os.Unsetenv("GOPATH")
+	controllergen.Run(args.Options{
+		OutputPackage: "github.com/rancher/helm-project-operator/pkg/generated",
+		Boilerplate:   "scripts/boilerplate.go.txt",
+		Groups: map[string]args.Group{
+			"helm.cattle.io": {
+				Types: []interface{}{
+					v1alpha1.ProjectHelmChart{},
+				},
+				GenerateTypes: true,
+			},
+		},
+	})
+}
diff --git a/internal/helm-project-operator/pkg/controllers/common/formats.go b/internal/helm-project-operator/pkg/controllers/common/formats.go
new file mode 100644
index 00000000..5bf37336
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/common/formats.go
@@ -0,0 +1,8 @@
+package common
+
+const (
+	// ProjectRegistrationNamespaceFmt is the format used in order to create project registration namespaces if ProjectLabel is provided
+	// If SystemProjectLabel is also provided, the project release namespace will be this namespace with `-<ReleaseName>` suffixed, where
+	// ReleaseName is provided by the Project Operator that implements Helm Project Operator
+	ProjectRegistrationNamespaceFmt = "cattle-project-%s"
+)
diff --git a/internal/helm-project-operator/pkg/controllers/common/hardening.go b/internal/helm-project-operator/pkg/controllers/common/hardening.go
new file mode 100644
index 00000000..7b29e12e
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/common/hardening.go
@@ -0,0 +1,53 @@
+package common
+
+import (
+	"os"
+	"path/filepath"
+
+	"gopkg.in/yaml.v2"
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+)
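+
+// A sketch of a hardening.yaml file that LoadHardeningOptionsFromFile below could consume
+// (values illustrative; the top-level keys follow the yaml tags declared on HardeningOptions below):
+//
+//	serviceAccountSpec:
+//	  automountServiceAccountToken: false
+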
+// HardeningOptions are options that can be provided to override the default hardening resources applied to all namespaces
+// created by this Project Operator. To disable this, specify DisableHardening in the RuntimeOptions.
+type HardeningOptions struct {
+	// ServiceAccount represents the overrides to be supplied to the default service account patched by the hardening controller
+	ServiceAccount *DefaultServiceAccountOptions `yaml:"serviceAccountSpec"`
+	// NetworkPolicy represents the overrides to be supplied to the generated NetworkPolicy created by the hardening controller
+	NetworkPolicy *DefaultNetworkPolicyOptions `yaml:"networkPolicySpec"`
+}
+
+// DefaultServiceAccountOptions represents the overrides to be supplied to the default Service Account's fields
+// Note: the values of these fields are identical to what is defined on the corev1.ServiceAccount object
+type DefaultServiceAccountOptions struct {
+	Secrets                      []corev1.ObjectReference      `yaml:"secrets,omitempty"`
+	ImagePullSecrets             []corev1.LocalObjectReference `yaml:"imagePullSecrets,omitempty"`
+	AutomountServiceAccountToken *bool                         `yaml:"automountServiceAccountToken,omitempty"`
+}
+
+// DefaultNetworkPolicyOptions is the NetworkPolicySpec specified for generated NetworkPolicy created by the hardening controller
+type DefaultNetworkPolicyOptions networkingv1.NetworkPolicySpec
+
+// LoadHardeningOptionsFromFile reads the YAML file at the given path (relative to the working directory) and
+// unmarshals it into a HardeningOptions struct; if the file does not exist, the defaults are assumed
+func LoadHardeningOptionsFromFile(path string) (HardeningOptions, error) {
+	var hardeningOptions HardeningOptions
+	wd, err := os.Getwd()
+	if err != nil {
+		return HardeningOptions{}, err
+	}
+	abspath := filepath.Join(wd, path)
+	_, err = os.Stat(abspath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// we just assume the default is used
+			err = nil
+		}
+		return HardeningOptions{}, err
+	}
+	hardeningOptionsBytes, err := os.ReadFile(abspath)
+	if err != nil {
+		return hardeningOptions, err
+	}
+	return hardeningOptions, yaml.UnmarshalStrict(hardeningOptionsBytes, &hardeningOptions)
+}
diff --git a/internal/helm-project-operator/pkg/controllers/common/operator.go b/internal/helm-project-operator/pkg/controllers/common/operator.go
new file mode 100644
index 00000000..df17662e
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/common/operator.go
@@ -0,0 +1,50 @@
+package common
+
+import (
+	"errors"
+
+	"github.com/sirupsen/logrus"
+)
+
+// OperatorOptions are options provided by an operator that is implementing Helm Project Operator
+type OperatorOptions struct {
+	// HelmAPIVersion is the unique API version marking ProjectHelmCharts that this Helm Project Operator should watch for
+	HelmAPIVersion string
+
+	// ReleaseName is a name that identifies releases created for this operator
+	ReleaseName string
+
+	// SystemNamespaces are additional operator namespaces to treat as if they are system namespaces whether or not
+	// they are marked via some sort of annotation
+	SystemNamespaces []string
+
+	// ChartContent is the base64-encoded tgz contents of the folder containing the Helm chart that needs to be deployed
+	ChartContent string
+
+	// Singleton marks whether only a single ProjectHelmChart can exist per registration namespace
+	// If enabled, it will ensure that releases are named based on the registration namespace rather than
+	// the name provided on the ProjectHelmChart, which is what triggers an UnableToCreateHelmRelease status
+	// on the ProjectHelmChart created after this one
+	Singleton bool
+}
+
+// Validate validates the provided OperatorOptions
+func (opts OperatorOptions) Validate()
error { + if len(opts.HelmAPIVersion) == 0 { + return errors.New("must provide a spec.helmApiVersion that this project operator is being initialized for") + } + + if len(opts.ReleaseName) == 0 { + return errors.New("must provide name of Helm release that this project operator should deploy") + } + + if len(opts.SystemNamespaces) > 0 { + logrus.Infof("Marking the following namespaces as system namespaces: %s", opts.SystemNamespaces) + } + + if len(opts.ChartContent) == 0 { + return errors.New("cannot instantiate Project Operator without bundling a Helm chart to provide for the HelmChart's spec.ChartContent") + } + + return nil +} diff --git a/internal/helm-project-operator/pkg/controllers/common/operatorlabels.go b/internal/helm-project-operator/pkg/controllers/common/operatorlabels.go new file mode 100644 index 00000000..38a0d667 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/common/operatorlabels.go @@ -0,0 +1,96 @@ +package common + +import ( + "fmt" + "strings" +) + +// Operator Labels +// Note: These labels are automatically applied by the operator to mark resources that are created for a given ProjectHelmChart and Project Operator + +// Common + +const ( + // HelmProjectOperatedLabel marks all HelmCharts, HelmReleases, and namespaces created by this operator + HelmProjectOperatedLabel = "helm.cattle.io/helm-project-operated" + + // HelmProjectOperatorProjectLabel is applied to the Project Registration Namespace, the ProjectReleaseNamespace, and + // (only if both ProjectLabel and ProjectReleaseLabelValue are provided) to all Project namespaces + // + // If ProjectLabel and ProjectReleaseLabelValue are supplied, this label will be supplied to the global.cattle.projectNamespaceSelector + // to identify all namespaces tied to a given project + HelmProjectOperatorProjectLabel = "helm.cattle.io/projectId" +) + +// HasHelmProjectOperatedLabel returns whether a ProjectHelmChart has the Helm Project Operated label +func HasHelmProjectOperatedLabel(labels map[string]string) bool { + if labels == nil { + return false + } + _, ok := labels[HelmProjectOperatedLabel] + return ok +} + +// GetCommonLabels returns all common labels added to all generated resources +func GetCommonLabels(projectID string) map[string]string { + labels := map[string]string{ + HelmProjectOperatedLabel: "true", + } + if len(projectID) != 0 { + labels[HelmProjectOperatorProjectLabel] = projectID + } + return labels +} + +// Project Namespaces + +const ( + // HelmProjectOperatedNamespaceOrphanedLabel marks all auto-generated namespaces that no longer have resources tracked + // by this operator; if a namespace has this label, it is safe to delete + HelmProjectOperatedNamespaceOrphanedLabel = "helm.cattle.io/helm-project-operator-orphaned" +) + +// GetProjectNamespaceLabels returns the labels to be added to all Project Namespaces +func GetProjectNamespaceLabels(projectID, projectLabel, projectLabelValue string, isOrphaned bool) map[string]string { + labels := GetCommonLabels(projectID) + if isOrphaned { + labels[HelmProjectOperatedNamespaceOrphanedLabel] = "true" + } + labels[projectLabel] = projectLabelValue + return labels +} + +// GetProjectNamespaceAnnotations returns the annotations to be added to all Project Namespaces +// Note: annotations allow integration with Rancher Projects since they handle importing namespaces into Projects +func GetProjectNamespaceAnnotations(projectID, projectLabel, clusterID string) map[string]string { + projectIDWithClusterID := projectID + if len(clusterID) > 0 { + 
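+		// e.g. with clusterID "c-m-abc123" and projectID "p-xyz789" (hypothetical IDs), the annotation
+		// value becomes "c-m-abc123:p-xyz789", i.e. Rancher's <clusterID>:<projectID> convention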
projectIDWithClusterID = fmt.Sprintf("%s:%s", clusterID, projectID) + } + return map[string]string{ + projectLabel: projectIDWithClusterID, + } +} + +// Helm Resources (HelmCharts and HelmReleases) + +const ( + // HelmProjectOperatorHelmAPIVersionLabel is a label that identifies the HelmAPIVersion that a HelmChart or HelmRelease is tied to + // This is used to identify whether a HelmChart or HelmRelease should be deleted from the cluster on uninstall + HelmProjectOperatorHelmAPIVersionLabel = "helm.cattle.io/helm-api-version" +) + +// GetHelmResourceLabels returns the labels to be added to all generated Helm resources (HelmCharts, HelmReleases) +func GetHelmResourceLabels(projectID, helmAPIVersion string) map[string]string { + labels := GetCommonLabels(projectID) + labels[HelmProjectOperatorHelmAPIVersionLabel] = strings.SplitN(helmAPIVersion, "/", 2)[0] + return labels +} + +// RoleBindings (created for Default K8s ClusterRole RBAC aggregation) + +const ( + // HelmProjectOperatorProjectHelmChartRoleBindingLabel is a label that identifies a RoleBinding as one that has been created in response to a ProjectHelmChart role + // The value of this label will be the release name of the Helm chart, which will be used to identify which ProjectHelmChart's enqueue should resynchronize this. + HelmProjectOperatorProjectHelmChartRoleBindingLabel = "helm.cattle.io/project-helm-chart-role-binding" +) diff --git a/internal/helm-project-operator/pkg/controllers/common/options.go b/internal/helm-project-operator/pkg/controllers/common/options.go new file mode 100644 index 00000000..f7ae2163 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/common/options.go @@ -0,0 +1,40 @@ +package common + +import ( + "github.com/sirupsen/logrus" +) + +// Options defines options that can be set on initializing the HelmProjectOperator +type Options struct { + RuntimeOptions + OperatorOptions +} + +// Validate validates the provided Options +func (opts Options) Validate() error { + if err := opts.OperatorOptions.Validate(); err != nil { + return err + } + + if err := opts.RuntimeOptions.Validate(); err != nil { + return err + } + + // Cross option checks + + if opts.Singleton { + logrus.Infof("Note: Operator only supports a single ProjectHelmChart per project registration namespace") + if len(opts.ProjectLabel) == 0 { + logrus.Warnf("It is only recommended to run a singleton Project Operator when --project-label is provided (currently not set). 
The current configuration of this operator would only allow a single ProjectHelmChart to be managed by this Operator.")
+		}
+	}
+
+	for subjectRole, defaultClusterRoleName := range GetDefaultClusterRoles(opts) {
+		logrus.Infof("RoleBindings will automatically be created for Roles in the Project Release Namespace marked with '%s': '<release-name>' "+
+			"and '%s': '%s' based on ClusterRoleBindings or RoleBindings in the Project Registration namespace tied to ClusterRole %s",
+			HelmProjectOperatorProjectHelmChartRoleLabel, HelmProjectOperatorProjectHelmChartRoleAggregateFromLabel, subjectRole, defaultClusterRoleName,
+		)
+	}
+
+	return nil
+}
diff --git a/internal/helm-project-operator/pkg/controllers/common/rbac.go b/internal/helm-project-operator/pkg/controllers/common/rbac.go
new file mode 100644
index 00000000..6f3dc261
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/common/rbac.go
@@ -0,0 +1,53 @@
+package common
+
+import (
+	rbacv1 "k8s.io/api/rbac/v1"
+)
+
+// GetDefaultClusterRoles returns the default ClusterRoles that this operator was started with
+func GetDefaultClusterRoles(opts Options) map[string]string {
+	clusterRoles := make(map[string]string)
+	if len(opts.AdminClusterRole) > 0 {
+		clusterRoles["admin"] = opts.AdminClusterRole
+	}
+	if len(opts.EditClusterRole) > 0 {
+		clusterRoles["edit"] = opts.EditClusterRole
+	}
+	if len(opts.ViewClusterRole) > 0 {
+		clusterRoles["view"] = opts.ViewClusterRole
+	}
+	return clusterRoles
+}
+
+// IsDefaultClusterRoleRef returns whether the provided name is a default ClusterRole ref that this operator was
+// started with (e.g. the values provided to AdminClusterRole, EditClusterRole, or ViewClusterRole in RuntimeOptions)
+func IsDefaultClusterRoleRef(opts Options, roleRefName string) (string, bool) {
+	for subjectRole, defaultClusterRoleName := range GetDefaultClusterRoles(opts) {
+		if roleRefName == defaultClusterRoleName {
+			return subjectRole, true
+		}
+	}
+	return "", false
+}
+
+// FilterToUsersAndGroups returns a subset of the provided subjects that are only Users and Groups,
+// i.e. it filters out ServiceAccount subjects
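+//
+// A sketch of the intended behavior, with hypothetical subject names:
+//
+//	in:  [{Kind: "User", Name: "alice"}, {Kind: "ServiceAccount", Name: "default"}, {Kind: "Group", Name: "devs"}]
+//	out: [{Kind: "User", Name: "alice"}, {Kind: "Group", Name: "devs"}]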
+func FilterToUsersAndGroups(subjects []rbacv1.Subject) []rbacv1.Subject {
+	var filtered []rbacv1.Subject
+	for _, subject := range subjects {
+		if subject.APIGroup != rbacv1.GroupName {
+			continue
+		}
+		if subject.Kind != rbacv1.UserKind && subject.Kind != rbacv1.GroupKind {
+			// we do not automatically bind service accounts, only users and groups
+			continue
+		}
+		// note: we are purposefully omitting namespace here since it is not necessary even if set
+		filtered = append(filtered, rbacv1.Subject{
+			APIGroup: subject.APIGroup,
+			Kind:     subject.Kind,
+			Name:     subject.Name,
+		})
+	}
+	return filtered
+}
diff --git a/internal/helm-project-operator/pkg/controllers/common/runtime.go b/internal/helm-project-operator/pkg/controllers/common/runtime.go
new file mode 100644
index 00000000..f455128e
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/common/runtime.go
@@ -0,0 +1,159 @@
+package common
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1"
+	"github.com/sirupsen/logrus"
+	"gopkg.in/yaml.v2"
+)
+
+type RuntimeOptions struct {
+	// Namespace is the systemNamespace to create HelmCharts and HelmReleases in
+	// It's generally expected that this namespace is not widely accessible by all users in your cluster; it's recommended that it is placed
+	// in something akin to a System Project that is locked down in terms of permissions since resources like HelmCharts and HelmReleases are deployed there
+	Namespace string `usage:"Namespace to create HelmCharts and HelmReleases; if ProjectLabel is not provided, this will also be the namespace to watch ProjectHelmCharts" default:"cattle-helm-system" env:"NAMESPACE"`
+
+	// NodeName is the name of the node running the operator; it adds additional information to events about where they were generated from
+	NodeName string `usage:"Name of the node this controller is running on" env:"NODE_NAME"`
+
+	// ControllerName is the name of the controller that identifies this operator; this ensures that all HelmCharts and HelmReleases have the correct managed-by annotation
+	// so that multiple iterations of this operator in the same namespace do not try to manage the same HelmChart and HelmRelease objects
+	ControllerName string `usage:"Unique name to identify this controller that is added to all HelmCharts tracked by this controller" default:"helm-project-operator" env:"CONTROLLER_NAME"`
+
+	// HelmJobImage is the job image to use to run the HelmChart job (default rancher/klipper-helm:v0.7.0-build20220315)
+	// Generally, this HelmJobImage can be left undefined, but may be necessary to be set if you are running with a non-default image
+	HelmJobImage string `usage:"Job image to use to perform helm operations on HelmChart creation" env:"HELM_JOB_IMAGE"`
+
+	// ClusterID identifies the cluster that the operator is running within; it adds an additional annotation to project registration
+	// namespaces that indicates the projectID, prefixed by this cluster ID.
+	//
+	// Note: primarily used for integration with Rancher Projects
+	ClusterID string `usage:"Identifies the cluster this controller is running on. Ignored if --project-label is not provided." env:"CLUSTER_ID"`
+
+	// SystemDefaultRegistry is the prefix to be added to all images deployed by the HelmChart embedded into the Project Operator
+	// to point at the right set of images that need to be deployed.
This is usually provided in Rancher as global.cattle.systemDefaultRegistry
+	SystemDefaultRegistry string `usage:"Default system registry to use for Docker images deployed by underlying Helm Chart. Provided as global.cattle.systemDefaultRegistry in the Helm Chart" env:"SYSTEM_DEFAULT_REGISTRY"`
+
+	// CattleURL is the Rancher URL that this chart has been deployed onto. This is usually provided in Rancher Helm charts as global.cattle.url
+	CattleURL string `usage:"Default Rancher URL to provide to the Helm chart under global.cattle.url" env:"CATTLE_URL"`
+
+	// ProjectLabel is the label that identifies projects
+	// Note: this field is optional; if provided, ProjectHelmCharts will auto-infer their spec.projectNamespaceSelector
+	// and any spec.projectNamespaceSelector provided by the user will be ignored
+	// example: field.cattle.io/projectId
+	ProjectLabel string `usage:"Label on namespaces to create Project Registration Namespaces and watch for ProjectHelmCharts" env:"PROJECT_LABEL"`
+
+	// SystemProjectLabelValues are values of ProjectLabel that identify system namespaces. Does nothing if ProjectLabel is not provided
+	// example: p-ranch
+	// If both this and the ProjectLabel example are provided, any namespaces with the label 'field.cattle.io/projectId': 'p-ranch'
+	// will be treated as a systemNamespace, which means that no ProjectHelmChart will be allowed to select it
+	SystemProjectLabelValues []string `usage:"Values on project label on namespaces that mark it as a system namespace" env:"SYSTEM_PROJECT_LABEL_VALUE"`
+
+	// ProjectReleaseLabelValue is the value of the ProjectLabel that should be added to Project Release Namespaces. Does nothing if ProjectLabel is not provided
+	// example: p-ranch
+	// If provided, dedicated Project Release namespaces will be created in the cluster for each ProjectHelmChart that needs a Helm Release
+	// The created Project Release namespaces will also automatically be identified as System Project Namespaces based on this label, so other
+	// namespaces with this label value will be treated as a system namespace as well
+	ProjectReleaseLabelValue string `usage:"Value on project label on namespaces that marks it as a project release namespace" env:"PROJECT_RELEASE_LABEL_VALUE"`
+
+	// AdminClusterRole configures the operator to automatically create RoleBindings on Roles in the Project Release Namespace marked with
+	// 'helm.cattle.io/project-helm-chart-role': '<release-name>' and 'helm.cattle.io/project-helm-chart-role-aggregate-from': 'admin'
+	// based on ClusterRoleBindings or RoleBindings in the Project Registration namespace tied to the provided ClusterRole, if it exists
+	AdminClusterRole string `usage:"ClusterRole tied to admin users who should have permissions in the Project Release Namespace" env:"ADMIN_CLUSTER_ROLE"`
+
+	// EditClusterRole configures the operator to automatically create RoleBindings on Roles in the Project Release Namespace marked with
+	// 'helm.cattle.io/project-helm-chart-role': '<release-name>' and 'helm.cattle.io/project-helm-chart-role-aggregate-from': 'edit'
+	// based on ClusterRoleBindings or RoleBindings in the Project Registration namespace tied to the provided ClusterRole, if it exists
+	EditClusterRole string `usage:"ClusterRole tied to edit users who should have permissions in the Project Release Namespace" env:"EDIT_CLUSTER_ROLE"`
+
+	// ViewClusterRole configures the operator to automatically create RoleBindings on Roles in the Project Release Namespace marked with
+	// 'helm.cattle.io/project-helm-chart-role': '<release-name>' and 'helm.cattle.io/project-helm-chart-role-aggregate-from': 'view'
+	// based on ClusterRoleBindings or RoleBindings in the Project Registration namespace tied to the provided ClusterRole, if it exists
+	ViewClusterRole string `usage:"ClusterRole tied to view users who should have permissions in the Project Release Namespace" env:"VIEW_CLUSTER_ROLE"`
+
+	// DisableHardening turns off the controller that manages the default service account and a default NetworkPolicy deployed on all
+	// namespaces marked with the Helm Project Operated Label to prevent generated namespaces from breaking a CIS 1.16 Hardened Scan by patching
+	// the default ServiceAccount and creating a default secure NetworkPolicy.
+	//
+	// ref: https://docs.rke2.io/security/cis_self_assessment16/#515
+	// ref: https://docs.rke2.io/security/cis_self_assessment16/#532
+	//
+	// To configure the default ServiceAccount and NetworkPolicy across all generated namespaces, you can provide overrides in the HardeningOptionsFile
+	// If you need to configure the default ServiceAccount and NetworkPolicy on a per-namespace basis, it is recommended that you disable this
+	DisableHardening bool `usage:"Whether to disable the hardening controller that patches the default ServiceAccount and creates a default NetworkPolicy in operated namespaces" env:"DISABLE_HARDENING"`
+
+	// HardeningOptionsFile is the path to the file that contains the configuration for the default ServiceAccount and NetworkPolicy deployed on operated namespaces
+	// By default, the default service account of the namespace is patched to disable automountServiceAccountToken
+	// By default, a default NetworkPolicy is deployed in the namespace that selects all pods in the namespace and limits all ingress and egress
+	HardeningOptionsFile string `usage:"Path to file that contains the configuration for the default ServiceAccount and NetworkPolicy deployed on operated namespaces" default:"hardening.yaml" env:"HARDENING_OPTIONS_FILE"`
+
+	// ValuesOverrideFile is the path to the file that contains operator-provided overrides on the values.yaml that should be applied for each ProjectHelmChart
+	ValuesOverrideFile string `usage:"Path to file that contains values.yaml overrides supplied by the operator" default:"values.yaml" env:"VALUES_OVERRIDE_FILE"`
+
+	// DisableEmbeddedHelmLocker determines whether to disable the embedded Helm Locker controller in favor of an external Helm Locker
+	DisableEmbeddedHelmLocker bool `usage:"Whether to disable embedded Helm Locker controller in favor of external Helm Locker" env:"DISABLE_EMBEDDED_HELM_LOCKER"`
+
+	// DisableEmbeddedHelmController determines whether to disable the embedded Helm Controller in favor of an external Helm Controller
+	// This should be the default in most RKE2 clusters since the RKE2 server binary already embeds a Helm Controller instance that manages HelmCharts
+	DisableEmbeddedHelmController bool `usage:"Whether to disable embedded Helm Controller in favor of external Helm Controller (recommended for RKE2 clusters)" env:"DISABLE_EMBEDDED_HELM_CONTROLLER"`
+}
+
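+// A minimal sketch (assumed wiring; type name hypothetical) of how these struct tags surface as CLI
+// flags and environment variables via pkg/cli.Command; the embedding type must also implement
+// cli.Runnable by providing a Run method:
+//
+//	type HelmProjectOperator struct {
+//		common.RuntimeOptions
+//	}
+//
+//	cmd := cli.Command(&HelmProjectOperator{}, cobra.Command{Use: "helm-project-operator"})
+//	cli.Main(cmd)
+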
+// Validate validates the provided RuntimeOptions
+func (opts RuntimeOptions) Validate() error {
+	if len(opts.ProjectLabel) > 0 {
+		logrus.Infof("Creating dedicated project registration namespaces to discover ProjectHelmCharts based on the value found for the project label '%s' on all namespaces in the cluster, excluding system namespaces; these namespaces will need to be manually cleaned up if they have the label '%s': 'true'", opts.ProjectLabel, HelmProjectOperatedNamespaceOrphanedLabel)
+		if len(opts.SystemProjectLabelValues) > 0 {
+			for _, systemProjectLabel := range opts.SystemProjectLabelValues {
+				logrus.Infof("Assuming namespaces tagged with %s=%s are also system namespaces", opts.ProjectLabel, systemProjectLabel)
+			}
+		}
+		if len(opts.ProjectReleaseLabelValue) > 0 {
+			logrus.Infof("Assuming namespaces tagged with %s=%s are also system namespaces", opts.ProjectLabel, opts.ProjectReleaseLabelValue)
+			logrus.Infof("Creating dedicated project release namespaces for ProjectHelmCharts with label '%s': '%s'; these namespaces will need to be manually cleaned up if they have the label '%s': 'true'", opts.ProjectLabel, opts.ProjectReleaseLabelValue, HelmProjectOperatedNamespaceOrphanedLabel)
+		}
+		if len(opts.ClusterID) > 0 {
+			logrus.Infof("Marking project registration namespaces with %s=%s:<projectID>", opts.ProjectLabel, opts.ClusterID)
+		}
+	}
+
+	if len(opts.HelmJobImage) > 0 {
+		logrus.Infof("Using %s as spec.JobImage on all generated HelmChart resources", opts.HelmJobImage)
+	}
+
+	if len(opts.NodeName) > 0 {
+		logrus.Infof("Marking events as being sourced from node %s", opts.NodeName)
+	}
+
+	if opts.DisableHardening {
+		logrus.Info("Hardening is disabled")
+	} else {
+		logrus.Info("Managing the configuration of the default ServiceAccount and an auto-generated NetworkPolicy in all namespaces managed by this Project Operator")
+	}
+
+	return nil
+}
+
+// LoadValuesOverrideFromFile reads the YAML file at the given path (relative to the working directory) and
+// unmarshals it into a GenericMap of operator-supplied values.yaml overrides; if the file does not exist, no overrides are assumed
+func LoadValuesOverrideFromFile(path string) (v1alpha1.GenericMap, error) {
+	var valuesOverride v1alpha1.GenericMap
+	wd, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+	abspath := filepath.Join(wd, path)
+	_, err = os.Stat(abspath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// we just assume the default is used
+			err = nil
+		}
+		return nil, err
+	}
+	valuesOverrideBytes, err := os.ReadFile(abspath)
+	if err != nil {
+		return nil, err
+	}
+	return valuesOverride, yaml.Unmarshal(valuesOverrideBytes, &valuesOverride)
+}
diff --git a/internal/helm-project-operator/pkg/controllers/common/userlabels.go b/internal/helm-project-operator/pkg/controllers/common/userlabels.go
new file mode 100644
index 00000000..f122bc68
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/common/userlabels.go
@@ -0,0 +1,47 @@
+package common
+
+import v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1"
+
+// User-Applied Labels
+// Note: These labels are expected to be applied by users (or by Jobs, in the case of cleanup), to mark a resource as one that needs
+// some special logic to be applied by the Helm Project Operator on changes
+
+// ProjectHelmCharts
+
+const (
+	// HelmProjectOperatedCleanupLabel is a label attached to ProjectHelmCharts to facilitate cleanup; all ProjectHelmCharts
+	// with this label will have their HelmCharts and HelmReleases cleaned up until the next time the Operator is deployed;
+	// on redeploying the operator, this label will automatically be removed from all ProjectHelmCharts deployed in the cluster.
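+	//
+	// For example (namespace and resource name hypothetical), a user could mark a ProjectHelmChart for cleanup with:
+	//
+	//	kubectl label projecthelmcharts -n cattle-project-p-example project-monitoring helm.cattle.io/helm-project-operator-cleanup=true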
+ HelmProjectOperatedCleanupLabel = "helm.cattle.io/helm-project-operator-cleanup" +) + +// HasCleanupLabel returns whether a ProjectHelmChart has the cleanup label +func HasCleanupLabel(projectHelmChart *v1alpha1.ProjectHelmChart) bool { + if projectHelmChart.Labels == nil { + return false + } + value, shouldCleanup := projectHelmChart.Labels[HelmProjectOperatedCleanupLabel] + return shouldCleanup && value == "true" +} + +// Project Release Namespace ConfigMaps + +const ( + // HelmProjectOperatorDashboardValuesConfigMapLabel is a label that identifies a ConfigMap that should be merged into status.dashboardValues when available + // The value of this label will be the release name of the Helm chart, which will be used to identify which ProjectHelmChart's status needs to be updated. + HelmProjectOperatorDashboardValuesConfigMapLabel = "helm.cattle.io/dashboard-values-configmap" +) + +// Project Release Namespace Roles + +const ( + // HelmProjectOperatorProjectHelmChartRoleLabel is a label that identifies a Role as one that needs RoleBindings to be managed by the Helm Project Operator + // The value of this label will be the release name of the Helm chart, which will be used to identify which ProjectHelmChart's enqueue should resynchronize this. + HelmProjectOperatorProjectHelmChartRoleLabel = "helm.cattle.io/project-helm-chart-role" + + // HelmProjectOperatorProjectHelmChartRoleAggregateFromLabel is a label that identifies which subjects should be bound to the Project Helm Chart Role + // The value of this label will be the name of the default k8s ClusterRoles (cluster-admin, admin, edit, view). For the provided ClusterRole, + // the operator will automatically create a RoleBinding in the Project Release Namespace binding all subjects who have that permission across all namespaces in the project + // to the Role that contains this label. 
This label will only be viewed if the Role has HelmProjectOperatorProjectHelmChartRoleLabel set as well + HelmProjectOperatorProjectHelmChartRoleAggregateFromLabel = "helm.cattle.io/project-helm-chart-role-aggregate-from" +) diff --git a/internal/helm-project-operator/pkg/controllers/controllers.go b/internal/helm-project-operator/pkg/controllers/controllers.go new file mode 100644 index 00000000..cf10640c --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/controllers.go @@ -0,0 +1,372 @@ +package controllers + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/k3s-io/helm-controller/pkg/controllers/chart" + k3shelm "github.com/k3s-io/helm-controller/pkg/generated/controllers/helm.cattle.io" + k3shelmcontroller "github.com/k3s-io/helm-controller/pkg/generated/controllers/helm.cattle.io/v1" + "github.com/rancher/helm-locker/pkg/controllers/release" + helmlocker "github.com/rancher/helm-locker/pkg/generated/controllers/helm.cattle.io" + helmlockercontroller "github.com/rancher/helm-locker/pkg/generated/controllers/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-locker/pkg/objectset" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/helm-project-operator/pkg/controllers/hardened" + "github.com/rancher/helm-project-operator/pkg/controllers/namespace" + "github.com/rancher/helm-project-operator/pkg/controllers/project" + helmproject "github.com/rancher/helm-project-operator/pkg/generated/controllers/helm.cattle.io" + helmprojectcontroller "github.com/rancher/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1" + "github.com/rancher/lasso/pkg/cache" + "github.com/rancher/lasso/pkg/client" + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/v3/pkg/apply" + batch "github.com/rancher/wrangler/v3/pkg/generated/controllers/batch" + batchcontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/batch/v1" + "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" + corecontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + "github.com/rancher/wrangler/v3/pkg/generated/controllers/networking.k8s.io" + networkingcontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/networking.k8s.io/v1" + rbac "github.com/rancher/wrangler/v3/pkg/generated/controllers/rbac" + rbaccontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/rbac/v1" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/leader" + "github.com/rancher/wrangler/v3/pkg/ratelimit" + "github.com/rancher/wrangler/v3/pkg/schemes" + "github.com/rancher/wrangler/v3/pkg/start" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + typedv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +type appContext struct { + helmprojectcontroller.Interface + + Dynamic dynamic.Interface + K8s kubernetes.Interface + Core corecontroller.Interface + Networking networkingcontroller.Interface + + HelmLocker helmlockercontroller.Interface + ObjectSetRegister objectset.LockableRegister + ObjectSetHandler *controller.SharedHandler + + HelmController k3shelmcontroller.Interface + Batch batchcontroller.Interface + RBAC rbaccontroller.Interface + + Apply apply.Apply + EventBroadcaster record.EventBroadcaster + + ClientConfig 
clientcmd.ClientConfig + starters []start.Starter +} + +func (a *appContext) start(ctx context.Context) error { + return start.All(ctx, 50, a.starters...) +} + +// Register registers all controllers for the Helm Project Operator based on the provided options +func Register(ctx context.Context, systemNamespace string, cfg clientcmd.ClientConfig, opts common.Options) error { + if len(systemNamespace) == 0 { + return errors.New("cannot start controllers on system namespace: system namespace not provided") + } + // always add the systemNamespace to the systemNamespaces provided + opts.SystemNamespaces = append(opts.SystemNamespaces, systemNamespace) + + // parse values.yaml and questions.yaml from file + valuesYaml, questionsYaml, err := parseValuesAndQuestions(opts.ChartContent) + if err != nil { + logrus.Fatal(err) + } + + appCtx, err := newContext(cfg, systemNamespace, opts) + if err != nil { + return err + } + + appCtx.EventBroadcaster.StartLogging(logrus.Debugf) + appCtx.EventBroadcaster.StartRecordingToSink(&typedv1.EventSinkImpl{ + Interface: appCtx.K8s.CoreV1().Events(systemNamespace), + }) + recorder := appCtx.EventBroadcaster.NewRecorder(schemes.All, corev1.EventSource{ + Component: "helm-project-operator", + Host: opts.NodeName, + }) + + if !opts.DisableHardening { + hardeningOpts, err := common.LoadHardeningOptionsFromFile(opts.HardeningOptionsFile) + if err != nil { + return err + } + hardened.Register(ctx, + appCtx.Apply, + hardeningOpts, + // watches + appCtx.Core.Namespace(), + appCtx.Core.Namespace().Cache(), + // generates + appCtx.Core.ServiceAccount(), + appCtx.Networking.NetworkPolicy(), + ) + } + + projectGetter := namespace.Register(ctx, + appCtx.Apply, + systemNamespace, + valuesYaml, + questionsYaml, + opts, + // watches and generates + appCtx.Core.Namespace(), + appCtx.Core.Namespace().Cache(), + appCtx.Core.ConfigMap(), + // enqueues + appCtx.ProjectHelmChart(), + appCtx.ProjectHelmChart().Cache(), + appCtx.Dynamic, + ) + + if len(opts.ControllerName) == 0 { + opts.ControllerName = "helm-project-operator" + } + + valuesOverride, err := common.LoadValuesOverrideFromFile(opts.ValuesOverrideFile) + if err != nil { + return err + } + project.Register(ctx, + systemNamespace, + opts, + valuesOverride, + appCtx.Apply, + // watches + appCtx.ProjectHelmChart(), + appCtx.ProjectHelmChart().Cache(), + appCtx.Core.ConfigMap(), + appCtx.Core.ConfigMap().Cache(), + appCtx.RBAC.Role(), + appCtx.RBAC.Role().Cache(), + appCtx.RBAC.ClusterRoleBinding(), + appCtx.RBAC.ClusterRoleBinding().Cache(), + // watches and generates + appCtx.HelmController.HelmChart(), + appCtx.HelmLocker.HelmRelease(), + appCtx.Core.Namespace(), + appCtx.Core.Namespace().Cache(), + appCtx.RBAC.RoleBinding(), + appCtx.RBAC.RoleBinding().Cache(), + projectGetter, + ) + + if !opts.DisableEmbeddedHelmLocker { + logrus.Infof("Registering embedded Helm Locker...") + release.Register(ctx, + systemNamespace, + opts.ControllerName, + appCtx.HelmLocker.HelmRelease(), + appCtx.HelmLocker.HelmRelease().Cache(), + appCtx.Core.Secret(), + appCtx.Core.Secret().Cache(), + appCtx.K8s, + appCtx.ObjectSetRegister, + appCtx.ObjectSetHandler, + recorder, + ) + } + + if !opts.DisableEmbeddedHelmController { + logrus.Infof("Registering embedded Helm Controller...") + chart.Register(ctx, + systemNamespace, + opts.ControllerName, + // this has to be cluster-admin for k3s reasons + "cluster-admin", + "6443", + appCtx.K8s, + appCtx.Apply, + recorder, + appCtx.HelmController.HelmChart(), + appCtx.HelmController.HelmChart().Cache(), + 
appCtx.HelmController.HelmChartConfig(), + appCtx.HelmController.HelmChartConfig().Cache(), + appCtx.Batch.Job(), + appCtx.Batch.Job().Cache(), + appCtx.RBAC.ClusterRoleBinding(), + appCtx.Core.ServiceAccount(), + appCtx.Core.ConfigMap(), + appCtx.Core.Secret(), + ) + } + + leader.RunOrDie(ctx, systemNamespace, fmt.Sprintf("helm-project-operator-%s-lock", opts.ReleaseName), appCtx.K8s, func(ctx context.Context) { + if err := appCtx.start(ctx); err != nil { + logrus.Fatal(err) + } + logrus.Info("All controllers have been started") + }) + + return nil +} + +func controllerFactory(rest *rest.Config) (controller.SharedControllerFactory, error) { + rateLimit := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 60*time.Second) + clientFactory, err := client.NewSharedClientFactory(rest, nil) + if err != nil { + return nil, err + } + + cacheFactory := cache.NewSharedCachedFactory(clientFactory, nil) + return controller.NewSharedControllerFactory(cacheFactory, &controller.SharedControllerFactoryOptions{ + DefaultRateLimiter: rateLimit, + DefaultWorkers: 50, + }), nil +} + +func newContext(cfg clientcmd.ClientConfig, systemNamespace string, opts common.Options) (*appContext, error) { + client, err := cfg.ClientConfig() + if err != nil { + return nil, err + } + client.RateLimiter = ratelimit.None + + dynamic, err := dynamic.NewForConfig(client) + if err != nil { + return nil, err + } + + k8s, err := kubernetes.NewForConfig(client) + if err != nil { + return nil, err + } + + discovery, err := discovery.NewDiscoveryClientForConfig(client) + if err != nil { + return nil, err + } + + apply := apply.New(discovery, apply.NewClientFactory(client)) + + scf, err := controllerFactory(client) + if err != nil { + return nil, err + } + + // Shared Controllers + + core, err := core.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, + }) + if err != nil { + return nil, err + } + corev := core.Core().V1() + + networking, err := networking.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, + }) + if err != nil { + return nil, err + } + networkingv := networking.Networking().V1() + + // Helm Project Controller + + var namespace string // by default, this is unset so we watch everything + if len(opts.ProjectLabel) == 0 { + // we only need to watch the systemNamespace + namespace = systemNamespace + } + + helmproject, err := helmproject.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, + Namespace: namespace, + }) + if err != nil { + return nil, err + } + helmprojectv := helmproject.Helm().V1alpha1() + + // Helm Locker Controllers - should be scoped to the system namespace only + + objectSet, objectSetRegister, objectSetHandler := objectset.NewLockableRegister("object-set-register", apply, scf, discovery, nil) + + helmlocker, err := helmlocker.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, + Namespace: systemNamespace, + }) + if err != nil { + return nil, err + } + helmlockerv := helmlocker.Helm().V1alpha1() + + // Helm Controllers - should be scoped to the system namespace only + + helm, err := k3shelm.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, + Namespace: systemNamespace, + }) + if err != nil { + return nil, err + } + helmv := helm.Helm().V1() + + batch, err := batch.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, 
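The rate limiter wired into controllerFactory above drives per-key retry backoff. A standalone sketch (using only the k8s.io/client-go/util/workqueue package, with a made-up key) of the delay progression those exact parameters produce:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Same parameters as controllerFactory: 5ms base delay, 60s cap.
	rl := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 60*time.Second)
	for i := 0; i < 15; i++ {
		// Each consecutive failure of the same key doubles the delay:
		// 5ms, 10ms, 20ms, ... until it is capped at 1m0s.
		fmt.Println(rl.When("some-key"))
	}
}
```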
+ Namespace: systemNamespace, + }) + if err != nil { + return nil, err + } + batchv := batch.Batch().V1() + + rbac, err := rbac.NewFactoryFromConfigWithOptions(client, &generic.FactoryOptions{ + SharedControllerFactory: scf, + Namespace: systemNamespace, + }) + if err != nil { + return nil, err + } + rbacv := rbac.Rbac().V1() + + return &appContext{ + Interface: helmprojectv, + + Dynamic: dynamic, + K8s: k8s, + Core: corev, + Networking: networkingv, + + HelmLocker: helmlockerv, + ObjectSetRegister: objectSetRegister, + ObjectSetHandler: objectSetHandler, + + HelmController: helmv, + Batch: batchv, + RBAC: rbacv, + + Apply: apply.WithSetOwnerReference(false, false), + EventBroadcaster: record.NewBroadcaster(), + + ClientConfig: cfg, + starters: []start.Starter{ + core, + networking, + batch, + rbac, + helm, + objectSet, + helmlocker, + helmproject, + }, + }, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/hardened/controller.go b/internal/helm-project-operator/pkg/controllers/hardened/controller.go new file mode 100644 index 00000000..d7ec9fbf --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/hardened/controller.go @@ -0,0 +1,69 @@ +package hardened + +import ( + "context" + + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/wrangler/v3/pkg/apply" + corecontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + networkingcontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/networking.k8s.io/v1" + corev1 "k8s.io/api/core/v1" +) + +type handler struct { + apply apply.Apply + + opts common.HardeningOptions + + namespaces corecontroller.NamespaceController + namespaceCache corecontroller.NamespaceCache + serviceaccounts corecontroller.ServiceAccountController + networkpolicies networkingcontroller.NetworkPolicyController +} + +func Register( + ctx context.Context, + apply apply.Apply, + opts common.HardeningOptions, + namespaces corecontroller.NamespaceController, + namespaceCache corecontroller.NamespaceCache, + serviceaccounts corecontroller.ServiceAccountController, + networkpolicies networkingcontroller.NetworkPolicyController, +) { + + apply = apply. + WithSetID("hardened-hpo-operated-namespace"). 
WithCacheTypes(serviceaccounts, networkpolicies) + + h := &handler{ + apply: apply, + opts: opts, + namespaces: namespaces, + namespaceCache: namespaceCache, + serviceaccounts: serviceaccounts, + networkpolicies: networkpolicies, + } + + h.initResolvers(ctx) + + namespaces.OnChange(ctx, "harden-hpo-operated-namespace", h.OnChange) +} + +func (h *handler) OnChange(name string, namespace *corev1.Namespace) (*corev1.Namespace, error) { + if namespace == nil { + return namespace, nil + } + if namespace.DeletionTimestamp != nil { + // When a namespace gets deleted, all resources deployed to harden that namespace should also get deleted + // Therefore, we do not need to apply anything in this situation to avoid spamming logs with trying to apply + // a resource to a namespace that is being terminated + return namespace, nil + } + if !common.HasHelmProjectOperatedLabel(namespace.Labels) { + // only harden operated namespaces + return namespace, nil + } + return namespace, h.apply.WithOwner(namespace).ApplyObjects( + h.getDefaultServiceAccount(namespace), + h.getNetworkPolicy(namespace), + ) +} diff --git a/internal/helm-project-operator/pkg/controllers/hardened/resolvers.go b/internal/helm-project-operator/pkg/controllers/hardened/resolvers.go new file mode 100644 index 00000000..cc597c17 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/hardened/resolvers.go @@ -0,0 +1,67 @@ +package hardened + +import ( + "context" + + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/wrangler/v3/pkg/relatedresource" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Note: each resource created in resources.go should have a resolver handler here +// The only exception is namespaces since those are handled by the main controller OnChange + +// initResolvers initializes resolvers that need to be set to watch child resources of Helm Project Operated Namespaces +func (h *handler) initResolvers(ctx context.Context) { + relatedresource.WatchClusterScoped( + ctx, "watch-hardened-hpo-operated-namespace", h.resolveHardenedProjectRegistrationNamespaceData, h.namespaces, + h.serviceaccounts, h.networkpolicies, + ) +} + +func (h *handler) resolveHardenedProjectRegistrationNamespaceData(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) { + if obj == nil { + return nil, nil + } + ns, err := h.namespaceCache.Get(namespace) + if err != nil { + return nil, err + } + if ns == nil { + // namespace is probably being deleted, which means we don't need to resolve anything + return nil, nil + } + if !common.HasHelmProjectOperatedLabel(ns.Labels) { + // only care about service accounts and network policies in an operated namespace + return nil, nil + } + if serviceAccount, ok := obj.(*corev1.ServiceAccount); ok { + return h.resolveServiceAccount(namespace, name, serviceAccount) + } + if networkPolicy, ok := obj.(*networkingv1.NetworkPolicy); ok { + return h.resolveNetworkPolicy(namespace, name, networkPolicy) + } + return nil, nil +} + +func (h *handler) resolveServiceAccount(namespace, name string, serviceAccount *corev1.ServiceAccount) ([]relatedresource.Key, error) { + // check if name matches + if name == defaultServiceAccountName { + return []relatedresource.Key{{ + Name: namespace, + }}, nil + } + return nil, nil +} + +func (h *handler) resolveNetworkPolicy(namespace, name string, networkPolicy *networkingv1.NetworkPolicy) ([]relatedresource.Key, error) { + // check if name matches + if name ==
defaultNetworkPolicyName { + return []relatedresource.Key{{ + Name: namespace, + }}, nil + } + return nil, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/hardened/resources.go b/internal/helm-project-operator/pkg/controllers/hardened/resources.go new file mode 100644 index 00000000..97738134 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/hardened/resources.go @@ -0,0 +1,68 @@ +package hardened + +import ( + "github.com/rancher/helm-project-operator/pkg/controllers/common" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Note: each resource created here should have a resolver set in resolvers.go +// The only exception is namespaces since those are handled by the main controller OnChange + +var ( + defaultServiceAccountName = "default" + defaultAutomountServiceAccountToken = false // ensures that all pods need to have service account attached to get permissions + + defaultNetworkPolicyName = "hpo-generated-default" + defaultNetworkPolicySpec = networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, // select all pods + Ingress: []networkingv1.NetworkPolicyIngressRule{}, // networking policy limits all ingress + Egress: []networkingv1.NetworkPolicyEgressRule{}, // network limits all egress + PolicyTypes: []networkingv1.PolicyType{"Ingress", "Egress"}, // applies to both ingress and egress + } +) + +// getDefaultServiceAccount returns the default service account configured for this Helm Project Operated namespace +func (h *handler) getDefaultServiceAccount(namespace *corev1.Namespace) *corev1.ServiceAccount { + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultServiceAccountName, + Namespace: namespace.Name, + Labels: map[string]string{ + common.HelmProjectOperatedLabel: "true", + }, + }, + AutomountServiceAccountToken: &defaultAutomountServiceAccountToken, + } + if h.opts.ServiceAccount != nil { + if h.opts.ServiceAccount.Secrets != nil { + serviceAccount.Secrets = h.opts.ServiceAccount.Secrets + } + if h.opts.ServiceAccount.ImagePullSecrets != nil { + serviceAccount.ImagePullSecrets = h.opts.ServiceAccount.ImagePullSecrets + } + if h.opts.ServiceAccount.AutomountServiceAccountToken != nil { + serviceAccount.AutomountServiceAccountToken = h.opts.ServiceAccount.AutomountServiceAccountToken + } + } + return serviceAccount +} + +// getNetworkPolicy returns the default Helm Project Operator generated NetworkPolicy configured for this Helm Project Operated namespace +func (h *handler) getNetworkPolicy(namespace *corev1.Namespace) *networkingv1.NetworkPolicy { + networkPolicy := &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultNetworkPolicyName, + Namespace: namespace.Name, + Labels: map[string]string{ + common.HelmProjectOperatedLabel: "true", + }, + }, + Spec: defaultNetworkPolicySpec, + } + if h.opts.NetworkPolicy != nil { + networkPolicy.Spec = networkingv1.NetworkPolicySpec(*h.opts.NetworkPolicy) + } + return networkPolicy +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/controller.go b/internal/helm-project-operator/pkg/controllers/namespace/controller.go new file mode 100644 index 00000000..50c7fe2c --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/controller.go @@ -0,0 +1,375 @@ +package namespace + +import ( + "context" + "fmt" + + "github.com/rancher/helm-project-operator/pkg/applier" + 
"github.com/rancher/helm-project-operator/pkg/controllers/common" + helmprojectcontroller "github.com/rancher/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1" + "github.com/rancher/wrangler/v3/pkg/apply" + corecontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" +) + +type handler struct { + namespaceApply apply.Apply + apply apply.Apply + + systemNamespace string + valuesYaml string + questionsYaml string + opts common.Options + + systemNamespaceTracker Tracker + projectRegistrationNamespaceTracker Tracker + + namespaces corecontroller.NamespaceController + namespaceCache corecontroller.NamespaceCache + configmaps corecontroller.ConfigMapController + projectHelmCharts helmprojectcontroller.ProjectHelmChartController + projectHelmChartCache helmprojectcontroller.ProjectHelmChartCache + + projectRegistrationNamespaceApplyinator applier.Applyinator +} + +func Register( + ctx context.Context, + apply apply.Apply, + systemNamespace, valuesYaml, questionsYaml string, + opts common.Options, + namespaces corecontroller.NamespaceController, + namespaceCache corecontroller.NamespaceCache, + configmaps corecontroller.ConfigMapController, + projectHelmCharts helmprojectcontroller.ProjectHelmChartController, + projectHelmChartCache helmprojectcontroller.ProjectHelmChartCache, + dynamic dynamic.Interface, +) ProjectGetter { + + apply = apply.WithCacheTypes(configmaps) + + h := &handler{ + apply: apply, + systemNamespace: systemNamespace, + valuesYaml: valuesYaml, + questionsYaml: questionsYaml, + opts: opts, + systemNamespaceTracker: NewTracker(), + projectRegistrationNamespaceTracker: NewTracker(), + namespaces: namespaces, + namespaceCache: namespaceCache, + configmaps: configmaps, + projectHelmCharts: projectHelmCharts, + projectHelmChartCache: projectHelmChartCache, + } + + // note: this implements a workqueue that ensures that applies only happen once at a time even if a bunch of namespaces in a project + // are all re-enqueued at the exact same time + h.projectRegistrationNamespaceApplyinator = applier.NewApplyinator("project-registration-namespace-applyinator", h.applyProjectRegistrationNamespace, nil) + h.projectRegistrationNamespaceApplyinator.Run(ctx, 2) + + h.apply = h.addReconcilers(h.apply, dynamic) + + h.initResolvers(ctx) + + h.initIndexers() + + if len(opts.ProjectLabel) == 0 { + namespaces.OnChange(ctx, "on-namespace-change", h.OnSingleNamespaceChange) + + return NewSingleNamespaceProjectGetter(systemNamespace, opts.SystemNamespaces, namespaces) + } + + // the namespaceApply is only needed in a multi-namespace setup + // note: we never delete namespaces that are created since it's possible that the user may want to leave them around + // on remove, we only output a log that says that the user should clean it up and add an annotation that it is orphaned + h.namespaceApply = apply. + WithSetID("project-registration-namespace-applier"). + WithCacheTypes(namespaces). 
+ WithNoDeleteGVK(namespaces.GroupVersionKind()) + + namespaces.OnChange(ctx, "on-namespace-change", h.OnMultiNamespaceChange) + + h.initSystemNamespaces(h.opts.SystemNamespaces, h.systemNamespaceTracker) + + err := h.initProjectRegistrationNamespaces() + if err != nil { + logrus.Fatal(err) + } + + return NewLabelBasedProjectGetter(h.opts.ProjectLabel, h.isProjectRegistrationNamespace, h.isSystemNamespace, h.namespaces) +} + +// Single Namespace Handler + +func (h *handler) OnSingleNamespaceChange(name string, namespace *corev1.Namespace) (*corev1.Namespace, error) { + if namespace == nil { + return namespace, nil + } + if namespace.Name != h.systemNamespace { + // enqueue system namespace to ensure that rolebindings are updated + + logrus.Debugf("Enqueue system namespace to ensure that rolebindings are updated in OnSingleNamespaceChange: %s", h.systemNamespace) + h.namespaces.Enqueue(h.systemNamespace) + return namespace, nil + } + if namespace.DeletionTimestamp != nil { + // When a namespace gets deleted, the ConfigMap deployed in that namespace should also get deleted + // Therefore, we do not need to apply anything in this situation to avoid spamming logs with trying to apply + // a resource to a namespace that is being terminated + logrus.Debugf("OnSingleNamespaceChange: namespace %s has deletion timestamp of %v", namespace.Name, namespace.DeletionTimestamp) + return namespace, nil + } + // Trigger applying the data for this projectRegistrationNamespace + var objs []runtime.Object + objs = append(objs, h.getConfigMap("", namespace)) + return namespace, h.configureApplyForNamespace(namespace).ApplyObjects(objs...) +} + +// Multiple Namespaces Handler + +func (h *handler) OnMultiNamespaceChange(name string, namespace *corev1.Namespace) (*corev1.Namespace, error) { + if namespace == nil { + logrus.Debugf("OnMultiNamespaceChange() called with no namespace.") + return namespace, nil + } + + switch { + // note: the check for a project registration namespace must happen before + // we check for whether it is a system namespace to address the scenario where + // the 'projectLabel: systemProjectLabelValue' is added to the project registration + // namespace, which will cause it to be ignored and left in the System Project unless + // we apply the ProjectRegistrationNamespace logic first (a toy illustration follows below).
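To make that ordering concern concrete, here is a toy sketch (hypothetical namespace name; not the operator's code) in which one namespace satisfies both predicates, so swapping the two cases would misclassify it:

```go
package main

import "fmt"

// classify mirrors the shape of the switch below: the project registration
// check runs before the system namespace check.
func classify(isProjectRegistration, isSystem func(string) bool, ns string) string {
	switch {
	case isProjectRegistration(ns):
		return "project registration namespace"
	case isSystem(ns):
		return "system namespace"
	default:
		return "project namespace"
	}
}

func main() {
	// A project registration namespace that was also labeled into the system
	// project matches both checks; the ordering keeps it handled as a
	// registration namespace instead of being silently ignored.
	matchesBoth := func(ns string) bool { return ns == "cattle-project-p-example" }
	fmt.Println(classify(matchesBoth, matchesBoth, "cattle-project-p-example"))
}
```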
+ case h.isProjectRegistrationNamespace(namespace): + err := h.enqueueProjectNamespaces(namespace) + if err != nil { + logrus.Debugf("Error while enqueuing project namespaces for project registration namespace %s: %s", namespace.Name, err) + return namespace, err + } + if namespace.DeletionTimestamp != nil { + logrus.Debugf("Project registration namespace %s has deletion timestamp %v in OnMultiNamespaceChange()", namespace.Name, namespace.DeletionTimestamp) + h.projectRegistrationNamespaceTracker.Delete(namespace) + } + return namespace, nil + case h.isSystemNamespace(namespace): + // nothing to do, we always ignore system namespaces + logrus.Debugf("Ignoring system namespace: %s", namespace.Name) + return namespace, nil + default: + err := h.applyProjectRegistrationNamespaceForNamespace(namespace) + if err != nil { + logrus.Debugf("Error applying project registration namespace for namespace %s: %s", namespace.Name, err) + return namespace, err + } + return namespace, nil + } +} + +func (h *handler) enqueueProjectNamespaces(projectRegistrationNamespace *corev1.Namespace) error { + if projectRegistrationNamespace == nil { + return nil + } + // ensure that we are working with the projectRegistrationNamespace that we expect, not the one we found + expectedNamespace, exists := h.projectRegistrationNamespaceTracker.Get(projectRegistrationNamespace.Name) + if !exists { + // we no longer expect this namespace to exist, so don't enqueue any namespaces + return nil + } + // projectRegistrationNamespace was modified or removed, so we should re-enqueue any namespaces tied to it + projectID, ok := expectedNamespace.Labels[h.opts.ProjectLabel] + if !ok { + return fmt.Errorf("could not find project that projectRegistrationNamespace %s is tied to", projectRegistrationNamespace.Name) + } + projectNamespaces, err := h.namespaceCache.GetByIndex(NamespacesByProjectExcludingRegistrationID, projectID) + if err != nil { + return err + } + for _, ns := range projectNamespaces { + h.namespaces.Enqueue(ns.Name) + } + logrus.Debugf("ProjectRegistrationNamespace %s was modified or removed in call to enqueueProjectNamespaces(). Re-enqueueing any namespaces tied to it.", projectRegistrationNamespace.Name) + return nil +} + +func (h *handler) applyProjectRegistrationNamespaceForNamespace(namespace *corev1.Namespace) error { + // get the project ID and generate the namespace object to be applied + projectID, inProject := h.getProjectIDFromNamespaceLabels(namespace) + + // update the namespace with the appropriate label on it + err := h.updateNamespaceWithHelmOperatorProjectLabel(namespace, projectID, inProject) + if err != nil { + logrus.Debugf("Error updating namespace %s with project label value %s: %s", namespace.Name, projectID, err) + return err + } + if !inProject { + return nil + } + + logrus.Infof("Calling projectRegistrationNamespaceApplyinator for project %s", projectID) + // Note: why do we use an Applyinator.Apply here instead of just directly + // running h.applyProjectRegistrationNamespace?
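The answer below leans on the client-go workqueue's coalescing guarantee, which is easy to see in isolation. A standalone sketch (with a made-up project ID; the operator's Applyinator wraps a rate-limited variant of this queue):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	defer q.ShutDown()

	// Many project namespaces re-enqueue the same projectID at nearly the same time...
	for i := 0; i < 100; i++ {
		q.Add("p-example")
	}

	// ...but while the item is waiting to be processed, duplicate adds collapse
	// into a single entry, so the expensive apply runs once, not 100 times.
	fmt.Println(q.Len()) // 1
}
```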
+ // + // If we ran the logic for applying a Project Registration Namespace here, + // on every time a Project Namespace was re-enqueued, that would result in projects + // with a lot of namespaces all trying to run the exact same apply operation + // at the exact same time; however, the client-go workqueue implementation + // (which lasso controllers use under the hood as well) allow us to add the registration + // namespace to the queue with certain guarantees, namely this one that we need: + // + // * Stingy: a single item will not be processed multiple times concurrently, + // and if an item is added multiple times before it can be processed, it + // will only be processed once. + // + // This ensures that the actual application of a project registration namespace + // will only happen once, regardless of how many enqueues, which prevents us + // from hammering wrangler.Apply operations and forcing wrangler.Apply to engage + // in rate limiting (and output noisy logs) + h.projectRegistrationNamespaceApplyinator.Apply(projectID) + + return nil +} + +func (h *handler) applyProjectRegistrationNamespace(projectID string) error { + // Calculate whether to add the orphaned label + var isOrphaned bool + projectNamespaces, err := h.namespaceCache.GetByIndex(NamespacesByProjectExcludingRegistrationID, projectID) + if err != nil { + return err + } + var numNamespaces int + for _, ns := range projectNamespaces { + if ns.DeletionTimestamp != nil { + // ignore namespaces that are being deleted + continue + } + numNamespaces++ + } + if numNamespaces == 0 { + // add orphaned label and trigger a warning + isOrphaned = true + } + + // get the resources and validate them + projectRegistrationNamespace := h.getProjectRegistrationNamespace(projectID, isOrphaned) + // ensure that the projectRegistrationNamespace created from this projectID is valid + if len(projectRegistrationNamespace.Name) > 63 { + // ensure that we don't try to create a namespace with too big of a name + logrus.Errorf("could not apply namespace with name %s: name is above 63 characters", projectRegistrationNamespace.Name) + return nil + } + + // Trigger the apply and set the projectRegistrationNamespace + err = h.namespaceApply.ApplyObjects(projectRegistrationNamespace) + if err != nil { + return err + } + + // get the projectRegistrationNamespace after applying to get a valid object to pass in as the owner of the next apply + projectRegistrationNamespace, err = h.namespaces.Get(projectRegistrationNamespace.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("unable to get project registration namespace from cache after create: %s", err) + } + h.projectRegistrationNamespaceTracker.Set(projectRegistrationNamespace) + + if projectRegistrationNamespace.DeletionTimestamp != nil { + // When a namespace gets deleted, the ConfigMap deployed in that namespace and all ProjectHelmCharts should also get deleted + // Therefore, we do not need to apply anything in this situation to avoid spamming logs with trying to apply + // a resource to a namespace that is being terminated + // + // We expect this to be recalled when the project registration namespace is recreated anyways + return nil + } + + // Trigger applying the data for this projectRegistrationNamespace + var objs []runtime.Object + objs = append(objs, h.getConfigMap(projectID, projectRegistrationNamespace)) + err = h.configureApplyForNamespace(projectRegistrationNamespace).ApplyObjects(objs...) 
+ if err != nil { + return err + } + + // ensure that all ProjectHelmCharts are re-enqueued within this projectRegistrationNamespace + err = h.enqueueProjectHelmChartsForNamespace(projectRegistrationNamespace) + if err != nil { + return fmt.Errorf("unable to re-enqueue ProjectHelmCharts on reconciling change to namespaces in project %s: %s", projectID, err) + } + + return nil +} + +func (h *handler) updateNamespaceWithHelmOperatorProjectLabel(namespace *corev1.Namespace, projectID string, inProject bool) error { + if namespace.DeletionTimestamp != nil { + // no need to update a namespace about to be deleted + return nil + } + if len(h.opts.ProjectReleaseLabelValue) == 0 { + // do nothing, this label is irrelevant unless we create release namespaces + return nil + } + if len(projectID) == 0 || !inProject { + // ensure that the HelmProjectOperatorProjectLabel is removed if added + if namespace.Labels == nil { + return nil + } + if _, ok := namespace.Labels[common.HelmProjectOperatorProjectLabel]; !ok { + return nil + } + namespaceCopy := namespace.DeepCopy() + delete(namespaceCopy.Labels, common.HelmProjectOperatorProjectLabel) + _, err := h.namespaces.Update(namespaceCopy) + if err != nil { + return err + } + // the label has been removed; return here so we do not fall through and re-add it below + return nil + } + + namespaceCopy := namespace.DeepCopy() + if namespaceCopy.Labels == nil { + namespaceCopy.Labels = map[string]string{} + } + currLabel, ok := namespaceCopy.Labels[common.HelmProjectOperatorProjectLabel] + if ok && currLabel == projectID { + // the label is already correct, so skip the unnecessary Update call + return nil + } + namespaceCopy.Labels[common.HelmProjectOperatorProjectLabel] = projectID + _, err := h.namespaces.Update(namespaceCopy) + if err != nil { + return err + } + return nil +} + +func (h *handler) isProjectRegistrationNamespace(namespace *corev1.Namespace) bool { + if namespace == nil { + return false + } + return h.projectRegistrationNamespaceTracker.Has(namespace.Name) +} + +func (h *handler) isSystemNamespace(namespace *corev1.Namespace) bool { + if namespace == nil { + return false + } + isTrackedSystemNamespace := h.systemNamespaceTracker.Has(namespace.Name) + if isTrackedSystemNamespace { + return true + } + + var systemProjectLabelValues []string + if len(h.opts.SystemProjectLabelValues) != 0 { + systemProjectLabelValues = append(systemProjectLabelValues, h.opts.SystemProjectLabelValues...)
+ } + if len(h.opts.ProjectReleaseLabelValue) != 0 { + systemProjectLabelValues = append(systemProjectLabelValues, h.opts.ProjectReleaseLabelValue) + } + projectID, inProject := h.getProjectIDFromNamespaceLabels(namespace) + if !inProject { + return false + } + for _, systemProjectLabelValue := range systemProjectLabelValues { + // check if labels indicate this is a system project + if projectID == systemProjectLabelValue { + return true + } + } + return false +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/getter.go b/internal/helm-project-operator/pkg/controllers/namespace/getter.go new file mode 100644 index 00000000..087e6f66 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/getter.go @@ -0,0 +1,160 @@ +package namespace + +import ( + "fmt" + "sort" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + corecontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ProjectGetter allows you to get target namespaces based on a project and identify namespaces as special namespaces in a project +type ProjectGetter interface { + // IsProjectRegistrationNamespace returns whether to watch for ProjectHelmCharts in the provided namespace + IsProjectRegistrationNamespace(namespace *corev1.Namespace) bool + + // IsSystemNamespace returns whether the provided namespace is considered a system namespace + IsSystemNamespace(namespace *corev1.Namespace) bool + + // GetTargetProjectNamespaces returns the list of namespaces that should be targeted for a given ProjectHelmChart + // Any namespace returned by this should not be a project registration namespace or a system namespace + GetTargetProjectNamespaces(projectHelmChart *v1alpha1.ProjectHelmChart) ([]string, error) +} + +// Checker is a function that checks a namespace object and returns true or false +type Checker func(namespace *corev1.Namespace) bool + +// NewLabelBasedProjectGetter returns a ProjectGetter that gets target project namespaces that meet the following criteria: +// 1) Must have the same projectLabel value as the namespace where the ProjectHelmChart lives in +// 2) Must not be a project registration namespace +// 3) Must not be a system namespace +func NewLabelBasedProjectGetter( + projectLabel string, + isProjectRegistrationNamespace Checker, + isSystemNamespace Checker, + namespaces corecontroller.NamespaceController, +) ProjectGetter { + return &projectGetter{ + namespaces: namespaces, + + isProjectRegistrationNamespace: isProjectRegistrationNamespace, + isSystemNamespace: isSystemNamespace, + + getProjectNamespaces: func(projectHelmChart *v1alpha1.ProjectHelmChart) (*corev1.NamespaceList, error) { + // source of truth is the projectLabel pair that exists on the namespace that the ProjectHelmChart lives within + namespace, err := namespaces.Get(projectHelmChart.Namespace, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // The projectHelmChart is not in a namespace that exists anymore, this implies it may have been deleted + // Therefore, there are no project namespaces associated with this ProjectHelmChart + return nil, nil + } + return nil, err + } + projectLabelValue, ok := namespace.Labels[projectLabel] + if !ok { + return nil, fmt.Errorf("could not find value of label %s in namespace %s", projectLabel, namespace.Name) + } + 
return namespaces.List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", projectLabel, projectLabelValue), + }) + }, + } +} + +// NewSingleNamespaceProjectGetter returns a ProjectGetter that gets target project namespaces that meet the following criteria: +// 1) Must match the labels provided on spec.projectNamespaceSelector of the projectHelmChart in question +// 2) Must not be the registration namespace +// 3) Must not be part of the provided systemNamespaces +func NewSingleNamespaceProjectGetter( + registrationNamespace string, + systemNamespaces []string, + namespaces corecontroller.NamespaceController, +) ProjectGetter { + isSystemNamespace := make(map[string]bool) + for _, ns := range systemNamespaces { + isSystemNamespace[ns] = true + } + return &projectGetter{ + namespaces: namespaces, + + isProjectRegistrationNamespace: func(namespace *corev1.Namespace) bool { + // only one registrationNamespace exists + return namespace.Name == registrationNamespace + }, + isSystemNamespace: func(namespace *corev1.Namespace) bool { + // only track explicit systemNamespaces + return isSystemNamespace[namespace.Name] + }, + + getProjectNamespaces: func(projectHelmChart *v1alpha1.ProjectHelmChart) (*corev1.NamespaceList, error) { + // source of truth is the ProjectHelmChart spec.projectNamespaceSelector + selector, err := metav1.LabelSelectorAsSelector(projectHelmChart.Spec.ProjectNamespaceSelector) + if err != nil { + return nil, err + } + // List does not support the ability to ask for specific namespaces + // based on a metav1.LabelSelector, so we get everything and then filter + namespaceList, err := namespaces.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + if namespaceList == nil { + return nil, nil + } + var namespaces []corev1.Namespace + for _, ns := range namespaceList.Items { + if !selector.Matches(labels.Set(ns.Labels)) { + continue + } + namespaces = append(namespaces, ns) + } + namespaceList.Items = namespaces + return namespaceList, nil + }, + } +} + +type projectGetter struct { + namespaces corecontroller.NamespaceController + + isProjectRegistrationNamespace Checker + isSystemNamespace Checker + + getProjectNamespaces func(projectHelmChart *v1alpha1.ProjectHelmChart) (*corev1.NamespaceList, error) +} + +// IsProjectRegistrationNamespace returns whether to watch for ProjectHelmCharts in the provided namespace +func (g *projectGetter) IsProjectRegistrationNamespace(namespace *corev1.Namespace) bool { + return g.isProjectRegistrationNamespace(namespace) +} + +// IsSystemNamespace returns whether the provided namespace is considered a system namespace +func (g *projectGetter) IsSystemNamespace(namespace *corev1.Namespace) bool { + return g.isSystemNamespace(namespace) +} + +// GetTargetProjectNamespaces returns the list of namespaces that should be targeted for a given ProjectHelmChart +// Any namespace returned by this should not be a project registration namespace or a system namespace +func (g *projectGetter) GetTargetProjectNamespaces(projectHelmChart *v1alpha1.ProjectHelmChart) ([]string, error) { + projectNamespaceList, err := g.getProjectNamespaces(projectHelmChart) + if err != nil { + return nil, err + } + if projectNamespaceList == nil { + return nil, nil + } + var namespaces []string + for _, ns := range projectNamespaceList.Items { + if g.isProjectRegistrationNamespace(&ns) || g.isSystemNamespace(&ns) { + continue + } + namespaces = append(namespaces, ns.Name) + } + sort.Strings(namespaces) + return namespaces, nil +} diff --git
a/internal/helm-project-operator/pkg/controllers/namespace/indexers.go b/internal/helm-project-operator/pkg/controllers/namespace/indexers.go new file mode 100644 index 00000000..754bd5df --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/indexers.go @@ -0,0 +1,41 @@ +package namespace + +import ( + "github.com/rancher/helm-project-operator/pkg/controllers/common" + corev1 "k8s.io/api/core/v1" +) + +const ( + // NamespacesByProjectExcludingRegistrationID is an index mapping namespaces to project that they belong into + // The index will omit any namespaces considered to be the Project Registration namespace or a system namespace + NamespacesByProjectExcludingRegistrationID = "helm.cattle.io/namespaces-by-project-id-excluding-registration" +) + +// initIndexers initializes indexers that allow for more efficient computations on related resources without relying on additional +// calls to be made to the Kubernetes API by referencing the cache instead +func (h *handler) initIndexers() { + h.namespaceCache.AddIndexer(NamespacesByProjectExcludingRegistrationID, h.namespaceToProjectIDExcludingRegistration) +} + +func (h *handler) namespaceToProjectIDExcludingRegistration(namespace *corev1.Namespace) ([]string, error) { + if namespace == nil { + return nil, nil + } + if h.isSystemNamespace(namespace) { + return nil, nil + } + if h.isProjectRegistrationNamespace(namespace) { + return nil, nil + } + if namespace.Labels[common.HelmProjectOperatedLabel] == "true" { + // always ignore Helm Project Operated namespaces since those are only + // to be scoped to namespaces that are project registration namespaces + return nil, nil + } + projectID, inProject := h.getProjectIDFromNamespaceLabels(namespace) + if !inProject { + // nothing to do + return nil, nil + } + return []string{projectID}, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/namespaces.go b/internal/helm-project-operator/pkg/controllers/namespace/namespaces.go new file mode 100644 index 00000000..217f5639 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/namespaces.go @@ -0,0 +1,48 @@ +package namespace + +import ( + "fmt" + + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// initSystemNamespaces initializes all System Namespaces on the Tracker +func (h *handler) initSystemNamespaces(systemNamespaceList []string, systemNamespaceTracker Tracker) { + for _, namespace := range systemNamespaceList { + systemNamespaceTracker.Set(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}) + } +} + +// initProjectRegistrationNamespaces initializes all Project Registration Namespaces on the Tracker +// It also automatically triggers the creation of the Project Registration Namespaces if necessary +func (h *handler) initProjectRegistrationNamespaces() error { + namespaceList, err := h.namespaces.List(metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("unable to list namespaces to enqueue all Helm charts: %s", err) + } + if namespaceList != nil { + logrus.Infof("Identifying and registering projectRegistrationNamespaces...") + // trigger the OnChange events for all namespaces before returning on a register + // + // this ensures that registration will create projectRegistrationNamespaces and + // have isProjectRegistration and isSystemNamespace up to sync before it provides + // the ProjectGetter interface to other controllers that need it. + // + // Q: Why don't we use Enqueue here? 
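Before the answer, a self-contained toy (not the operator's code) that makes the distinction concrete: enqueueing only records a key for later processing, while a direct handler call completes before returning.

```go
package main

import "fmt"

// enqueue models controller.Enqueue: it records the key for asynchronous
// processing by some worker and returns immediately, with no completion guarantee.
func enqueue(queue chan<- string, key string) { queue <- key }

func main() {
	queue := make(chan string, 10)
	handled := make(map[string]bool)
	onChange := func(key string) { handled[key] = true }

	enqueue(queue, "ns-1")
	fmt.Println(handled["ns-1"]) // false: no worker has processed the key yet

	onChange("ns-2") // a direct call, as initProjectRegistrationNamespaces makes
	fmt.Println(handled["ns-2"]) // true: the handler has definitely run
}
```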
+ // + // Enqueue will add it to the workqueue but there's no guarantee the namespace's processing + // will happen before this function exits, which is what we need to guarantee here. + // As a result, we explicitly call OnChange here to force the apply to happen and wait for it to finish + for _, ns := range namespaceList.Items { + _, err := h.OnMultiNamespaceChange(ns.Name, &ns) + if err != nil { + // encountered some error, just fail to start + // Possible TODO: Perhaps we should add a backoff retry here? + return fmt.Errorf("unable to initialize projectRegistrationNamespaces before starting other handlers that utilize ProjectGetter: %s", err) + } + } + } + return nil +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/reconcilers.go b/internal/helm-project-operator/pkg/controllers/namespace/reconcilers.go new file mode 100644 index 00000000..1d816c5b --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/reconcilers.go @@ -0,0 +1,59 @@ +package namespace + +import ( + "context" + + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/unstructured" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" +) + +// addReconcilers registers reconcilers on the apply object that configure how it reconciles changes to specific resources +func (h *handler) addReconcilers(apply apply.Apply, dynamic dynamic.Interface) apply.Apply { + // force recreate configmaps since configmaps can have errors on updates + // for example, if a configmap has been modified to have immutable set to true, it will encounter an error + // another example is if a user tries to switch a key from data to binaryData or vice versa; in this case, + // the k8s API will throw an error due to trying to move a field across locations + r := forceRecreator{ + NamespaceableResourceInterface: dynamic.Resource(corev1.SchemeGroupVersion.WithResource("configmaps")), + } + apply = apply.WithReconciler(corev1.SchemeGroupVersion.WithKind("ConfigMap"), r.deleteAndReplace) + + logrus.Infof("Adding reconcilers on the apply object %s", apply) + return apply +} + +// forceRecreator is a wrapper on the dynamic.NamespaceableResourceInterface that implements an apply.Reconciler +// that uses the interface to delete and recreate a dynamic object on reconcile +type forceRecreator struct { + dynamic.NamespaceableResourceInterface + + deleteOptions metav1.DeleteOptions + createOptions metav1.CreateOptions +} + +func (r *forceRecreator) deleteAndReplace(oldObj runtime.Object, newObj runtime.Object) (bool, error) { + meta, err := meta.Accessor(oldObj) + if err != nil { + return false, err + } + nsed := r.NamespaceableResourceInterface.Namespace(meta.GetNamespace()) + // convert newObj to unstructured + uNewObj, err := unstructured.ToUnstructured(newObj) + if err != nil { + return false, err + } + // perform delete and recreate + if err := nsed.Delete(context.TODO(), meta.GetName(), r.deleteOptions); err != nil { + return false, err + } + if _, err := nsed.Create(context.TODO(), uNewObj, r.createOptions); err != nil { + return false, err + } + return true, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/resolvers.go b/internal/helm-project-operator/pkg/controllers/namespace/resolvers.go new file mode 100644 index 00000000..11d814de --- /dev/null +++ 
b/internal/helm-project-operator/pkg/controllers/namespace/resolvers.go @@ -0,0 +1,45 @@ +package namespace + +import ( + "context" + + "github.com/rancher/wrangler/v3/pkg/relatedresource" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Note: each resource created in resources.go should have a resolver handler here +// The only exception is namespaces since those are handled by the main controller OnChange + +// initResolvers initializes resolvers that need to be set to watch child resources of Project Registration Namespaces +func (h *handler) initResolvers(ctx context.Context) { + relatedresource.WatchClusterScoped( + ctx, "watch-project-registration-namespace-data", h.resolveProjectRegistrationNamespaceData, h.namespaces, + h.configmaps, + ) +} + +func (h *handler) resolveProjectRegistrationNamespaceData(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) { + if !h.projectRegistrationNamespaceTracker.Has(namespace) { + // no longer need to watch for changes to resources in this namespace since it is no longer tracked + // if the namespace ever becomes unorphaned, we can track it again + return nil, nil + } + if obj == nil { + return nil, nil + } + if configmap, ok := obj.(*corev1.ConfigMap); ok { + return h.resolveConfigMap(namespace, name, configmap) + } + return nil, nil +} + +func (h *handler) resolveConfigMap(namespace, name string, configmap *corev1.ConfigMap) ([]relatedresource.Key, error) { + // check if name matches + if name == h.getConfigMapName() { + return []relatedresource.Key{{ + Name: namespace, + }}, nil + } + return nil, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/resources.go b/internal/helm-project-operator/pkg/controllers/namespace/resources.go new file mode 100644 index 00000000..62441644 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/resources.go @@ -0,0 +1,48 @@ +package namespace + +import ( + "fmt" + "strings" + + "github.com/rancher/helm-project-operator/pkg/controllers/common" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Note: each resource created here should have a resolver set in resolvers.go +// The only exception is namespaces since those are handled by the main controller OnChange + +// getProjectRegistrationNamespace returns the namespace created on behalf of a new Project that has been identified based on +// unique values observed for all namespaces with the label h.opts.ProjectLabel +func (h *handler) getProjectRegistrationNamespace(projectID string, isOrphaned bool) *corev1.Namespace { + if len(h.opts.ProjectLabel) == 0 { + return nil + } + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(common.ProjectRegistrationNamespaceFmt, projectID), + Annotations: common.GetProjectNamespaceAnnotations(projectID, h.opts.ProjectLabel, h.opts.ClusterID), + Labels: common.GetProjectNamespaceLabels(projectID, h.opts.ProjectLabel, projectID, isOrphaned), + }, + } +} + +// getConfigMap returns the values.yaml and questions.yaml ConfigMap that is expected to be created in all Project Registration Namespaces +func (h *handler) getConfigMap(projectID string, namespace *corev1.Namespace) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: h.getConfigMapName(), + Namespace: namespace.Name, + Labels: common.GetCommonLabels(projectID), + }, + Data: map[string]string{ + "values.yaml": h.valuesYaml, + "questions.yaml": h.questionsYaml, + }, + } +} + +// 
getConfigMapName returns the name of the ConfigMap to be deployed in all Project Registration Namespaces +func (h *handler) getConfigMapName() string { + return strings.ReplaceAll(h.opts.HelmAPIVersion, "/", ".") +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/tracker.go b/internal/helm-project-operator/pkg/controllers/namespace/tracker.go new file mode 100644 index 00000000..4df1558d --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/tracker.go @@ -0,0 +1,72 @@ +package namespace + +import ( + "sync" + + corev1 "k8s.io/api/core/v1" +) + +// Getter provides read access to namespaces that have been stored in a register +type Getter interface { + // Has returns whether the namespace has been registered + Has(name string) bool + + // Get retrieves a registered namespace + Get(name string) (*corev1.Namespace, bool) +} + +// Tracker can store namespace references and get them +type Tracker interface { + Getter + + // Set registers a namespace + Set(namespace *corev1.Namespace) + + // Delete unregisters a namespace + Delete(namespace *corev1.Namespace) +} + +// NewTracker returns a new tracker that can track and get namespaces +func NewTracker() Tracker { + return &namespaceTracker{ + namespaceMap: make(map[string]*corev1.Namespace), + } +} + +type namespaceTracker struct { + namespaceMap map[string]*corev1.Namespace + mapLock sync.RWMutex +} + +// Has returns whether the namespace has been registered +func (r *namespaceTracker) Has(name string) bool { + r.mapLock.RLock() + defer r.mapLock.RUnlock() + _, exists := r.namespaceMap[name] + return exists +} + +// Get retrieves a registered namespace +func (r *namespaceTracker) Get(name string) (*corev1.Namespace, bool) { + r.mapLock.RLock() + defer r.mapLock.RUnlock() + ns, exists := r.namespaceMap[name] + if !exists { + return nil, false + } + return ns, true +} + +// Set registers a namespace +func (r *namespaceTracker) Set(namespace *corev1.Namespace) { + r.mapLock.Lock() + defer r.mapLock.Unlock() + r.namespaceMap[namespace.Name] = namespace +} + +// Delete unregisters a namespace +func (r *namespaceTracker) Delete(namespace *corev1.Namespace) { + r.mapLock.Lock() + defer r.mapLock.Unlock() + delete(r.namespaceMap, namespace.Name) +} diff --git a/internal/helm-project-operator/pkg/controllers/namespace/utils.go b/internal/helm-project-operator/pkg/controllers/namespace/utils.go new file mode 100644 index 00000000..e9182b02 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/namespace/utils.go @@ -0,0 +1,45 @@ +package namespace + +import ( + "fmt" + + "github.com/rancher/wrangler/v3/pkg/apply" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// configureApplyForNamespace configures the apply to treat the provided namespace as an owner +func (h *handler) configureApplyForNamespace(namespace *corev1.Namespace) apply.Apply { + return h.apply. + WithOwner(namespace). + // Why do we need the release name?
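A small sketch of the answer that follows (hypothetical namespace and release names, mirroring the WithSetID format string used just below): two operator instances with different release names track their applied objects under different set IDs, so neither garbage-collects the other's resources.

```go
package main

import "fmt"

// setID mirrors the format used in configureApplyForNamespace below.
func setID(namespaceName, releaseName string) string {
	return fmt.Sprintf("%s-%s-data", namespaceName, releaseName)
}

func main() {
	fmt.Println(setID("cattle-project-p-example", "monitoring")) // cattle-project-p-example-monitoring-data
	fmt.Println(setID("cattle-project-p-example", "logging"))    // cattle-project-p-example-logging-data
}
```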
+ // To ensure that we don't override the set created by another instance of the Project Operator + // running under a different release name operating on the same project registration namespace + WithSetID(fmt.Sprintf("%s-%s-data", namespace.Name, h.opts.ReleaseName)) +} + +// getProjectIDFromNamespaceLabels returns the projectID from the namespace's project label, along with whether the namespace is part of a project +func (h *handler) getProjectIDFromNamespaceLabels(namespace *corev1.Namespace) (string, bool) { + if len(h.opts.ProjectLabel) == 0 { + // nothing to do, namespaces are not project scoped + return "", false + } + labels := namespace.GetLabels() + if labels == nil { + return "", false + } + projectID, namespaceInProject := labels[h.opts.ProjectLabel] + return projectID, namespaceInProject +} + +// enqueueProjectHelmChartsForNamespace simply enqueues all ProjectHelmCharts in a namespace +func (h *handler) enqueueProjectHelmChartsForNamespace(namespace *corev1.Namespace) error { + projectHelmCharts, err := h.projectHelmChartCache.List(namespace.Name, labels.Everything()) + if err != nil { + return err + } + for _, projectHelmChart := range projectHelmCharts { + h.projectHelmCharts.Enqueue(projectHelmChart.Namespace, projectHelmChart.Name) + } + return nil +} diff --git a/internal/helm-project-operator/pkg/controllers/parse.go b/internal/helm-project-operator/pkg/controllers/parse.go new file mode 100644 index 00000000..ee2347ab --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/parse.go @@ -0,0 +1,64 @@ +package controllers + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/base64" + "errors" + "fmt" + "io" + "strings" +) + +// parseValuesAndQuestions parses the base64TgzChart and emits the values.yaml and questions.yaml contained within it +// If values.yaml or questions.yaml are not specified, it will return an empty string for each +func parseValuesAndQuestions(base64TgzChart string) (string, string, error) { + tgzChartBytes, err := base64.StdEncoding.DecodeString(base64TgzChart) + if err != nil { + return "", "", fmt.Errorf("unable to decode base64TgzChart to tgzChart: %s", err) + } + gzipReader, err := gzip.NewReader(bytes.NewReader(tgzChartBytes)) + if err != nil { + return "", "", fmt.Errorf("unable to create gzipReader to read from base64TgzChart: %s", err) + } + defer gzipReader.Close() + tarReader := tar.NewReader(gzipReader) + var valuesYamlBuffer, questionsYamlBuffer bytes.Buffer + var foundValuesYaml, foundQuestionsYaml bool + for { + h, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return "", "", err + } + if h.Typeflag != tar.TypeReg { + continue + } + // tar entry names always use forward slashes, regardless of the OS this binary runs on + splitName := strings.SplitN(h.Name, "/", 2) + nameWithoutRootDir := splitName[0] + if len(splitName) > 1 { + nameWithoutRootDir = splitName[1] + } + if nameWithoutRootDir == "values.yaml" || nameWithoutRootDir == "values.yml" { + if foundValuesYaml { + // multiple values.yaml + return "", "", errors.New("multiple values.yaml or values.yml found in base64TgzChart provided") + } + foundValuesYaml = true + if _, err := io.Copy(&valuesYamlBuffer, tarReader); err != nil { + return "", "", err + } + } + if nameWithoutRootDir == "questions.yaml" || nameWithoutRootDir == "questions.yml" { + if foundQuestionsYaml { + // multiple questions.yaml + return "", "", errors.New("multiple questions.yaml or questions.yml found in base64TgzChart provided") + } + foundQuestionsYaml = true + if _, err := io.Copy(&questionsYamlBuffer, tarReader); err != nil { + return "", "", err + } + } + } + return valuesYamlBuffer.String(), questionsYamlBuffer.String(), nil +} diff --git
a/internal/helm-project-operator/pkg/controllers/project/cleanup.go b/internal/helm-project-operator/pkg/controllers/project/cleanup.go new file mode 100644 index 00000000..2602ac45 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/cleanup.go @@ -0,0 +1,53 @@ +package project + +import ( + "fmt" + + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// initRemoveCleanupLabels removes cleanup labels from all ProjectHelmCharts targeted by this operator +// This gets applied once on startup +func (h *handler) initRemoveCleanupLabels() error { + namespaceList, err := h.namespaces.List(metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("unable to list namespaces to remove cleanup label from all ProjectHelmCharts: %s", err) + } + if namespaceList == nil { + return nil + } + logrus.Infof("Removing cleanup label from all registered ProjectHelmCharts...") + // ensure all ProjectHelmCharts in every namespace no longer have the cleanup label on them + for _, ns := range namespaceList.Items { + projectHelmChartList, err := h.projectHelmCharts.List(ns.Name, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("unable to list ProjectHelmCharts in namespace %s to remove cleanup label: %s", ns.Name, err) + } + if projectHelmChartList == nil { + continue + } + for _, projectHelmChart := range projectHelmChartList.Items { + shouldManage := h.shouldManage(&projectHelmChart) + if !shouldManage { + // not a valid ProjectHelmChart for this operator + continue + } + if projectHelmChart.Labels == nil { + continue + } + _, ok := projectHelmChart.Labels[common.HelmProjectOperatedCleanupLabel] + if !ok { + continue + } + projectHelmChartCopy := projectHelmChart.DeepCopy() + delete(projectHelmChartCopy.Labels, common.HelmProjectOperatedCleanupLabel) + _, err = h.projectHelmCharts.Update(projectHelmChartCopy) + if err != nil { + return fmt.Errorf("unable to remove cleanup label from ProjectHelmChart %s/%s: %s", projectHelmChart.Namespace, projectHelmChart.Name, err) + } + } + } + return nil +} diff --git a/internal/helm-project-operator/pkg/controllers/project/controller.go b/internal/helm-project-operator/pkg/controllers/project/controller.go new file mode 100644 index 00000000..3c25e91d --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/controller.go @@ -0,0 +1,354 @@ +package project + +import ( + "context" + "fmt" + + "github.com/k3s-io/helm-controller/pkg/controllers/chart" + k3shelmcontroller "github.com/k3s-io/helm-controller/pkg/generated/controllers/helm.cattle.io/v1" + helmlockercontroller "github.com/rancher/helm-locker/pkg/generated/controllers/helm.cattle.io/v1alpha1" + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/helm-project-operator/pkg/controllers/namespace" + helmprojectcontroller "github.com/rancher/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/remove" + "github.com/rancher/wrangler/v3/pkg/apply" + corecontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + rbaccontroller "github.com/rancher/wrangler/v3/pkg/generated/controllers/rbac/v1" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/sirupsen/logrus" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + DefaultJobImage = 
chart.DefaultJobImage +) + +type handler struct { + systemNamespace string + opts common.Options + valuesOverride v1alpha1.GenericMap + apply apply.Apply + projectHelmCharts helmprojectcontroller.ProjectHelmChartController + projectHelmChartCache helmprojectcontroller.ProjectHelmChartCache + configmaps corecontroller.ConfigMapController + configmapCache corecontroller.ConfigMapCache + roles rbaccontroller.RoleController + roleCache rbaccontroller.RoleCache + clusterrolebindings rbaccontroller.ClusterRoleBindingController + clusterrolebindingCache rbaccontroller.ClusterRoleBindingCache + helmCharts k3shelmcontroller.HelmChartController + helmReleases helmlockercontroller.HelmReleaseController + namespaces corecontroller.NamespaceController + namespaceCache corecontroller.NamespaceCache + rolebindings rbaccontroller.RoleBindingController + rolebindingCache rbaccontroller.RoleBindingCache + projectGetter namespace.ProjectGetter +} + +func Register( + ctx context.Context, + systemNamespace string, + opts common.Options, + valuesOverride v1alpha1.GenericMap, + apply apply.Apply, + projectHelmCharts helmprojectcontroller.ProjectHelmChartController, + projectHelmChartCache helmprojectcontroller.ProjectHelmChartCache, + configmaps corecontroller.ConfigMapController, + configmapCache corecontroller.ConfigMapCache, + roles rbaccontroller.RoleController, + roleCache rbaccontroller.RoleCache, + clusterrolebindings rbaccontroller.ClusterRoleBindingController, + clusterrolebindingCache rbaccontroller.ClusterRoleBindingCache, + helmCharts k3shelmcontroller.HelmChartController, + helmReleases helmlockercontroller.HelmReleaseController, + namespaces corecontroller.NamespaceController, + namespaceCache corecontroller.NamespaceCache, + rolebindings rbaccontroller.RoleBindingController, + rolebindingCache rbaccontroller.RoleBindingCache, + projectGetter namespace.ProjectGetter, +) { + + apply = apply. + // Why do we need the release name? + // To ensure that we don't override the set created by another instance of the Project Operator + // running under a different release name operating on the same project registration namespace + WithSetID(fmt.Sprintf("%s-project-helm-chart-applier", opts.ReleaseName)). + WithCacheTypes( + helmCharts, + helmReleases, + namespaces, + rolebindings). + WithNoDeleteGVK(namespaces.GroupVersionKind()) + + h := &handler{ + systemNamespace: systemNamespace, + opts: opts, + valuesOverride: valuesOverride, + apply: apply, + projectHelmCharts: projectHelmCharts, + projectHelmChartCache: projectHelmChartCache, + configmaps: configmaps, + configmapCache: configmapCache, + roles: roles, + clusterrolebindings: clusterrolebindings, + clusterrolebindingCache: clusterrolebindingCache, + roleCache: roleCache, + helmCharts: helmCharts, + helmReleases: helmReleases, + namespaces: namespaces, + namespaceCache: namespaceCache, + rolebindings: rolebindings, + rolebindingCache: rolebindingCache, + projectGetter: projectGetter, + } + + h.initIndexers() + + h.initResolvers(ctx) + + // Why do we need to add the managedBy string to the generatingHandlerName? 
+ // + // By default, generating handlers use the name of the controller as the set ID for the wrangler.apply operation + // Therefore, if multiple iterations of the helm-controller are using the same set ID, they will try to overwrite each other's + // resources since each controller will detect the other's set as resources that need to be cleaned up to apply the new set + // + // To resolve this, we simply prefix the provided managedBy string to the generatingHandler controller's name only to ensure that the + // set ID specified will only target this particular controller + generatingHandlerName := fmt.Sprintf("%s-project-helm-chart-registration", opts.ControllerName) + helmprojectcontroller.RegisterProjectHelmChartGeneratingHandler(ctx, + projectHelmCharts, + apply, + "", + generatingHandlerName, + h.OnChange, + &generic.GeneratingHandlerOptions{ + AllowClusterScoped: true, + }) + + remove.RegisterScopedOnRemoveHandler(ctx, projectHelmCharts, "on-project-helm-chart-remove", + func(key string, obj runtime.Object) (bool, error) { + if obj == nil { + return false, nil + } + projectHelmChart, ok := obj.(*v1alpha1.ProjectHelmChart) + if !ok { + return false, nil + } + return h.shouldManage(projectHelmChart), nil + }, + helmprojectcontroller.FromProjectHelmChartHandlerToHandler(h.OnRemove), + ) + + err := h.initRemoveCleanupLabels() + if err != nil { + logrus.Fatal(err) + } +} + +func (h *handler) shouldManage(projectHelmChart *v1alpha1.ProjectHelmChart) bool { + if projectHelmChart == nil { + return false + } + namespace, err := h.namespaceCache.Get(projectHelmChart.Namespace) + if err != nil { + // If the namespace that the projectHelmChart resides in does not exist, it shouldn't be managed + // + // Note: we know that this error would only happen if the namespace is not found since the only valid error returned from this + // call is errors.NewNotFound(c.resource, name) + return false + } + isProjectRegistrationNamespace := h.projectGetter.IsProjectRegistrationNamespace(namespace) + if !isProjectRegistrationNamespace { + // only watching resources in registered namespaces + return false + } + if projectHelmChart.Spec.HelmAPIVersion != h.opts.HelmAPIVersion { + // only watch resources with the HelmAPIVersion this controller was configured with + return false + } + return true +} + +func (h *handler) OnChange(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus) ([]runtime.Object, v1alpha1.ProjectHelmChartStatus, error) { + var objs []runtime.Object + + // initial checks to see if we should handle this + shouldManage := h.shouldManage(projectHelmChart) + if !shouldManage { + logrus.Infof("should not manage ProjectHelmChart %s/%s", projectHelmChart.Namespace, projectHelmChart.Name) + return nil, projectHelmChartStatus, nil + } + if projectHelmChart.DeletionTimestamp != nil { + return nil, projectHelmChartStatus, nil + } + + // handle charts with cleanup label + if common.HasCleanupLabel(projectHelmChart) { + projectHelmChartStatus = h.getCleanupStatus(projectHelmChart, projectHelmChartStatus) + logrus.Infof("Cleaning up HelmChart and HelmRelease for ProjectHelmChart %s/%s", projectHelmChart.Namespace, projectHelmChart.Name) + return nil, projectHelmChartStatus, nil + } + + // get information about the projectHelmChart + projectID, err := h.getProjectID(projectHelmChart) + if err != nil { + logrus.Errorf("failed to get project ID for ProjectHelmChart: %s", err) + return nil, projectHelmChartStatus, err + } + releaseNamespace, releaseName := 
h.getReleaseNamespaceAndName(projectHelmChart) + + // check if the releaseName is already tracked by another ProjectHelmChart + projectHelmCharts, err := h.projectHelmChartCache.GetByIndex(ProjectHelmChartByReleaseName, releaseName) + if err != nil { + logrus.Errorf("unable to get ProjectHelmCharts to verify if release is already tracked: %s", err) + return nil, projectHelmChartStatus, fmt.Errorf("unable to get ProjectHelmCharts to verify if release is already tracked: %s", err) + } + for _, conflictingProjectHelmChart := range projectHelmCharts { + if conflictingProjectHelmChart == nil { + continue + } + if projectHelmChart.Name == conflictingProjectHelmChart.Name && projectHelmChart.Namespace == conflictingProjectHelmChart.Namespace { + logrus.Info("conflicting ProjectHelmChart is the ProjectHelmChart currently being processed") + // looking at the same projectHelmChart that we have at hand + continue + } + if len(conflictingProjectHelmChart.Status.Status) == 0 { + // the other ProjectHelmChart hasn't been processed yet, so let it fail out whenever it is processed + logrus.Info("conflicting ProjectHelmChart has not been processed yet; letting it fail out when it is processed") + continue + } + if conflictingProjectHelmChart.Status.Status == "UnableToCreateHelmRelease" { + logrus.Info("conflicting ProjectHelmChart was unable to create its Helm release, continuing with this one") + // the other ProjectHelmChart is the one that will not be able to progress, so we can continue to update this one + continue + } + // we have found another ProjectHelmChart that already exists and is tracking this release with some non-conflicting status + err = fmt.Errorf( + "ProjectHelmChart %s/%s already tracks release %s/%s", + conflictingProjectHelmChart.Namespace, conflictingProjectHelmChart.Name, + releaseNamespace, releaseName, + ) + logrus.Error(err.Error()) + projectHelmChartStatus = h.getUnableToCreateHelmReleaseStatus(projectHelmChart, projectHelmChartStatus, err) + return nil, projectHelmChartStatus, nil + } + + // set basic statuses + projectHelmChartStatus.SystemNamespace = h.systemNamespace + projectHelmChartStatus.ReleaseNamespace = releaseNamespace + projectHelmChartStatus.ReleaseName = releaseName + + // gather target project namespaces + targetProjectNamespaces, err := h.projectGetter.GetTargetProjectNamespaces(projectHelmChart) + if err != nil { + logrus.Errorf("unable to find project namespaces to deploy ProjectHelmChart: %s", err) + return nil, projectHelmChartStatus, fmt.Errorf("unable to find project namespaces to deploy ProjectHelmChart: %s", err) + } + if len(targetProjectNamespaces) == 0 { + projectReleaseNamespace := h.getProjectReleaseNamespace(projectID, true, projectHelmChart) + if projectReleaseNamespace != nil { + objs = append(objs, projectReleaseNamespace) + } + projectHelmChartStatus = h.getNoTargetNamespacesStatus(projectHelmChart, projectHelmChartStatus) + return objs, projectHelmChartStatus, nil + } + + if releaseNamespace != h.systemNamespace && releaseNamespace != projectHelmChart.Namespace { + // need to add release namespace to list of objects to be created + projectReleaseNamespace := h.getProjectReleaseNamespace(projectID, false, projectHelmChart) + objs = append(objs, projectReleaseNamespace) + // need to add auto-generated release namespace to target namespaces + targetProjectNamespaces = append(targetProjectNamespaces, releaseNamespace) + } + projectHelmChartStatus.TargetNamespaces = targetProjectNamespaces + + // get values.yaml from ProjectHelmChart spec and default overrides + values := 
h.getValues(projectHelmChart, projectID, targetProjectNamespaces) + valuesContentBytes, err := values.ToYAML() + if err != nil { + err = fmt.Errorf("unable to marshal spec.values: %s", err) + projectHelmChartStatus = h.getValuesParseErrorStatus(projectHelmChart, projectHelmChartStatus, err) + return nil, projectHelmChartStatus, nil + } + + ns, err := h.namespaceCache.Get(releaseNamespace) + if ns == nil || apierrors.IsNotFound(err) { + // The release namespace does not exist yet, create it and leave the status as UnableToCreateHelmRelease + // + // Note: since we have a resolver that watches for the project release namespace, this handler will get re-enqueued + // + // Note: the reason why we need to do this check is to ensure that deleting a project release namespace will delete + // and recreate the HelmChart and HelmRelease resources, which will ensure that the HelmChart gets re-installed onto + // the newly created namespace. Without this, a deleted release namespace will always have ProjectHelmCharts stuck in + // WaitingForDashboardValues since the underlying helm release will never be recreated + err = fmt.Errorf("cannot find release namespace %s to deploy release", releaseNamespace) + projectHelmChartStatus = h.getUnableToCreateHelmReleaseStatus(projectHelmChart, projectHelmChartStatus, err) + return objs, projectHelmChartStatus, nil + } else if err != nil { + return nil, projectHelmChartStatus, err + } + + // get rolebindings that need to be created in release namespace + k8sRolesToRoleRefs, err := h.getSubjectRoleToRoleRefsFromRoles(projectHelmChart) + if err != nil { + return nil, projectHelmChartStatus, fmt.Errorf("unable to get release roles from project release namespace %s for %s/%s: %s", releaseNamespace, projectHelmChart.Namespace, projectHelmChart.Name, err) + } + k8sRolesToSubjects, err := h.getSubjectRoleToSubjectsFromBindings(projectHelmChart) + if err != nil { + return nil, projectHelmChartStatus, fmt.Errorf("unable to get rolebindings to default project operator roles from project registration namespace %s for %s/%s: %s", projectHelmChart.Namespace, projectHelmChart.Namespace, projectHelmChart.Name, err) + } + objs = append(objs, + h.getRoleBindings(projectID, k8sRolesToRoleRefs, k8sRolesToSubjects, projectHelmChart)..., + ) + + // append the helm chart and helm release + objs = append(objs, + h.getHelmChart(projectID, string(valuesContentBytes), projectHelmChart), + h.getHelmRelease(projectID, projectHelmChart), + ) + + // get dashboard values if available + dashboardValues, err := h.getDashboardValuesFromConfigmaps(projectHelmChart) + if err != nil { + return nil, projectHelmChartStatus, fmt.Errorf("unable to get dashboard values from status ConfigMaps: %s", err) + } + if len(dashboardValues) == 0 { + projectHelmChartStatus = h.getWaitingForDashboardValuesStatus(projectHelmChart, projectHelmChartStatus) + } else { + projectHelmChartStatus.DashboardValues = dashboardValues + projectHelmChartStatus = h.getDeployedStatus(projectHelmChart, projectHelmChartStatus) + } + return objs, projectHelmChartStatus, nil +} + +func (h *handler) OnRemove(key string, projectHelmChart *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) { + if projectHelmChart == nil { + return nil, nil + } + + // get information about the projectHelmChart + projectID, err := h.getProjectID(projectHelmChart) + if err != nil { + return projectHelmChart, err + } + + // Get orphaned release namespace and apply it; if another ProjectHelmChart exists in this namespace, it will automatically 
remove + // the orphaned label on enqueuing the namespace since that will enqueue all ProjectHelmCharts associated with it + projectReleaseNamespace := h.getProjectReleaseNamespace(projectID, true, projectHelmChart) + if projectReleaseNamespace == nil { + // nothing to be done since this operator does not create project release namespaces + return projectHelmChart, nil + } + + // Why aren't we modifying the set ID or owner here? + // Since this applier runs without deleting objects whose GVKs indicate that they are namespaces, + // we don't have to worry about another controller using this same set ID (e.g. another Project Operator) + // that will delete this projectReleaseNamespace on seeing it + err = h.apply.ApplyObjects(projectReleaseNamespace) + if err != nil { + return projectHelmChart, fmt.Errorf("unable to add orphaned annotation to project release namespace %s: %s", projectReleaseNamespace.Name, err) + } + return projectHelmChart, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/project/indexers.go b/internal/helm-project-operator/pkg/controllers/project/indexers.go new file mode 100644 index 00000000..1ef9cfcb --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/indexers.go @@ -0,0 +1,141 @@ +package project + +import ( + "fmt" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" +) + +// All namespaces +const ( + // ProjectHelmChartByReleaseName identifies a ProjectHelmChart by the underlying Helm release it is tied to + ProjectHelmChartByReleaseName = "helm.cattle.io/project-helm-chart-by-release-name" +) + +// Registration namespaces only +const ( + // RoleBindingInRegistrationNamespaceByRoleRef identifies the set of RoleBindings in a registration namespace + // that are tied to specific RoleRefs that need to be watched by the operator + RoleBindingInRegistrationNamespaceByRoleRef = "helm.cattle.io/role-binding-in-registration-ns-by-role-ref" + + // ClusterRoleBindingByRoleRef identifies the set of ClusterRoleBindings that are tied to RoleRefs that need + // to be watched by the operator + ClusterRoleBindingByRoleRef = "helm.cattle.io/cluster-role-binding-by-role-ref" + + // BindingReferencesDefaultOperatorRole is the value of both of the above indices when a ClusterRoleBinding or RoleBinding + // is tied to a RoleRef that matches a default ClusterRole that is watched by the operator to create admin, edit, or view RoleBindings + // in the Project Release Namespace + BindingReferencesDefaultOperatorRole = "bound-to-default-role" +) + +// NamespacedBindingReferencesDefaultOperatorRole is the index used to mark a RoleBinding as one that targets +// one of the default operator roles (supplied in RuntimeOptions under AdminClusterRole, EditClusterRole, and ViewClusterRole) +func NamespacedBindingReferencesDefaultOperatorRole(namespace string) string { + return fmt.Sprintf("%s/%s", namespace, BindingReferencesDefaultOperatorRole) +} + +// Release namespaces only +const ( + // RoleInReleaseNamespaceByReleaseNamespaceName identifies a Role in a release namespace that needs to have RBAC synced + // on changes to RoleBindings in the Project Registration Namespace or ClusterRoleBindings. + // The value of this will be the namespace and name of the Helm release that it is for. 
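+ // For illustration (hypothetical names): a Role in the release namespace "cattle-project-p-example" that belongs to the Helm release "project-monitoring" would be indexed under the value "cattle-project-p-example/project-monitoring".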
RoleInReleaseNamespaceByReleaseNamespaceName = "helm.cattle.io/role-in-release-ns-by-release-namespace-name" + + // ConfigMapInReleaseNamespaceByReleaseNamespaceName identifies a ConfigMap in a release namespace that is tied to the + // ProjectHelmChart's status in the release namespace. + // The value of this will be the namespace and name of the Helm release that it is for. + ConfigMapInReleaseNamespaceByReleaseNamespaceName = "helm.cattle.io/configmap-in-release-ns-by-release-namespace-name" +) + +// initIndexers initializes indexers that allow for more efficient computations on related resources by referencing the cache +// instead of making additional calls to the Kubernetes API +func (h *handler) initIndexers() { + h.projectHelmChartCache.AddIndexer(ProjectHelmChartByReleaseName, h.projectHelmChartToReleaseName) + + h.rolebindingCache.AddIndexer(RoleBindingInRegistrationNamespaceByRoleRef, h.roleBindingInRegistrationNamespaceToRoleRef) + + h.clusterrolebindingCache.AddIndexer(ClusterRoleBindingByRoleRef, h.clusterRoleBindingToRoleRef) + + h.roleCache.AddIndexer(RoleInReleaseNamespaceByReleaseNamespaceName, h.roleInReleaseNamespaceToReleaseNamespaceName) + + h.configmapCache.AddIndexer(ConfigMapInReleaseNamespaceByReleaseNamespaceName, h.configMapInReleaseNamespaceToReleaseNamespaceName) +} + +func (h *handler) projectHelmChartToReleaseName(projectHelmChart *v1alpha1.ProjectHelmChart) ([]string, error) { + shouldManage := h.shouldManage(projectHelmChart) + if !shouldManage { + return nil, nil + } + _, releaseName := h.getReleaseNamespaceAndName(projectHelmChart) + return []string{releaseName}, nil +} + +func (h *handler) roleBindingInRegistrationNamespaceToRoleRef(rb *rbacv1.RoleBinding) ([]string, error) { + if rb == nil { + return nil, nil + } + namespace, err := h.namespaceCache.Get(rb.Namespace) + if err != nil { + // If the namespace that the rolebinding resides in does not exist, we don't need to index + // it since it has probably been deleted anyway. 
+ // + // Note: we know that this error would only happen if the namespace is not found since the only valid error returned from this + // call is errors.NewNotFound(c.resource, name) + return nil, nil + } + isProjectRegistrationNamespace := h.projectGetter.IsProjectRegistrationNamespace(namespace) + if !isProjectRegistrationNamespace { + return nil, nil + } + _, isDefaultRoleRef := common.IsDefaultClusterRoleRef(h.opts, rb.RoleRef.Name) + if !isDefaultRoleRef { + // we only care about rolebindings in the registration namespace that are tied to the default roles + // created by this operator + return nil, nil + } + // keep track of this rolebinding in the index so we can grab it later + return []string{NamespacedBindingReferencesDefaultOperatorRole(rb.Namespace)}, nil +} + +func (h *handler) clusterRoleBindingToRoleRef(crb *rbacv1.ClusterRoleBinding) ([]string, error) { + if crb == nil { + return nil, nil + } + _, isDefaultRoleRef := common.IsDefaultClusterRoleRef(h.opts, crb.RoleRef.Name) + if !isDefaultRoleRef { + // we only care about clusterrolebindings that are tied to the default roles + // created by this operator + return nil, nil + } + // keep track of this clusterrolebinding in the index so we can grab it later + return []string{BindingReferencesDefaultOperatorRole}, nil +} + +func (h *handler) roleInReleaseNamespaceToReleaseNamespaceName(role *rbacv1.Role) ([]string, error) { + if role == nil { + return nil, nil + } + return h.getReleaseIndexFromNamespaceAndLabels(role.Namespace, role.Labels, common.HelmProjectOperatorProjectHelmChartRoleLabel) +} + +func (h *handler) configMapInReleaseNamespaceToReleaseNamespaceName(configmap *corev1.ConfigMap) ([]string, error) { + if configmap == nil { + return nil, nil + } + return h.getReleaseIndexFromNamespaceAndLabels(configmap.Namespace, configmap.Labels, common.HelmProjectOperatorDashboardValuesConfigMapLabel) +} + +func (h *handler) getReleaseIndexFromNamespaceAndLabels(namespace string, labels map[string]string, releaseLabel string) ([]string, error) { + if labels == nil { + return nil, nil + } + releaseName, ok := labels[releaseLabel] + if !ok { + return nil, nil + } + + return []string{fmt.Sprintf("%s/%s", namespace, releaseName)}, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/project/merge.go b/internal/helm-project-operator/pkg/controllers/project/merge.go new file mode 100644 index 00000000..a17067e0 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/merge.go @@ -0,0 +1,65 @@ +package project + +import ( + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" +) + +// Copied from https://github.com/rancher/wrangler/v3/blob/004e382969b42fb2f538ffd6699569d30e490428/pkg/data/merge.go#L3-L24 +// Why did we copy the code? 
The logic for checking bothMaps needs to account for more possible types than map[string]interface{}, +// namely v1alpha1.GenericMap and map[interface{}]interface{} + +func MergeMaps(base, overlay map[string]interface{}) map[string]interface{} { + result := map[string]interface{}{} + for k, v := range base { + result[k] = v + } + for k, v := range overlay { + if baseMap, overlayMap, bothMaps := bothMaps(result[k], v); bothMaps { + v = MergeMaps(baseMap, overlayMap) + } + result[k] = v + } + return result +} + +func bothMaps(left, right interface{}) (map[string]interface{}, map[string]interface{}, bool) { + leftMap, isMap := getMap(left) + if !isMap { + return nil, nil, false + } + rightMap, isMap := getMap(right) + if !isMap { + return nil, nil, false + } + return leftMap, rightMap, true +} + +func getMap(entry interface{}) (map[string]interface{}, bool) { + // check if map[string]interface{} + entryMapStringInterface, isMapStringInterface := entry.(map[string]interface{}) + if isMapStringInterface { + return entryMapStringInterface, true + } + + // check if v1alpha1.GenericMap + entryGenericMap, isGenericMap := entry.(v1alpha1.GenericMap) + if isGenericMap { + return entryGenericMap, true + } + + // check if map[interface{}]interface{} + entryMapInterfaceInterface, isMapInterfaceInterface := entry.(map[interface{}]interface{}) + if isMapInterfaceInterface { + return convertMapInterfaceInterfaceToMapStringInterface(entryMapInterfaceInterface) + } + + return nil, false +} + +func convertMapInterfaceInterfaceToMapStringInterface(entry map[interface{}]interface{}) (map[string]interface{}, bool) { + out := make(map[string]interface{}, len(entry)) + for k, v := range entry { + key, isString := k.(string) + if !isString { + return nil, false + } + out[key] = v + } + return out, true +} diff --git a/internal/helm-project-operator/pkg/controllers/project/registrationdata.go b/internal/helm-project-operator/pkg/controllers/project/registrationdata.go new file mode 100644 index 00000000..304d2d1a --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/registrationdata.go @@ -0,0 +1,80 @@ +package project + +import ( + "fmt" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/sirupsen/logrus" + rbacv1 "k8s.io/api/rbac/v1" +) + +// Note: each resource created here should have a resolver set in resolvers.go + +// getSubjectRoleToSubjectsFromBindings gets all RoleBindings in the Project Registration Namespace that need to be synced to assign the corresponding +// permission in the Project Release Namespace. 
See pkg/controllers/project/resources.go for more information on how this is used +func (h *handler) getSubjectRoleToSubjectsFromBindings(projectHelmChart *v1alpha1.ProjectHelmChart) (map[string][]rbacv1.Subject, error) { + defaultClusterRoles := common.GetDefaultClusterRoles(h.opts) + subjectRoleToSubjects := make(map[string][]rbacv1.Subject) + subjectRoleToSubjectMap := make(map[string]map[string]rbacv1.Subject) + if len(defaultClusterRoles) == 0 { + // no roles to get subjects for + return subjectRoleToSubjects, nil + } + for subjectRole := range defaultClusterRoles { + subjectRoleToSubjectMap[subjectRole] = make(map[string]rbacv1.Subject) + } + roleBindings, err := h.rolebindingCache.GetByIndex( + RoleBindingInRegistrationNamespaceByRoleRef, + NamespacedBindingReferencesDefaultOperatorRole(projectHelmChart.Namespace), + ) + if err != nil { + return nil, err + } + for _, rb := range roleBindings { + if rb == nil { + continue + } + subjectRole, isDefaultRoleRef := common.IsDefaultClusterRoleRef(h.opts, rb.RoleRef.Name) + if !isDefaultRoleRef { + logrus.Debugf("Role %s is not a default role for %s", rb.RoleRef.Name, projectHelmChart.Namespace) + continue + } + filteredSubjects := common.FilterToUsersAndGroups(rb.Subjects) + currSubjects := subjectRoleToSubjectMap[subjectRole] + for _, filteredSubject := range filteredSubjects { + // collect into a map to avoid putting duplicates of the same subject + // we use an index of kind and name since a Group can have the same name as a User, but should be considered separate + currSubjects[fmt.Sprintf("%s-%s", filteredSubject.Kind, filteredSubject.Name)] = filteredSubject + } + } + clusterRoleBindings, err := h.clusterrolebindingCache.GetByIndex(ClusterRoleBindingByRoleRef, BindingReferencesDefaultOperatorRole) + if err != nil { + return nil, err + } + for _, crb := range clusterRoleBindings { + if crb == nil { + continue + } + subjectRole, isDefaultRoleRef := common.IsDefaultClusterRoleRef(h.opts, crb.RoleRef.Name) + if !isDefaultRoleRef { + continue + } + filteredSubjects := common.FilterToUsersAndGroups(crb.Subjects) + currSubjects := subjectRoleToSubjectMap[subjectRole] + for _, filteredSubject := range filteredSubjects { + // collect into a map to avoid putting duplicates of the same subject + // we use an index of kind and name since a Group can have the same name as a User, but should be considered separate + currSubjects[fmt.Sprintf("%s-%s", filteredSubject.Kind, filteredSubject.Name)] = filteredSubject + } + } + // convert the deduplicated map back into a list of subjects + for subjectRole := range defaultClusterRoles { + subjects := []rbacv1.Subject{} + for _, subject := range subjectRoleToSubjectMap[subjectRole] { + subjects = append(subjects, subject) + } + subjectRoleToSubjects[subjectRole] = subjects + } + return subjectRoleToSubjects, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/project/releasedata.go b/internal/helm-project-operator/pkg/controllers/project/releasedata.go new file mode 100644 index 00000000..411cc994 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/releasedata.go @@ -0,0 +1,114 @@ +package project + +import ( + "encoding/json" + "fmt" + "strings" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/wrangler/v3/pkg/data" + "github.com/sirupsen/logrus" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +// Note: 
each resource created here should have a resolver set in resolvers.go + +// getDashboardValuesFromConfigmaps returns the generic map that represents a merge of all the contents of all ConfigMaps in the +// Project Release Namespace with the label helm.cattle.io/dashboard-values-configmap: {{ .Release.Name }}. +// +// Generally, these ConfigMaps should be part of the deployed Helm chart and should not have conflicts with each other. +// It's also a common pattern to only have a single ConfigMap that this refers to. +func (h *handler) getDashboardValuesFromConfigmaps(projectHelmChart *v1alpha1.ProjectHelmChart) (v1alpha1.GenericMap, error) { + releaseNamespace, releaseName := h.getReleaseNamespaceAndName(projectHelmChart) + exists, err := h.verifyReleaseNamespaceExists(releaseNamespace) + if err != nil { + return nil, err + } + if !exists { + return nil, nil + } + configMaps, err := h.configmapCache.GetByIndex(ConfigMapInReleaseNamespaceByReleaseNamespaceName, fmt.Sprintf("%s/%s", releaseNamespace, releaseName)) + if err != nil { + return nil, err + } + var values v1alpha1.GenericMap + for _, configMap := range configMaps { + if configMap == nil { + continue + } + for jsonKey, jsonContent := range configMap.Data { + if !strings.HasSuffix(jsonKey, ".json") { + logrus.Errorf("dashboard values configmap %s/%s has non-JSON key %s, expected only keys ending with .json. skipping...", configMap.Namespace, configMap.Name, jsonKey) + continue + } + var jsonMap map[string]interface{} + err := json.Unmarshal([]byte(jsonContent), &jsonMap) + if err != nil { + logrus.Errorf("could not unmarshal content in dashboard values configmap %s/%s in key %s (err='%s'). skipping...", configMap.Namespace, configMap.Name, jsonKey, err) + continue + } + values = data.MergeMapsConcatSlice(values, jsonMap) + } + } + return values, nil +} + +// getSubjectRoleToRoleRefsFromRoles gets all Roles in the Project Release Namespace that need RoleBindings to be created automatically +// based on permissions set in the Project Registration namespace. 
See pkg/controllers/project/resources.go for more information on how this is used +func (h *handler) getSubjectRoleToRoleRefsFromRoles(projectHelmChart *v1alpha1.ProjectHelmChart) (map[string][]rbacv1.RoleRef, error) { + subjectRoleToRoleRefs := make(map[string][]rbacv1.RoleRef) + for subjectRole := range common.GetDefaultClusterRoles(h.opts) { + subjectRoleToRoleRefs[subjectRole] = []rbacv1.RoleRef{} + } + if len(subjectRoleToRoleRefs) == 0 { + // no roles were defined to be auto-aggregated + return subjectRoleToRoleRefs, nil + } + releaseNamespace, releaseName := h.getReleaseNamespaceAndName(projectHelmChart) + exists, err := h.verifyReleaseNamespaceExists(releaseNamespace) + if err != nil { + return nil, err + } + if !exists { + return nil, nil + } + roles, err := h.roleCache.GetByIndex(RoleInReleaseNamespaceByReleaseNamespaceName, fmt.Sprintf("%s/%s", releaseNamespace, releaseName)) + if err != nil { + return nil, err + } + for _, role := range roles { + if role == nil { + continue + } + subjectRole, ok := role.Labels[common.HelmProjectOperatorProjectHelmChartRoleAggregateFromLabel] + if !ok { + // cannot assign roles if this label is not provided + continue + } + roleRefs, ok := subjectRoleToRoleRefs[subjectRole] + if !ok { + // label value is invalid since it does not point to default subject role name + continue + } + subjectRoleToRoleRefs[subjectRole] = append(roleRefs, rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: role.Name, + }) + } + return subjectRoleToRoleRefs, nil +} + +func (h *handler) verifyReleaseNamespaceExists(releaseNamespace string) (bool, error) { + _, err := h.namespaceCache.Get(releaseNamespace) + if err != nil { + if apierrors.IsNotFound(err) { + // release namespace has not been created yet + return false, nil + } + return false, err + } + return true, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/project/resolvers.go b/internal/helm-project-operator/pkg/controllers/project/resolvers.go new file mode 100644 index 00000000..880562e5 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/resolvers.go @@ -0,0 +1,244 @@ +package project + +import ( + "context" + + helmcontrollerv1 "github.com/k3s-io/helm-controller/pkg/apis/helm.cattle.io/v1" + helmlockerv1alpha1 "github.com/rancher/helm-locker/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/relatedresource" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// Note: each resource created in resources.go, registrationdata.go, or releasedata.go should have a resolver handler here +// The only exception is ProjectHelmCharts since those are handled by the main generating controller + +// initResolvers initializes resolvers that need to be set to watch child resources of ProjectHelmCharts +func (h *handler) initResolvers(ctx context.Context) { + if len(h.opts.ProjectLabel) != 0 && len(h.opts.ProjectReleaseLabelValue) == 0 { + // Only trigger watching project release namespace if it is created by the operator + relatedresource.Watch( + ctx, "watch-project-release-namespace", h.resolveProjectReleaseNamespace, h.projectHelmCharts, + h.namespaces, + ) + } + + relatedresource.Watch( + ctx, "watch-system-namespace-chart-data", h.resolveSystemNamespaceData, h.projectHelmCharts, + h.helmCharts, h.helmReleases, + ) + 
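+ // For illustration (hypothetical names): if a HelmChart or HelmRelease named "project-monitoring" in the system namespace changes, the resolver registered above maps it back to the owning ProjectHelmChart through its wrangler apply annotations so that the ProjectHelmChart is re-enqueued and reconciled.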
+ relatedresource.Watch( + ctx, "watch-project-registration-chart-data", h.resolveProjectRegistrationNamespaceData, h.projectHelmCharts, + h.rolebindings, h.clusterrolebindings, + ) + + relatedresource.Watch( + ctx, "watch-project-release-chart-data", h.resolveProjectReleaseNamespaceData, h.projectHelmCharts, + h.rolebindings, h.configmaps, h.roles, + ) +} + +// Project Release Namespace + +func (h *handler) resolveProjectReleaseNamespace(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) { + if obj == nil { + return nil, nil + } + ns, ok := obj.(*corev1.Namespace) + if !ok { + return nil, nil + } + // since the release namespace will be created and owned by the ProjectHelmChart, + // we can simply leverage its annotations to identify what we should resolve to. + // If the release namespace is orphaned, the owner annotation should be removed automatically + return h.resolveProjectHelmChartOwned(ns.Annotations) +} + +// System Namespace Data + +func (h *handler) resolveSystemNamespaceData(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) { + if namespace != h.systemNamespace { + return nil, nil + } + if obj == nil { + return nil, nil + } + // since the HelmChart and HelmRelease will be created and owned by the ProjectHelmChart, + // we can simply leverage its annotations to identify what we should resolve to. + if helmChart, ok := obj.(*helmcontrollerv1.HelmChart); ok { + return h.resolveProjectHelmChartOwned(helmChart.Annotations) + } + if helmRelease, ok := obj.(*helmlockerv1alpha1.HelmRelease); ok { + return h.resolveProjectHelmChartOwned(helmRelease.Annotations) + } + return nil, nil +} + +// Project Registration Namespace Data + +func (h *handler) resolveProjectRegistrationNamespaceData(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) { + if obj == nil { + return nil, nil + } + if rb, ok := obj.(*rbacv1.RoleBinding); ok { + logrus.Debugf("Resolving project registration namespace rolebindings for %s", namespace) + return h.resolveProjectRegistrationNamespaceRoleBinding(namespace, name, rb) + } + if crb, ok := obj.(*rbacv1.ClusterRoleBinding); ok { + return h.resolveClusterRoleBinding(namespace, name, crb) + } + return nil, nil +} + +func (h *handler) resolveProjectRegistrationNamespaceRoleBinding(namespace, name string, rb *rbacv1.RoleBinding) ([]relatedresource.Key, error) { + namespaceObj, err := h.namespaceCache.Get(namespace) + if err != nil { + logrus.Debugf("namespace %s not found", namespace) + return nil, err + } + isProjectRegistrationNamespace := h.projectGetter.IsProjectRegistrationNamespace(namespaceObj) + if !isProjectRegistrationNamespace { + logrus.Debugf("%s is not a project registration namespace", namespace) + return nil, nil + } + + // we want to re-enqueue the ProjectHelmChart if the rolebinding's ref points to one of the operator default roles + _, isDefaultRoleRef := common.IsDefaultClusterRoleRef(h.opts, rb.RoleRef.Name) + if !isDefaultRoleRef { + return nil, nil + } + // re-enqueue all ProjectHelmCharts in this project registration namespace + projectHelmCharts, err := h.projectHelmChartCache.List(namespace, labels.Everything()) + if err != nil { + logrus.Debugf("Error in resolveProjectRegistrationNamespaceRoleBinding while re-enqueuing ProjectHelmCharts in %s", namespace) + return nil, err + } + var keys []relatedresource.Key + for _, projectHelmChart := range projectHelmCharts { + if projectHelmChart == nil { + continue + } + keys = 
append(keys, relatedresource.Key{ + Namespace: namespace, + Name: projectHelmChart.Name, + }) + } + return keys, nil +} + +func (h *handler) resolveClusterRoleBinding(namespace, name string, crb *rbacv1.ClusterRoleBinding) ([]relatedresource.Key, error) { + // we want to re-enqueue the ProjectHelmChart if the clusterrolebinding's ref points to one of the operator default roles + _, isDefaultRoleRef := common.IsDefaultClusterRoleRef(h.opts, crb.RoleRef.Name) + if !isDefaultRoleRef { + return nil, nil + } + // re-enqueue all ProjectHelmCharts in all Project Registration namespaces + namespaces, err := h.namespaceCache.List(labels.Everything()) + if err != nil { + return nil, err + } + var keys []relatedresource.Key + for _, namespace := range namespaces { + if namespace == nil { + continue + } + isProjectRegistrationNamespace := h.projectGetter.IsProjectRegistrationNamespace(namespace) + if !isProjectRegistrationNamespace { + continue + } + projectHelmCharts, err := h.projectHelmChartCache.List(namespace.Name, labels.Everything()) + if err != nil { + logrus.Debugf("Error in resolveClusterRoleBinding while re-enqueuing ProjectHelmCharts in %s", namespace.Name) + return nil, err + } + for _, projectHelmChart := range projectHelmCharts { + if projectHelmChart == nil { + continue + } + keys = append(keys, relatedresource.Key{ + Namespace: projectHelmChart.Namespace, + Name: projectHelmChart.Name, + }) + } + } + return keys, nil +} + +// Project Release Namespace Data + +func (h *handler) resolveProjectReleaseNamespaceData(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) { + if obj == nil { + return nil, nil + } + if rb, ok := obj.(*rbacv1.RoleBinding); ok { + // since the rolebinding will be created and owned by the ProjectHelmChart, + // we can simply leverage its annotations to identify what we should resolve to. + return h.resolveProjectHelmChartOwned(rb.Annotations) + } + if configmap, ok := obj.(*corev1.ConfigMap); ok { + return h.resolveByProjectReleaseLabelValue(configmap.Labels, common.HelmProjectOperatorDashboardValuesConfigMapLabel) + } + if role, ok := obj.(*rbacv1.Role); ok { + return h.resolveByProjectReleaseLabelValue(role.Labels, common.HelmProjectOperatorProjectHelmChartRoleLabel) + } + return nil, nil +} + +// Common + +func (h *handler) resolveProjectHelmChartOwned(annotations map[string]string) ([]relatedresource.Key, error) { + // Q: Why aren't we using relatedresource.OwnerResolver? + // A: in k8s, you can't set an owner reference across namespaces, which means that when --project-label is provided + // (where the ProjectHelmChart will be outside the systemNamespace where the HelmCharts and HelmReleases are created), + // ownerReferences will not be set on the object. However, wrangler annotations will be set since those objects are + // created via a wrangler apply. 
Therefore, we leverage those annotations to figure out which ProjectHelmChart to enqueue + if annotations == nil { + return nil, nil + } + ownerNamespace, ok := annotations[apply.LabelNamespace] + if !ok { + return nil, nil + } + ownerName, ok := annotations[apply.LabelName] + if !ok { + return nil, nil + } + + return []relatedresource.Key{{ + Namespace: ownerNamespace, + Name: ownerName, + }}, nil +} + +func (h *handler) resolveByProjectReleaseLabelValue(labels map[string]string, projectReleaseLabel string) ([]relatedresource.Key, error) { + if labels == nil { + return nil, nil + } + releaseName, ok := labels[projectReleaseLabel] + if !ok { + return nil, nil + } + projectHelmCharts, err := h.projectHelmChartCache.GetByIndex(ProjectHelmChartByReleaseName, releaseName) + if err != nil { + return nil, err + } + var keys []relatedresource.Key + for _, projectHelmChart := range projectHelmCharts { + if projectHelmChart == nil { + continue + } + keys = append(keys, relatedresource.Key{ + Namespace: projectHelmChart.Namespace, + Name: projectHelmChart.Name, + }) + } + return keys, nil +} diff --git a/internal/helm-project-operator/pkg/controllers/project/resources.go b/internal/helm-project-operator/pkg/controllers/project/resources.go new file mode 100644 index 00000000..a1b6dabf --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/resources.go @@ -0,0 +1,111 @@ +package project + +import ( + helmcontrollerv1 "github.com/k3s-io/helm-controller/pkg/apis/helm.cattle.io/v1" + "github.com/k3s-io/helm-controller/pkg/controllers/chart" + helmlockerv1alpha1 "github.com/rancher/helm-locker/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-locker/pkg/controllers/release" + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Note: each resource created here should have a resolver set in resolvers.go +// The only exception is ProjectHelmCharts since those are handled by the main generating controller + +// getHelmChart returns the HelmChart created on behalf of this ProjectHelmChart +func (h *handler) getHelmChart(projectID string, valuesContent string, projectHelmChart *v1alpha1.ProjectHelmChart) *helmcontrollerv1.HelmChart { + // must be in system namespace since helm controllers are configured to only watch one namespace + jobImage := DefaultJobImage + if len(h.opts.HelmJobImage) > 0 { + jobImage = h.opts.HelmJobImage + } + releaseNamespace, releaseName := h.getReleaseNamespaceAndName(projectHelmChart) + helmChart := helmcontrollerv1.NewHelmChart(h.systemNamespace, releaseName, helmcontrollerv1.HelmChart{ + Spec: helmcontrollerv1.HelmChartSpec{ + TargetNamespace: releaseNamespace, + Chart: releaseName, + JobImage: jobImage, + ChartContent: h.opts.ChartContent, + ValuesContent: valuesContent, + }, + }) + helmChart.SetLabels(common.GetHelmResourceLabels(projectID, projectHelmChart.Spec.HelmAPIVersion)) + helmChart.SetAnnotations(map[string]string{ + chart.ManagedBy: h.opts.ControllerName, + }) + return helmChart +} + +// getHelmRelease returns the HelmRelease created on behalf of this ProjectHelmChart +func (h *handler) getHelmRelease(projectID string, projectHelmChart *v1alpha1.ProjectHelmChart) *helmlockerv1alpha1.HelmRelease { + // must be in system namespace since helmlocker controllers are configured to only watch one 
namespace + releaseNamespace, releaseName := h.getReleaseNamespaceAndName(projectHelmChart) + helmRelease := helmlockerv1alpha1.NewHelmRelease(h.systemNamespace, releaseName, helmlockerv1alpha1.HelmRelease{ + Spec: helmlockerv1alpha1.HelmReleaseSpec{ + Release: helmlockerv1alpha1.ReleaseKey{ + Namespace: releaseNamespace, + Name: releaseName, + }, + }, + }) + helmRelease.SetLabels(common.GetHelmResourceLabels(projectID, projectHelmChart.Spec.HelmAPIVersion)) + helmRelease.SetAnnotations(map[string]string{ + release.ManagedBy: h.opts.ControllerName, + }) + return helmRelease +} + +// getProjectReleaseNamespace returns the Project Release Namespace created on behalf of this ProjectHelmChart, if required +func (h *handler) getProjectReleaseNamespace(projectID string, isOrphaned bool, projectHelmChart *v1alpha1.ProjectHelmChart) *v1.Namespace { + releaseNamespace, _ := h.getReleaseNamespaceAndName(projectHelmChart) + if releaseNamespace == h.systemNamespace || releaseNamespace == projectHelmChart.Namespace { + return nil + } + projectReleaseNamespace := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: releaseNamespace, + Annotations: common.GetProjectNamespaceAnnotations(h.opts.ProjectReleaseLabelValue, h.opts.ProjectLabel, h.opts.ClusterID), + Labels: common.GetProjectNamespaceLabels(projectID, h.opts.ProjectLabel, h.opts.ProjectReleaseLabelValue, isOrphaned), + }, + } + return projectReleaseNamespace +} + +// getRoleBindings returns the RoleBindings created on behalf of this ProjectHelmChart in the Project Release Namespace based on Roles created in the +// Project Release Namespace and RoleBindings attached to the default operator roles (configured as AdminClusterRole, EditClusterRole, and ViewClusterRole +// in the provided RuntimeOptions) in the Project Registration Namespace only. To update these RoleBindings in the release namespace, you will need to assign +// additional permissions to the default roles in the Project Registration Namespace or manually assign RoleBindings in the release namespace. 
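+// For example (hypothetical names): if a RoleBinding in the Project Registration Namespace binds user "alice" to the configured EditClusterRole, and the deployed chart created a Role labeled helm.cattle.io/project-helm-chart-role-aggregate-from: edit in the Project Release Namespace, this handler emits a RoleBinding in the release namespace granting "alice" that Role.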
+func (h *handler) getRoleBindings(projectID string, k8sRoleToRoleRefs map[string][]rbacv1.RoleRef, k8sRoleToSubjects map[string][]rbacv1.Subject, projectHelmChart *v1alpha1.ProjectHelmChart) []runtime.Object { + var objs []runtime.Object + releaseNamespace, _ := h.getReleaseNamespaceAndName(projectHelmChart) + + for subjectRole := range common.GetDefaultClusterRoles(h.opts) { + // note: these role refs point to roles in the release namespace + roleRefs := k8sRoleToRoleRefs[subjectRole] + // note: these subjects are inferred from the rolebindings tied to the default roles in the registration namespace + subjects := k8sRoleToSubjects[subjectRole] + if len(subjects) == 0 { + // no need to create empty RoleBindings + continue + } + for _, roleRef := range roleRefs { + objs = append(objs, &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleRef.Name, + Namespace: releaseNamespace, + Labels: common.GetCommonLabels(projectID), + }, + RoleRef: roleRef, + Subjects: subjects, + }) + } + } + + return objs +} diff --git a/internal/helm-project-operator/pkg/controllers/project/status.go b/internal/helm-project-operator/pkg/controllers/project/status.go new file mode 100644 index 00000000..40f50a6e --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/status.go @@ -0,0 +1,69 @@ +package project + +import ( + "fmt" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" +) + +// getCleanupStatus returns the status on seeing the cleanup label on a ProjectHelmChart +func (h *handler) getCleanupStatus(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus) v1alpha1.ProjectHelmChartStatus { + return v1alpha1.ProjectHelmChartStatus{ + Status: "AwaitingOperatorRedeployment", + StatusMessage: fmt.Sprintf( + "ProjectHelmChart was marked with label %s=true, which indicates that the resource should be cleaned up "+ + "until the Project Operator that responds to ProjectHelmCharts in %s with spec.helmApiVersion=%s "+ + "is redeployed onto the cluster. 
On redeployment, this label will automatically be removed by the operator.", + common.HelmProjectOperatedCleanupLabel, projectHelmChart.Namespace, projectHelmChart.Spec.HelmAPIVersion, + ), + } +} + +// getUnableToCreateHelmReleaseStatus returns the status on seeing a conflicting ProjectHelmChart already tracking the desired Helm release +func (h *handler) getUnableToCreateHelmReleaseStatus(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus, err error) v1alpha1.ProjectHelmChartStatus { + releaseNamespace, releaseName := h.getReleaseNamespaceAndName(projectHelmChart) + return v1alpha1.ProjectHelmChartStatus{ + Status: "UnableToCreateHelmRelease", + StatusMessage: fmt.Sprintf( + "Unable to create a release (%s/%s) for ProjectHelmChart: %s", + releaseNamespace, releaseName, err, + ), + } +} + +// getNoTargetNamespacesStatus returns the status on seeing that a ProjectHelmChart's projectNamespaceSelector (or +// the Project Registration Namespace's namespaceSelector) targets no namespaces +func (h *handler) getNoTargetNamespacesStatus(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus) v1alpha1.ProjectHelmChartStatus { + return v1alpha1.ProjectHelmChartStatus{ + Status: "NoTargetProjectNamespaces", + StatusMessage: "There are no project namespaces to deploy a ProjectHelmChart.", + } +} + +// getValuesParseErrorStatus returns the status on encountering an error with parsing the provided contents of spec.values on the ProjectHelmChart +func (h *handler) getValuesParseErrorStatus(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus, err error) v1alpha1.ProjectHelmChartStatus { + // retain existing status if possible + projectHelmChartStatus.Status = "UnableToParseValues" + projectHelmChartStatus.StatusMessage = fmt.Sprintf("Unable to convert provided spec.values into valid configuration of ProjectHelmChart: %s", err) + return projectHelmChartStatus +} + +// getWaitingForDashboardValuesStatus returns the transitionary status that occurs after deploying a Helm chart but before a dashboard configmap is created +// If a ProjectHelmChart is stuck in this status, it is likely either an issue with the Operator not creating this ConfigMap or an issue +// with the underlying Job run by the child HelmChart resource created on this ProjectHelmChart's behalf +func (h *handler) getWaitingForDashboardValuesStatus(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus) v1alpha1.ProjectHelmChartStatus { + // retain existing status + projectHelmChartStatus.Status = "WaitingForDashboardValues" + projectHelmChartStatus.StatusMessage = "Waiting for status.dashboardValues content to be provided by the deployed Helm release; the HelmChart and HelmRelease have been deployed." + projectHelmChartStatus.DashboardValues = nil + return projectHelmChartStatus +} + +// getDeployedStatus returns the status that indicates the ProjectHelmChart is successfully deployed +func (h *handler) getDeployedStatus(projectHelmChart *v1alpha1.ProjectHelmChart, projectHelmChartStatus v1alpha1.ProjectHelmChartStatus) v1alpha1.ProjectHelmChartStatus { + // retain existing status + projectHelmChartStatus.Status = "Deployed" + projectHelmChartStatus.StatusMessage = "ProjectHelmChart has been successfully deployed!" 
+ return projectHelmChartStatus +} diff --git a/internal/helm-project-operator/pkg/controllers/project/utils.go b/internal/helm-project-operator/pkg/controllers/project/utils.go new file mode 100644 index 00000000..17704983 --- /dev/null +++ b/internal/helm-project-operator/pkg/controllers/project/utils.go @@ -0,0 +1,70 @@ +package project + +import ( + "fmt" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/helm-project-operator/pkg/controllers/common" +) + +// getProjectID returns the projectID tied to this ProjectHelmChart +func (h *handler) getProjectID(projectHelmChart *v1alpha1.ProjectHelmChart) (string, error) { + if len(h.opts.ProjectLabel) == 0 { + // use the projectHelmChart's name as the projectID + return projectHelmChart.Name, nil + } + projectRegistrationNamespace, err := h.namespaceCache.Get(projectHelmChart.Namespace) + if err != nil { + return "", fmt.Errorf("unable to parse projectID for projectHelmChart %s/%s: %s", projectHelmChart.Namespace, projectHelmChart.Name, err) + } + projectID, ok := projectRegistrationNamespace.Labels[h.opts.ProjectLabel] + if !ok { + return "", nil + } + return projectID, nil +} + +// getProjectNamespaceSelector returns the projectNamespaceSelector tied to this ProjectHelmChart +func (h *handler) getProjectNamespaceSelector(projectHelmChart *v1alpha1.ProjectHelmChart, projectID string) map[string]interface{} { + if len(h.opts.ProjectLabel) == 0 { + // Use the projectHelmChart selector as the namespaceSelector + if projectHelmChart.Spec.ProjectNamespaceSelector == nil { + return map[string]interface{}{} + } + return map[string]interface{}{ + "matchLabels": projectHelmChart.Spec.ProjectNamespaceSelector.MatchLabels, + "matchExpressions": projectHelmChart.Spec.ProjectNamespaceSelector.MatchExpressions, + } + } + if len(h.opts.ProjectReleaseLabelValue) == 0 { + // Release namespace is not created, so use the namespaceSelector tied to the projectID + return map[string]interface{}{ + "matchLabels": map[string]string{ + h.opts.ProjectLabel: projectID, + }, + } + } + // use the HelmProjectOperated label + return map[string]interface{}{ + "matchLabels": map[string]string{ + common.HelmProjectOperatorProjectLabel: projectID, + }, + } +} + +// getReleaseNamespaceAndName returns the name of the Project Release namespace and the name of the Helm Release +// that will be deployed into the Project Release namespace on behalf of the ProjectHelmChart +func (h *handler) getReleaseNamespaceAndName(projectHelmChart *v1alpha1.ProjectHelmChart) (string, string) { + projectReleaseName := fmt.Sprintf("%s-%s", projectHelmChart.Name, h.opts.ReleaseName) + if h.opts.Singleton { + // This changes the naming scheme of the deployed resources such that only one can ever be created per namespace + projectReleaseName = fmt.Sprintf("%s-%s", projectHelmChart.Namespace, h.opts.ReleaseName) + } + if len(h.opts.ProjectLabel) == 0 || len(h.opts.ProjectReleaseLabelValue) == 0 { + // Underlying Helm releases will be created in the namespace where the ProjectHelmChart is registered (project registration namespace) + // The project registration namespace will either be the system namespace or auto-generated namespaces depending on the user values provided + return projectHelmChart.Namespace, projectReleaseName + } + // Underlying Helm releases will be created in dedicated project release namespaces + return projectReleaseName, projectReleaseName +} diff --git 
new file mode 100644
index 00000000..b0bed93a
--- /dev/null
+++ b/internal/helm-project-operator/pkg/controllers/project/values.go
@@ -0,0 +1,41 @@
+package project
+
+import (
+	v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1"
+)
+
+// getValues returns the values.yaml that should be applied for this ProjectHelmChart after processing default and required overrides
+func (h *handler) getValues(projectHelmChart *v1alpha1.ProjectHelmChart, projectID string, targetProjectNamespaces []string) v1alpha1.GenericMap {
+	// default values that are set if the user does not provide them
+	values := map[string]interface{}{
+		"global": map[string]interface{}{
+			"cattle": map[string]interface{}{
+				"systemDefaultRegistry": h.opts.SystemDefaultRegistry,
+				"url":                   h.opts.CattleURL,
+			},
+		},
+	}
+
+	// overlay user-provided values, which override the defaults above
+	values = MergeMaps(values, projectHelmChart.Spec.Values)
+
+	// overlay operator-provided values overrides, which take precedence even over user-provided values
+	values = MergeMaps(values, h.valuesOverride)
+
+	// required project-based values that must be set even if the user tries to override them
+	requiredOverrides := map[string]interface{}{
+		"global": map[string]interface{}{
+			"cattle": map[string]interface{}{
+				"clusterId":                h.opts.ClusterID,
+				"projectNamespaces":        targetProjectNamespaces,
+				"projectID":                projectID,
+				"releaseProjectID":         h.opts.ProjectReleaseLabelValue,
+				"projectNamespaceSelector": h.getProjectNamespaceSelector(projectHelmChart, projectID),
+			},
+		},
+	}
+	// overlay required values, which always take precedence
+	values = MergeMaps(values, requiredOverrides)
+
+	return values
+}
diff --git a/internal/helm-project-operator/pkg/crd/crds.go b/internal/helm-project-operator/pkg/crd/crds.go
new file mode 100644
index 00000000..e7116889
--- /dev/null
+++ b/internal/helm-project-operator/pkg/crd/crds.go
@@ -0,0 +1,180 @@
+package crd
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	helmcontrollercrd "github.com/k3s-io/helm-controller/pkg/crd"
+	helmlockercrd "github.com/rancher/helm-locker/pkg/crd"
+	v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1"
+	"github.com/rancher/wrangler/v3/pkg/crd"
+	"github.com/rancher/wrangler/v3/pkg/yaml"
+	"github.com/sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/rest"
+)
+
+// WriteFiles writes CRDs and dependent CRDs to the paths specified.
+//
+// Note: it is recommended to write this operator's own CRDs to the templates directory (or similar) and to write
+// CRD dependencies to the crds/ directory, since an uninstall or upgrade of the CRD chart should not
+// destroy existing dependent CRDs in the cluster; that could break other components.
+//
+// e.g. if you uninstall the HelmChart CRD, it can destroy an RKE2 or K3s cluster that also uses those CRs
+// to manage internal Kubernetes component state
+func WriteFiles(crdDirpath, crdDepDirpath string) error {
+	objs, depObjs, err := Objects(false)
+	if err != nil {
+		return err
+	}
+	if err := writeFiles(crdDirpath, objs); err != nil {
+		return err
+	}
+	return writeFiles(crdDepDirpath, depObjs)
+}
+
+func writeFiles(dirpath string, objs []runtime.Object) error {
+	if err := os.MkdirAll(dirpath, 0755); err != nil {
+		return err
+	}
+
+	objMap := make(map[string][]byte)
+
+	for _, o := range objs {
+		data, err := yaml.Export(o)
+		if err != nil {
+			return err
+		}
+		meta, err := meta.Accessor(o)
+		if err != nil {
+			return err
+		}
+		key := strings.SplitN(meta.GetName(), ".", 2)[0]
+		objMap[key] = data
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(objMap))
+	for key, data := range objMap {
+		go func(key string, data []byte) {
+			defer wg.Done()
+			f, err := os.Create(filepath.Join(dirpath, fmt.Sprintf("crd-%s.yaml", key)))
+			if err != nil {
+				logrus.Error(err)
+				return
+			}
+			defer f.Close()
+			if _, err := f.Write(data); err != nil {
+				logrus.Error(err)
+			}
+		}(key, data)
+	}
+	wg.Wait()
+
+	return nil
+}
+
+// Print prints CRDs to out and dependent CRDs to depOut
+func Print(out io.Writer, depOut io.Writer) {
+	objs, depObjs, err := Objects(false)
+	if err != nil {
+		logrus.Fatalf("%s", err)
+	}
+	if err := print(out, objs); err != nil {
+		logrus.Fatalf("%s", err)
+	}
+	if err := print(depOut, depObjs); err != nil {
+		logrus.Fatalf("%s", err)
+	}
+}
+
+func print(out io.Writer, objs []runtime.Object) error {
+	data, err := yaml.Export(objs...)
+	if err != nil {
+		return err
+	}
+	_, err = out.Write(data)
+	return err
+}
+
+// Objects returns runtime.Objects for every CRD or CRD dependency this operator relies on
+func Objects(v1beta1 bool) (crds, crdDeps []runtime.Object, err error) {
+	crdDefs, crdDepDefs := List()
+	crds, err = objects(v1beta1, crdDefs)
+	if err != nil {
+		return nil, nil, err
+	}
+	crdDeps, err = objects(v1beta1, crdDepDefs)
+	if err != nil {
+		return nil, nil, err
+	}
+	return
+}
+
+func objects(v1beta1 bool, crdDefs []crd.CRD) (crds []runtime.Object, err error) {
+	for _, crdDef := range crdDefs {
+		if v1beta1 {
+			crd, err := crdDef.ToCustomResourceDefinitionV1Beta1()
+			if err != nil {
+				return nil, err
+			}
+			crds = append(crds, crd)
+		} else {
+			crd, err := crdDef.ToCustomResourceDefinition()
+			if err != nil {
+				return nil, err
+			}
+			crds = append(crds, crd)
+		}
+	}
+	return
+}
+
+// List returns the list of CRDs and dependent CRDs for this operator
+func List() ([]crd.CRD, []crd.CRD) {
+	crds := []crd.CRD{
+		newCRD(&v1alpha1.ProjectHelmChart{}, func(c crd.CRD) crd.CRD {
+			return c.
+				WithColumn("Status", ".status.status").
+				WithColumn("System Namespace", ".status.systemNamespace").
+				WithColumn("Release Namespace", ".status.releaseNamespace").
+				WithColumn("Release Name", ".status.releaseName").
+				WithColumn("Target Namespaces", ".status.targetNamespaces")
+		}),
+	}
+	crdDeps := append(helmcontrollercrd.List(), helmlockercrd.List()...)
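+	// crdDeps are CRDs owned by other components that this operator depends on: HelmCharts from k3s-io/helm-controller and HelmReleases from rancher/helm-locker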
+ return crds, crdDeps +} + +// Create creates all CRDs and dependent CRDs in the cluster +func Create(ctx context.Context, cfg *rest.Config) error { + factory, err := crd.NewFactoryFromClient(cfg) + if err != nil { + return err + } + + crds, crdDeps := List() + return factory.BatchCreateCRDs(ctx, append(crds, crdDeps...)...).BatchWait() +} + +func newCRD(obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD { + crd := crd.CRD{ + GVK: schema.GroupVersionKind{ + Group: "helm.cattle.io", + Version: "v1alpha1", + }, + Status: true, + SchemaObject: obj, + } + if customize != nil { + crd = customize(crd) + } + return crd +} diff --git a/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/factory.go b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/factory.go new file mode 100644 index 00000000..87186f5c --- /dev/null +++ b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/factory.go @@ -0,0 +1,67 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package helm + +import ( + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/client-go/rest" +) + +type Factory struct { + *generic.Factory +} + +func NewFactoryFromConfigOrDie(config *rest.Config) *Factory { + f, err := NewFactoryFromConfig(config) + if err != nil { + panic(err) + } + return f +} + +func NewFactoryFromConfig(config *rest.Config) (*Factory, error) { + return NewFactoryFromConfigWithOptions(config, nil) +} + +func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) { + return NewFactoryFromConfigWithOptions(config, &FactoryOptions{ + Namespace: namespace, + }) +} + +type FactoryOptions = generic.FactoryOptions + +func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { + f, err := generic.NewFactoryFromConfigWithOptions(config, opts) + return &Factory{ + Factory: f, + }, err +} + +func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory { + f, err := NewFactoryFromConfigWithOptions(config, opts) + if err != nil { + panic(err) + } + return f +} + +func (c *Factory) Helm() Interface { + return New(c.ControllerFactory()) +} diff --git a/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/interface.go b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/interface.go new file mode 100644 index 00000000..bc93dd0f --- /dev/null +++ b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/interface.go @@ -0,0 +1,43 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package helm + +import ( + v1alpha1 "github.com/rancher/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1" + "github.com/rancher/lasso/pkg/controller" +) + +type Interface interface { + V1alpha1() v1alpha1.Interface +} + +type group struct { + controllerFactory controller.SharedControllerFactory +} + +// New returns a new Interface. +func New(controllerFactory controller.SharedControllerFactory) Interface { + return &group{ + controllerFactory: controllerFactory, + } +} + +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.controllerFactory) +} diff --git a/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1/interface.go b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1/interface.go new file mode 100644 index 00000000..a0c302af --- /dev/null +++ b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1/interface.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/v3/pkg/schemes" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func init() { + schemes.Register(v1alpha1.AddToScheme) +} + +type Interface interface { + ProjectHelmChart() ProjectHelmChartController +} + +func New(controllerFactory controller.SharedControllerFactory) Interface { + return &version{ + controllerFactory: controllerFactory, + } +} + +type version struct { + controllerFactory controller.SharedControllerFactory +} + +func (c *version) ProjectHelmChart() ProjectHelmChartController { + return NewProjectHelmChartController(schema.GroupVersionKind{Group: "helm.cattle.io", Version: "v1alpha1", Kind: "ProjectHelmChart"}, "projecthelmcharts", true, c.controllerFactory) +} diff --git a/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1/projecthelmchart.go b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1/projecthelmchart.go new file mode 100644 index 00000000..4ce6be28 --- /dev/null +++ b/internal/helm-project-operator/pkg/generated/controllers/helm.cattle.io/v1alpha1/projecthelmchart.go @@ -0,0 +1,376 @@ +/* +Copyright 2023 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/rancher/helm-project-operator/pkg/apis/helm.cattle.io/v1alpha1" + "github.com/rancher/lasso/pkg/client" + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +type ProjectHelmChartHandler func(string, *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) + +type ProjectHelmChartController interface { + generic.ControllerMeta + ProjectHelmChartClient + + OnChange(ctx context.Context, name string, sync ProjectHelmChartHandler) + OnRemove(ctx context.Context, name string, sync ProjectHelmChartHandler) + Enqueue(namespace, name string) + EnqueueAfter(namespace, name string, duration time.Duration) + + Cache() ProjectHelmChartCache +} + +type ProjectHelmChartClient interface { + Create(*v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) + Update(*v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) + UpdateStatus(*v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) + Delete(namespace, name string, options *metav1.DeleteOptions) error + Get(namespace, name string, options metav1.GetOptions) (*v1alpha1.ProjectHelmChart, error) + List(namespace string, opts metav1.ListOptions) (*v1alpha1.ProjectHelmChartList, error) + Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) + Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ProjectHelmChart, err error) +} + +type ProjectHelmChartCache interface { + Get(namespace, name string) (*v1alpha1.ProjectHelmChart, error) + List(namespace string, selector labels.Selector) ([]*v1alpha1.ProjectHelmChart, error) + + AddIndexer(indexName string, indexer ProjectHelmChartIndexer) + GetByIndex(indexName, key string) ([]*v1alpha1.ProjectHelmChart, error) +} + +type ProjectHelmChartIndexer func(obj *v1alpha1.ProjectHelmChart) ([]string, error) + +type projectHelmChartController struct { + controller controller.SharedController + client *client.Client + gvk schema.GroupVersionKind + groupResource schema.GroupResource +} + +func NewProjectHelmChartController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) ProjectHelmChartController { + c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced) + return &projectHelmChartController{ + controller: c, + client: c.Client(), + gvk: gvk, + groupResource: schema.GroupResource{ + Group: gvk.Group, + Resource: resource, + }, + } +} + 
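+// FromProjectHelmChartHandlerToHandler adapts a typed ProjectHelmChartHandler into a generic wrangler Handler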
+func FromProjectHelmChartHandlerToHandler(sync ProjectHelmChartHandler) generic.Handler { + return func(key string, obj runtime.Object) (ret runtime.Object, err error) { + var v *v1alpha1.ProjectHelmChart + if obj == nil { + v, err = sync(key, nil) + } else { + v, err = sync(key, obj.(*v1alpha1.ProjectHelmChart)) + } + if v == nil { + return nil, err + } + return v, err + } +} + +func (c *projectHelmChartController) Updater() generic.Updater { + return func(obj runtime.Object) (runtime.Object, error) { + newObj, err := c.Update(obj.(*v1alpha1.ProjectHelmChart)) + if newObj == nil { + return nil, err + } + return newObj, err + } +} + +func UpdateProjectHelmChartDeepCopyOnChange(client ProjectHelmChartClient, obj *v1alpha1.ProjectHelmChart, handler func(obj *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error)) (*v1alpha1.ProjectHelmChart, error) { + if obj == nil { + return obj, nil + } + + copyObj := obj.DeepCopy() + newObj, err := handler(copyObj) + if newObj != nil { + copyObj = newObj + } + if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) { + return client.Update(copyObj) + } + + return copyObj, err +} + +func (c *projectHelmChartController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) { + c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler)) +} + +func (c *projectHelmChartController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) { + c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler)) +} + +func (c *projectHelmChartController) OnChange(ctx context.Context, name string, sync ProjectHelmChartHandler) { + c.AddGenericHandler(ctx, name, FromProjectHelmChartHandlerToHandler(sync)) +} + +func (c *projectHelmChartController) OnRemove(ctx context.Context, name string, sync ProjectHelmChartHandler) { + c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromProjectHelmChartHandlerToHandler(sync))) +} + +func (c *projectHelmChartController) Enqueue(namespace, name string) { + c.controller.Enqueue(namespace, name) +} + +func (c *projectHelmChartController) EnqueueAfter(namespace, name string, duration time.Duration) { + c.controller.EnqueueAfter(namespace, name, duration) +} + +func (c *projectHelmChartController) Informer() cache.SharedIndexInformer { + return c.controller.Informer() +} + +func (c *projectHelmChartController) GroupVersionKind() schema.GroupVersionKind { + return c.gvk +} + +func (c *projectHelmChartController) Cache() ProjectHelmChartCache { + return &projectHelmChartCache{ + indexer: c.Informer().GetIndexer(), + resource: c.groupResource, + } +} + +func (c *projectHelmChartController) Create(obj *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) { + result := &v1alpha1.ProjectHelmChart{} + return result, c.client.Create(context.TODO(), obj.Namespace, obj, result, metav1.CreateOptions{}) +} + +func (c *projectHelmChartController) Update(obj *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) { + result := &v1alpha1.ProjectHelmChart{} + return result, c.client.Update(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{}) +} + +func (c *projectHelmChartController) UpdateStatus(obj *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) { + result := &v1alpha1.ProjectHelmChart{} + return result, c.client.UpdateStatus(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{}) +} + +func (c 
*projectHelmChartController) Delete(namespace, name string, options *metav1.DeleteOptions) error { + if options == nil { + options = &metav1.DeleteOptions{} + } + return c.client.Delete(context.TODO(), namespace, name, *options) +} + +func (c *projectHelmChartController) Get(namespace, name string, options metav1.GetOptions) (*v1alpha1.ProjectHelmChart, error) { + result := &v1alpha1.ProjectHelmChart{} + return result, c.client.Get(context.TODO(), namespace, name, result, options) +} + +func (c *projectHelmChartController) List(namespace string, opts metav1.ListOptions) (*v1alpha1.ProjectHelmChartList, error) { + result := &v1alpha1.ProjectHelmChartList{} + return result, c.client.List(context.TODO(), namespace, result, opts) +} + +func (c *projectHelmChartController) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) { + return c.client.Watch(context.TODO(), namespace, opts) +} + +func (c *projectHelmChartController) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (*v1alpha1.ProjectHelmChart, error) { + result := &v1alpha1.ProjectHelmChart{} + return result, c.client.Patch(context.TODO(), namespace, name, pt, data, result, metav1.PatchOptions{}, subresources...) +} + +type projectHelmChartCache struct { + indexer cache.Indexer + resource schema.GroupResource +} + +func (c *projectHelmChartCache) Get(namespace, name string) (*v1alpha1.ProjectHelmChart, error) { + obj, exists, err := c.indexer.GetByKey(namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(c.resource, name) + } + return obj.(*v1alpha1.ProjectHelmChart), nil +} + +func (c *projectHelmChartCache) List(namespace string, selector labels.Selector) (ret []*v1alpha1.ProjectHelmChart, err error) { + + err = cache.ListAllByNamespace(c.indexer, namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ProjectHelmChart)) + }) + + return ret, err +} + +func (c *projectHelmChartCache) AddIndexer(indexName string, indexer ProjectHelmChartIndexer) { + utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{ + indexName: func(obj interface{}) (strings []string, e error) { + return indexer(obj.(*v1alpha1.ProjectHelmChart)) + }, + })) +} + +func (c *projectHelmChartCache) GetByIndex(indexName, key string) (result []*v1alpha1.ProjectHelmChart, err error) { + objs, err := c.indexer.ByIndex(indexName, key) + if err != nil { + return nil, err + } + result = make([]*v1alpha1.ProjectHelmChart, 0, len(objs)) + for _, obj := range objs { + result = append(result, obj.(*v1alpha1.ProjectHelmChart)) + } + return result, nil +} + +type ProjectHelmChartStatusHandler func(obj *v1alpha1.ProjectHelmChart, status v1alpha1.ProjectHelmChartStatus) (v1alpha1.ProjectHelmChartStatus, error) + +type ProjectHelmChartGeneratingHandler func(obj *v1alpha1.ProjectHelmChart, status v1alpha1.ProjectHelmChartStatus) ([]runtime.Object, v1alpha1.ProjectHelmChartStatus, error) + +func RegisterProjectHelmChartStatusHandler(ctx context.Context, controller ProjectHelmChartController, condition condition.Cond, name string, handler ProjectHelmChartStatusHandler) { + statusHandler := &projectHelmChartStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, FromProjectHelmChartHandlerToHandler(statusHandler.sync)) +} + +func RegisterProjectHelmChartGeneratingHandler(ctx context.Context, controller ProjectHelmChartController, apply apply.Apply, + condition 
condition.Cond, name string, handler ProjectHelmChartGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &projectHelmChartGeneratingHandler{ + ProjectHelmChartGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterProjectHelmChartStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type projectHelmChartStatusHandler struct { + client ProjectHelmChartClient + condition condition.Cond + handler ProjectHelmChartStatusHandler +} + +func (a *projectHelmChartStatusHandler) sync(key string, obj *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type projectHelmChartGeneratingHandler struct { + ProjectHelmChartGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string +} + +func (a *projectHelmChartGeneratingHandler) Remove(key string, obj *v1alpha1.ProjectHelmChart) (*v1alpha1.ProjectHelmChart, error) { + if obj != nil { + return obj, nil + } + + obj = &v1alpha1.ProjectHelmChart{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +func (a *projectHelmChartGeneratingHandler) Handle(obj *v1alpha1.ProjectHelmChart, status v1alpha1.ProjectHelmChartStatus) (v1alpha1.ProjectHelmChartStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.ProjectHelmChartGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + + return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) 
+} diff --git a/internal/helm-project-operator/pkg/operator/init.go b/internal/helm-project-operator/pkg/operator/init.go new file mode 100644 index 00000000..79e4c91a --- /dev/null +++ b/internal/helm-project-operator/pkg/operator/init.go @@ -0,0 +1,34 @@ +package operator + +import ( + "context" + "fmt" + + "github.com/rancher/helm-project-operator/pkg/controllers" + "github.com/rancher/helm-project-operator/pkg/controllers/common" + "github.com/rancher/helm-project-operator/pkg/crd" + "github.com/rancher/wrangler/v3/pkg/ratelimit" + "k8s.io/client-go/tools/clientcmd" +) + +// Init sets up a new Helm Project Operator with the provided options and configuration +func Init(ctx context.Context, systemNamespace string, cfg clientcmd.ClientConfig, opts common.Options) error { + if systemNamespace == "" { + return fmt.Errorf("system namespace was not specified, unclear where to place HelmCharts or HelmReleases") + } + if err := opts.Validate(); err != nil { + return err + } + + clientConfig, err := cfg.ClientConfig() + if err != nil { + return err + } + clientConfig.RateLimiter = ratelimit.None + + if err := crd.Create(ctx, clientConfig); err != nil { + return err + } + + return controllers.Register(ctx, systemNamespace, cfg, opts) +} diff --git a/internal/helm-project-operator/pkg/remove/handler.go b/internal/helm-project-operator/pkg/remove/handler.go new file mode 100644 index 00000000..c4867fe9 --- /dev/null +++ b/internal/helm-project-operator/pkg/remove/handler.go @@ -0,0 +1,39 @@ +package remove + +import ( + "context" + + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/apimachinery/pkg/runtime" +) + +// Controller is an interface that allows the ScopedOnRemoveHandler to register a generic RemoveHandler +type Controller interface { + AddGenericHandler(ctx context.Context, name string, handler generic.Handler) + Updater() generic.Updater +} + +// ScopeFunc is a function that determines whether the ScopedOnRemoveHandler should manage the lifecycle of the given object +type ScopeFunc func(key string, obj runtime.Object) (bool, error) + +// RegisterScopedOnRemoveHandler registers a handler that does the same thing as an OnRemove handler but only applies finalizers or sync logic +// to objects that pass the provided scopeFunc; this ensures that finalizers are not added to all resources across an entire cluster but are +// instead only scoped to resources that this controller is meant to watch. 
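+// (e.g. a controller watching Namespaces can restrict finalizers to only the namespaces this operator tracks, rather than every Namespace in the cluster)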
+// +// TODO: move this to rancher/wrangler as a generic construct to be used across multiple controllers as part of the auto-generated code +func RegisterScopedOnRemoveHandler(ctx context.Context, controller Controller, name string, scopeFunc ScopeFunc, handler generic.Handler) { + onRemove := generic.NewRemoveHandler(name, controller.Updater(), handler) + controller.AddGenericHandler(ctx, name, func(key string, obj runtime.Object) (runtime.Object, error) { + if obj == nil { + return nil, nil + } + isScoped, err := scopeFunc(key, obj) + if err != nil { + return obj, err + } + if !isScoped { + return obj, nil + } + return onRemove(key, obj) + }) +} diff --git a/internal/helm-project-operator/pkg/test/data/example-chart/example-chart.tgz.base64 b/internal/helm-project-operator/pkg/test/data/example-chart/example-chart.tgz.base64 new file mode 100644 index 00000000..e965846b --- /dev/null +++ b/internal/helm-project-operator/pkg/test/data/example-chart/example-chart.tgz.base64 @@ -0,0 +1,29 @@ +H4sIFAAAAAAA/ykAK2FIUjBjSE02THk5NWIzVjBkUzVpWlM5Nk9WVjZNV2xqYW5keVRRbz1IZWxt +AOwaXW8budHP+hUD58Et4N2sJdsqtkULnx30rkWuB18QoCj6MFqOtIy55IbkSlEd//eC5O5KK0uW +nah2c9E8WDI5nA/OJ0nRJyxKQVGWo7avL93feI6FONghJEmSnJ+e+s8kSVY/k7N+cnByOhz0B8lZ +MuwfJCeDYX9wAMkuhdgElbGoD5Kv5rWq3DcCKKWyaLmSJu0BZGhRqEmcobWCYq5eZ6QtH3NiKWiU +WU56LVrOGSOZwqHVFR2uRVEmBcFl9el4xiVTM7MWqyRdcGuix2FrEoSGIokFpdBx5h6W/D1pw5VM +YdrvYVm2/yZxEic9RibTvLR+6E1YCz+SKOAXrT5QZuEfJWm0SkOguI7LtEvzpe35VOjG/xRFRWbX +CWBL/PfP++cr8X96Muzv4/85YCLUCEUd+laQ+waQicpY0j9dpXB46EfKEBH3R37GgkyJGZkU/vXv +tTO/kqDMKp3C7Z1HqKP2l3s0zdxYKq5ojJWw1zThxup5O1tpUX/XI8yCpK9ewaUmtATXP1xcgiaj +Kp2RgbHS8CvpKc/oIstUJa0BlAwqQ9rUS4OufnkKLnH1Ah9D+loJMoHFMhMWRPMo4HHA5mjB5psy +x4wLAVhZVaDlGQoxrzn65T9wybiceHFbZguuP6oZMOWZcAMzpW/+sgbrXU6gOvxmaLPcbwEKAaYa +OZkMjFQlGVgFhFkOf69GpCVZMq1al8HuTjLg0mtVGxN0sIYvFSAb0y6EmOWkya+ot8Bth+8nwg4x +KoWaEwuq+HQDIxLKKW/VH4GPgVtQI0N6SgawkXohNEKJ2vKsEqhB1yIuBNguLPyO4kkMxLj9vfcF +Pgb0ZgD6xI2tbcnNQtjRfFneGbe55yNwRGLB+ignUSzXsCBG5IZDZo2cvBFOJpomaCkaa1UcpXD0 +Jzd+DK1cfz46dtuw6jStz+Cy1ywEqG1Vx9WSxqOAWbtBli98waqgraMXr3Gqn5ULilnORbBq4yIj +ynHKlXbbZJULBD/9sDMde5zWR8P2FiQtZChhRJApOeaTShNbMmigjsD4eEzaYRuyoMbLlA2g8cRD +3LtZqytbOz8ruDz2G3vsDT7lNAPfYRhXtM1C8caROtmgnbxgLNjc61zHvYIcp9TVrOTZjRspoCrr +5a3R36kmsfncUicchhZdnslJCJW6GBfsGbuIbv23VJQCLZnXwSAFljvoBrbU/4H73q3/wyTZ1/9n +gU6TfNK74ZKlcOmN/xbLXkEWGw8N3W8xj4JvRAWW9ajPNSnc3kJ8HVJQ3BZ/uLtrfTwsTOFz1HO4 +70O36WbhM1j1TywEfAYumYv1U7eybSYiudRndNeHFqZJv/faku2kOXsUyZ+uNpNqziGPJbnaAT1B +/8i07dRT9qFpwjZzCs1XVKfvSLft10Ns1nZsm3mEVZHv5B4iW2nxAJGQ/rductPFsk2koijqfV0A +RP19COxD4BsOgU31n6HJRwo1853r1zUB287/w3v3f8Ph4Gxf/58Dbm8jfwySbNWD3Bk7ro8d66ba +U3KDdHfXSaYeCSubK83/409i8c0fjDsetWnWLV+TYVeTKNzdRb6Vf2SuhbpXD8f3x53N1nL16+sd +engLNjT5DY0vOh+m0CgdhCDJHD1d+ZuJCLDkf9WqKr2eERxaMnbBxF+TNLchAcMhLI36PevMTEmP +6pEjN3R0r0Lu1qjuYPa92bTW+bdqUnfC/t5MWuv8ciZd4vvU/L+9/oc3gai9EHh6K7Dt/W9wftat +//1+cna6r//PAV9y/Fn1jV3E+2Z/Wx/yjVDuM/5gnPgbjlR/M0quNL0vven/R9CN/+s3F1dv38QF +2ymPbfHfP119/xv0z/f3f88Cr1aes3vvFs8d3AACq4piXg/cex5R0r8HiLG/d196JVj7FhavUi81 +L1BzMQcuLUlGzF/bu6LG5QTKSpfKkIn3Afs/g278f6zc1iu5258AbIv/k+HZ6vv/sL+P/2eB1uKh +oZyi5jhyfbarn75Q+6Lt6n470vndzNs5XDUTdl5SCsbq5nFS08eKa2JL72kT1wynYc1LK7+HPexh +D98x/DcAAP//C9RRoAAsAAA= diff --git a/internal/helm-project-operator/pkg/test/testdata.go b/internal/helm-project-operator/pkg/test/testdata.go new file mode 100644 index 00000000..765612e2 --- /dev/null +++ b/internal/helm-project-operator/pkg/test/testdata.go @@ -0,0 +1,20 @@ +package test + +import ( + "embed" + "io/fs" + "path" +) + +var ( + //go:embed 
data + TestDataFS embed.FS +) + +func TestData(filename string) []byte { + data, err := fs.ReadFile(TestDataFS, path.Join("data", filename)) + if err != nil { + panic(err) + } + return data +} diff --git a/internal/helm-project-operator/pkg/version/version.go b/internal/helm-project-operator/pkg/version/version.go new file mode 100644 index 00000000..028b81b3 --- /dev/null +++ b/internal/helm-project-operator/pkg/version/version.go @@ -0,0 +1,13 @@ +package version + +import "fmt" + +var ( + Version = "v0.0.0-dev" + GitCommit = "HEAD" +) + +// FriendlyVersion outputs a version that will be displayed on running --version on the binary +func FriendlyVersion() string { + return fmt.Sprintf("%s (%s)", Version, GitCommit) +}
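
Note on the values overlay in pkg/controllers/project/values.go: getValues builds the final chart values in layers, with later layers winning on conflict: operator defaults, then user-supplied spec.values, then operator value overrides, and finally the required project-based overrides. The MergeMaps helper itself is not part of this excerpt; the sketch below uses a hypothetical mergeMaps stand-in and assumes the same semantics, a recursive deep merge in which the second argument takes precedence.

package main

import "fmt"

// mergeMaps is a hypothetical stand-in for the MergeMaps helper referenced in
// getValues (defined elsewhere in this PR); it recursively merges b into a,
// with values from b winning on conflict.
func mergeMaps(a, b map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(a))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		// recurse when both sides hold nested maps so sibling keys are preserved
		if bv, ok := v.(map[string]interface{}); ok {
			if av, ok := out[k].(map[string]interface{}); ok {
				out[k] = mergeMaps(av, bv)
				continue
			}
		}
		out[k] = v
	}
	return out
}

func main() {
	defaults := map[string]interface{}{
		"global": map[string]interface{}{"cattle": map[string]interface{}{"url": "https://rancher.example"}},
	}
	user := map[string]interface{}{
		"global": map[string]interface{}{"cattle": map[string]interface{}{"clusterId": "spoofed"}},
	}
	required := map[string]interface{}{
		"global": map[string]interface{}{"cattle": map[string]interface{}{"clusterId": "local"}},
	}
	// same precedence order as getValues: defaults < user values < required overrides
	values := mergeMaps(mergeMaps(defaults, user), required)
	fmt.Println(values) // clusterId resolves to "local" regardless of user input
}

Because the required overrides are merged last, a user cannot override project-scoped values such as global.cattle.clusterId or global.cattle.projectNamespaceSelector from spec.values.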